diff options
312 files changed, 5152 insertions, 4335 deletions
diff --git a/Documentation/networking/dccp.txt b/Documentation/networking/dccp.txt index 7a3bb1abb830..b132e4a3cf0f 100644 --- a/Documentation/networking/dccp.txt +++ b/Documentation/networking/dccp.txt | |||
@@ -141,7 +141,8 @@ rx_ccid = 2 | |||
141 | Default CCID for the receiver-sender half-connection; see tx_ccid. | 141 | Default CCID for the receiver-sender half-connection; see tx_ccid. |
142 | 142 | ||
143 | seq_window = 100 | 143 | seq_window = 100 |
144 | The initial sequence window (sec. 7.5.2). | 144 | The initial sequence window (sec. 7.5.2) of the sender. This influences |
145 | the local ackno validity and the remote seqno validity windows (7.5.1). | ||
145 | 146 | ||
146 | tx_qlen = 5 | 147 | tx_qlen = 5 |
147 | The size of the transmit buffer in packets. A value of 0 corresponds | 148 | The size of the transmit buffer in packets. A value of 0 corresponds |
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index c7712787933c..ff3f219ee4d7 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt | |||
@@ -782,6 +782,12 @@ arp_ignore - INTEGER | |||
782 | The max value from conf/{all,interface}/arp_ignore is used | 782 | The max value from conf/{all,interface}/arp_ignore is used |
783 | when ARP request is received on the {interface} | 783 | when ARP request is received on the {interface} |
784 | 784 | ||
785 | arp_notify - BOOLEAN | ||
786 | Define mode for notification of address and device changes. | ||
787 | 0 - (default): do nothing | ||
788 | 1 - Generate gratuitous arp replies when device is brought up | ||
789 | or hardware address changes. | ||
790 | |||
785 | arp_accept - BOOLEAN | 791 | arp_accept - BOOLEAN |
786 | Define behavior when gratuitous arp replies are received: | 792 | Define behavior when gratuitous arp replies are received: |
787 | 0 - drop gratuitous arp frames | 793 | 0 - drop gratuitous arp frames |
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c index dc073e167abc..5608a1e5a3b3 100644 --- a/drivers/char/pcmcia/synclink_cs.c +++ b/drivers/char/pcmcia/synclink_cs.c | |||
@@ -4311,10 +4311,17 @@ static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size) | |||
4311 | dev->stats.rx_bytes += size; | 4311 | dev->stats.rx_bytes += size; |
4312 | 4312 | ||
4313 | netif_rx(skb); | 4313 | netif_rx(skb); |
4314 | |||
4315 | dev->last_rx = jiffies; | ||
4316 | } | 4314 | } |
4317 | 4315 | ||
4316 | static const struct net_device_ops hdlcdev_ops = { | ||
4317 | .ndo_open = hdlcdev_open, | ||
4318 | .ndo_stop = hdlcdev_close, | ||
4319 | .ndo_change_mtu = hdlc_change_mtu, | ||
4320 | .ndo_start_xmit = hdlc_start_xmit, | ||
4321 | .ndo_do_ioctl = hdlcdev_ioctl, | ||
4322 | .ndo_tx_timeout = hdlcdev_tx_timeout, | ||
4323 | }; | ||
4324 | |||
4318 | /** | 4325 | /** |
4319 | * called by device driver when adding device instance | 4326 | * called by device driver when adding device instance |
4320 | * do generic HDLC initialization | 4327 | * do generic HDLC initialization |
@@ -4341,11 +4348,8 @@ static int hdlcdev_init(MGSLPC_INFO *info) | |||
4341 | dev->irq = info->irq_level; | 4348 | dev->irq = info->irq_level; |
4342 | 4349 | ||
4343 | /* network layer callbacks and settings */ | 4350 | /* network layer callbacks and settings */ |
4344 | dev->do_ioctl = hdlcdev_ioctl; | 4351 | dev->netdev_ops = &hdlcdev_ops; |
4345 | dev->open = hdlcdev_open; | 4352 | dev->watchdog_timeo = 10 * HZ; |
4346 | dev->stop = hdlcdev_close; | ||
4347 | dev->tx_timeout = hdlcdev_tx_timeout; | ||
4348 | dev->watchdog_timeo = 10*HZ; | ||
4349 | dev->tx_queue_len = 50; | 4353 | dev->tx_queue_len = 50; |
4350 | 4354 | ||
4351 | /* generic HDLC layer callbacks and settings */ | 4355 | /* generic HDLC layer callbacks and settings */ |
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c index b8063d4cad32..0057a8f58cb1 100644 --- a/drivers/char/synclink.c +++ b/drivers/char/synclink.c | |||
@@ -8007,10 +8007,17 @@ static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size) | |||
8007 | dev->stats.rx_bytes += size; | 8007 | dev->stats.rx_bytes += size; |
8008 | 8008 | ||
8009 | netif_rx(skb); | 8009 | netif_rx(skb); |
8010 | |||
8011 | dev->last_rx = jiffies; | ||
8012 | } | 8010 | } |
8013 | 8011 | ||
8012 | static const struct net_device_ops hdlcdev_ops = { | ||
8013 | .ndo_open = hdlcdev_open, | ||
8014 | .ndo_stop = hdlcdev_close, | ||
8015 | .ndo_change_mtu = hdlc_change_mtu, | ||
8016 | .ndo_start_xmit = hdlc_start_xmit, | ||
8017 | .ndo_do_ioctl = hdlcdev_ioctl, | ||
8018 | .ndo_tx_timeout = hdlcdev_tx_timeout, | ||
8019 | }; | ||
8020 | |||
8014 | /** | 8021 | /** |
8015 | * called by device driver when adding device instance | 8022 | * called by device driver when adding device instance |
8016 | * do generic HDLC initialization | 8023 | * do generic HDLC initialization |
@@ -8038,11 +8045,8 @@ static int hdlcdev_init(struct mgsl_struct *info) | |||
8038 | dev->dma = info->dma_level; | 8045 | dev->dma = info->dma_level; |
8039 | 8046 | ||
8040 | /* network layer callbacks and settings */ | 8047 | /* network layer callbacks and settings */ |
8041 | dev->do_ioctl = hdlcdev_ioctl; | 8048 | dev->netdev_ops = &hdlcdev_ops; |
8042 | dev->open = hdlcdev_open; | 8049 | dev->watchdog_timeo = 10 * HZ; |
8043 | dev->stop = hdlcdev_close; | ||
8044 | dev->tx_timeout = hdlcdev_tx_timeout; | ||
8045 | dev->watchdog_timeo = 10*HZ; | ||
8046 | dev->tx_queue_len = 50; | 8050 | dev->tx_queue_len = 50; |
8047 | 8051 | ||
8048 | /* generic HDLC layer callbacks and settings */ | 8052 | /* generic HDLC layer callbacks and settings */ |
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c index f329f459817c..efb3dc928a43 100644 --- a/drivers/char/synclink_gt.c +++ b/drivers/char/synclink_gt.c | |||
@@ -1763,10 +1763,17 @@ static void hdlcdev_rx(struct slgt_info *info, char *buf, int size) | |||
1763 | dev->stats.rx_bytes += size; | 1763 | dev->stats.rx_bytes += size; |
1764 | 1764 | ||
1765 | netif_rx(skb); | 1765 | netif_rx(skb); |
1766 | |||
1767 | dev->last_rx = jiffies; | ||
1768 | } | 1766 | } |
1769 | 1767 | ||
1768 | static const struct net_device_ops hdlcdev_ops = { | ||
1769 | .ndo_open = hdlcdev_open, | ||
1770 | .ndo_stop = hdlcdev_close, | ||
1771 | .ndo_change_mtu = hdlc_change_mtu, | ||
1772 | .ndo_start_xmit = hdlc_start_xmit, | ||
1773 | .ndo_do_ioctl = hdlcdev_ioctl, | ||
1774 | .ndo_tx_timeout = hdlcdev_tx_timeout, | ||
1775 | }; | ||
1776 | |||
1770 | /** | 1777 | /** |
1771 | * called by device driver when adding device instance | 1778 | * called by device driver when adding device instance |
1772 | * do generic HDLC initialization | 1779 | * do generic HDLC initialization |
@@ -1794,11 +1801,8 @@ static int hdlcdev_init(struct slgt_info *info) | |||
1794 | dev->irq = info->irq_level; | 1801 | dev->irq = info->irq_level; |
1795 | 1802 | ||
1796 | /* network layer callbacks and settings */ | 1803 | /* network layer callbacks and settings */ |
1797 | dev->do_ioctl = hdlcdev_ioctl; | 1804 | dev->netdev_ops = &hdlcdev_ops; |
1798 | dev->open = hdlcdev_open; | 1805 | dev->watchdog_timeo = 10 * HZ; |
1799 | dev->stop = hdlcdev_close; | ||
1800 | dev->tx_timeout = hdlcdev_tx_timeout; | ||
1801 | dev->watchdog_timeo = 10*HZ; | ||
1802 | dev->tx_queue_len = 50; | 1806 | dev->tx_queue_len = 50; |
1803 | 1807 | ||
1804 | /* generic HDLC layer callbacks and settings */ | 1808 | /* generic HDLC layer callbacks and settings */ |
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c index 7b0c5b2dd263..8eb6c89a980e 100644 --- a/drivers/char/synclinkmp.c +++ b/drivers/char/synclinkmp.c | |||
@@ -1907,10 +1907,17 @@ static void hdlcdev_rx(SLMP_INFO *info, char *buf, int size) | |||
1907 | dev->stats.rx_bytes += size; | 1907 | dev->stats.rx_bytes += size; |
1908 | 1908 | ||
1909 | netif_rx(skb); | 1909 | netif_rx(skb); |
1910 | |||
1911 | dev->last_rx = jiffies; | ||
1912 | } | 1910 | } |
1913 | 1911 | ||
1912 | static const struct net_device_ops hdlcdev_ops = { | ||
1913 | .ndo_open = hdlcdev_open, | ||
1914 | .ndo_stop = hdlcdev_close, | ||
1915 | .ndo_change_mtu = hdlc_change_mtu, | ||
1916 | .ndo_start_xmit = hdlc_start_xmit, | ||
1917 | .ndo_do_ioctl = hdlcdev_ioctl, | ||
1918 | .ndo_tx_timeout = hdlcdev_tx_timeout, | ||
1919 | }; | ||
1920 | |||
1914 | /** | 1921 | /** |
1915 | * called by device driver when adding device instance | 1922 | * called by device driver when adding device instance |
1916 | * do generic HDLC initialization | 1923 | * do generic HDLC initialization |
@@ -1938,11 +1945,8 @@ static int hdlcdev_init(SLMP_INFO *info) | |||
1938 | dev->irq = info->irq_level; | 1945 | dev->irq = info->irq_level; |
1939 | 1946 | ||
1940 | /* network layer callbacks and settings */ | 1947 | /* network layer callbacks and settings */ |
1941 | dev->do_ioctl = hdlcdev_ioctl; | 1948 | dev->netdev_ops = &hdlcdev_ops; |
1942 | dev->open = hdlcdev_open; | 1949 | dev->watchdog_timeo = 10 * HZ; |
1943 | dev->stop = hdlcdev_close; | ||
1944 | dev->tx_timeout = hdlcdev_tx_timeout; | ||
1945 | dev->watchdog_timeo = 10*HZ; | ||
1946 | dev->tx_queue_len = 50; | 1950 | dev->tx_queue_len = 50; |
1947 | 1951 | ||
1948 | /* generic HDLC layer callbacks and settings */ | 1952 | /* generic HDLC layer callbacks and settings */ |
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c index b6fe7e7a2c2f..c769ef269fb5 100644 --- a/drivers/connector/cn_queue.c +++ b/drivers/connector/cn_queue.c | |||
@@ -1,9 +1,9 @@ | |||
1 | /* | 1 | /* |
2 | * cn_queue.c | 2 | * cn_queue.c |
3 | * | 3 | * |
4 | * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru> |
5 | * All rights reserved. | 5 | * All rights reserved. |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License as published by | 8 | * it under the terms of the GNU General Public License as published by |
9 | * the Free Software Foundation; either version 2 of the License, or | 9 | * the Free Software Foundation; either version 2 of the License, or |
@@ -31,6 +31,48 @@ | |||
31 | #include <linux/connector.h> | 31 | #include <linux/connector.h> |
32 | #include <linux/delay.h> | 32 | #include <linux/delay.h> |
33 | 33 | ||
34 | |||
35 | /* | ||
36 | * This job is sent to the kevent workqueue. | ||
37 | * While no event is once sent to any callback, the connector workqueue | ||
38 | * is not created to avoid a useless waiting kernel task. | ||
39 | * Once the first event is received, we create this dedicated workqueue which | ||
40 | * is necessary because the flow of data can be high and we don't want | ||
41 | * to encumber keventd with that. | ||
42 | */ | ||
43 | static void cn_queue_create(struct work_struct *work) | ||
44 | { | ||
45 | struct cn_queue_dev *dev; | ||
46 | |||
47 | dev = container_of(work, struct cn_queue_dev, wq_creation); | ||
48 | |||
49 | dev->cn_queue = create_singlethread_workqueue(dev->name); | ||
50 | /* If we fail, we will use keventd for all following connector jobs */ | ||
51 | WARN_ON(!dev->cn_queue); | ||
52 | } | ||
53 | |||
54 | /* | ||
55 | * Queue a data sent to a callback. | ||
56 | * If the connector workqueue is already created, we queue the job on it. | ||
57 | * Otherwise, we queue the job to kevent and queue the connector workqueue | ||
58 | * creation too. | ||
59 | */ | ||
60 | int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work) | ||
61 | { | ||
62 | struct cn_queue_dev *pdev = cbq->pdev; | ||
63 | |||
64 | if (likely(pdev->cn_queue)) | ||
65 | return queue_work(pdev->cn_queue, work); | ||
66 | |||
67 | /* Don't create the connector workqueue twice */ | ||
68 | if (atomic_inc_return(&pdev->wq_requested) == 1) | ||
69 | schedule_work(&pdev->wq_creation); | ||
70 | else | ||
71 | atomic_dec(&pdev->wq_requested); | ||
72 | |||
73 | return schedule_work(work); | ||
74 | } | ||
75 | |||
34 | void cn_queue_wrapper(struct work_struct *work) | 76 | void cn_queue_wrapper(struct work_struct *work) |
35 | { | 77 | { |
36 | struct cn_callback_entry *cbq = | 78 | struct cn_callback_entry *cbq = |
@@ -58,14 +100,17 @@ static struct cn_callback_entry *cn_queue_alloc_callback_entry(char *name, struc | |||
58 | snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name); | 100 | snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name); |
59 | memcpy(&cbq->id.id, id, sizeof(struct cb_id)); | 101 | memcpy(&cbq->id.id, id, sizeof(struct cb_id)); |
60 | cbq->data.callback = callback; | 102 | cbq->data.callback = callback; |
61 | 103 | ||
62 | INIT_WORK(&cbq->work, &cn_queue_wrapper); | 104 | INIT_WORK(&cbq->work, &cn_queue_wrapper); |
63 | return cbq; | 105 | return cbq; |
64 | } | 106 | } |
65 | 107 | ||
66 | static void cn_queue_free_callback(struct cn_callback_entry *cbq) | 108 | static void cn_queue_free_callback(struct cn_callback_entry *cbq) |
67 | { | 109 | { |
68 | flush_workqueue(cbq->pdev->cn_queue); | 110 | /* The first jobs have been sent to kevent, flush them too */ |
111 | flush_scheduled_work(); | ||
112 | if (cbq->pdev->cn_queue) | ||
113 | flush_workqueue(cbq->pdev->cn_queue); | ||
69 | 114 | ||
70 | kfree(cbq); | 115 | kfree(cbq); |
71 | } | 116 | } |
@@ -143,14 +188,11 @@ struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls) | |||
143 | atomic_set(&dev->refcnt, 0); | 188 | atomic_set(&dev->refcnt, 0); |
144 | INIT_LIST_HEAD(&dev->queue_list); | 189 | INIT_LIST_HEAD(&dev->queue_list); |
145 | spin_lock_init(&dev->queue_lock); | 190 | spin_lock_init(&dev->queue_lock); |
191 | init_waitqueue_head(&dev->wq_created); | ||
146 | 192 | ||
147 | dev->nls = nls; | 193 | dev->nls = nls; |
148 | 194 | ||
149 | dev->cn_queue = create_singlethread_workqueue(dev->name); | 195 | INIT_WORK(&dev->wq_creation, cn_queue_create); |
150 | if (!dev->cn_queue) { | ||
151 | kfree(dev); | ||
152 | return NULL; | ||
153 | } | ||
154 | 196 | ||
155 | return dev; | 197 | return dev; |
156 | } | 198 | } |
@@ -158,9 +200,25 @@ struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls) | |||
158 | void cn_queue_free_dev(struct cn_queue_dev *dev) | 200 | void cn_queue_free_dev(struct cn_queue_dev *dev) |
159 | { | 201 | { |
160 | struct cn_callback_entry *cbq, *n; | 202 | struct cn_callback_entry *cbq, *n; |
203 | long timeout; | ||
204 | DEFINE_WAIT(wait); | ||
205 | |||
206 | /* Flush the first pending jobs queued on kevent */ | ||
207 | flush_scheduled_work(); | ||
208 | |||
209 | /* If the connector workqueue creation is still pending, wait for it */ | ||
210 | prepare_to_wait(&dev->wq_created, &wait, TASK_UNINTERRUPTIBLE); | ||
211 | if (atomic_read(&dev->wq_requested) && !dev->cn_queue) { | ||
212 | timeout = schedule_timeout(HZ * 2); | ||
213 | if (!timeout && !dev->cn_queue) | ||
214 | WARN_ON(1); | ||
215 | } | ||
216 | finish_wait(&dev->wq_created, &wait); | ||
161 | 217 | ||
162 | flush_workqueue(dev->cn_queue); | 218 | if (dev->cn_queue) { |
163 | destroy_workqueue(dev->cn_queue); | 219 | flush_workqueue(dev->cn_queue); |
220 | destroy_workqueue(dev->cn_queue); | ||
221 | } | ||
164 | 222 | ||
165 | spin_lock_bh(&dev->queue_lock); | 223 | spin_lock_bh(&dev->queue_lock); |
166 | list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) | 224 | list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) |
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c index bf4830082a13..fd336c5a9057 100644 --- a/drivers/connector/connector.c +++ b/drivers/connector/connector.c | |||
@@ -1,9 +1,9 @@ | |||
1 | /* | 1 | /* |
2 | * connector.c | 2 | * connector.c |
3 | * | 3 | * |
4 | * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru> | 4 | * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru> |
5 | * All rights reserved. | 5 | * All rights reserved. |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License as published by | 8 | * it under the terms of the GNU General Public License as published by |
9 | * the Free Software Foundation; either version 2 of the License, or | 9 | * the Free Software Foundation; either version 2 of the License, or |
@@ -145,14 +145,13 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v | |||
145 | __cbq->data.ddata = data; | 145 | __cbq->data.ddata = data; |
146 | __cbq->data.destruct_data = destruct_data; | 146 | __cbq->data.destruct_data = destruct_data; |
147 | 147 | ||
148 | if (queue_work(dev->cbdev->cn_queue, | 148 | if (queue_cn_work(__cbq, &__cbq->work)) |
149 | &__cbq->work)) | ||
150 | err = 0; | 149 | err = 0; |
151 | else | 150 | else |
152 | err = -EINVAL; | 151 | err = -EINVAL; |
153 | } else { | 152 | } else { |
154 | struct cn_callback_data *d; | 153 | struct cn_callback_data *d; |
155 | 154 | ||
156 | err = -ENOMEM; | 155 | err = -ENOMEM; |
157 | __new_cbq = kzalloc(sizeof(struct cn_callback_entry), GFP_ATOMIC); | 156 | __new_cbq = kzalloc(sizeof(struct cn_callback_entry), GFP_ATOMIC); |
158 | if (__new_cbq) { | 157 | if (__new_cbq) { |
@@ -163,10 +162,12 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v | |||
163 | d->destruct_data = destruct_data; | 162 | d->destruct_data = destruct_data; |
164 | d->free = __new_cbq; | 163 | d->free = __new_cbq; |
165 | 164 | ||
165 | __new_cbq->pdev = __cbq->pdev; | ||
166 | |||
166 | INIT_WORK(&__new_cbq->work, | 167 | INIT_WORK(&__new_cbq->work, |
167 | &cn_queue_wrapper); | 168 | &cn_queue_wrapper); |
168 | 169 | ||
169 | if (queue_work(dev->cbdev->cn_queue, | 170 | if (queue_cn_work(__new_cbq, |
170 | &__new_cbq->work)) | 171 | &__new_cbq->work)) |
171 | err = 0; | 172 | err = 0; |
172 | else { | 173 | else { |
@@ -237,7 +238,7 @@ static void cn_notify(struct cb_id *id, u32 notify_event) | |||
237 | 238 | ||
238 | req = (struct cn_notify_req *)ctl->data; | 239 | req = (struct cn_notify_req *)ctl->data; |
239 | for (i = 0; i < ctl->idx_notify_num; ++i, ++req) { | 240 | for (i = 0; i < ctl->idx_notify_num; ++i, ++req) { |
240 | if (id->idx >= req->first && | 241 | if (id->idx >= req->first && |
241 | id->idx < req->first + req->range) { | 242 | id->idx < req->first + req->range) { |
242 | idx_found = 1; | 243 | idx_found = 1; |
243 | break; | 244 | break; |
@@ -245,7 +246,7 @@ static void cn_notify(struct cb_id *id, u32 notify_event) | |||
245 | } | 246 | } |
246 | 247 | ||
247 | for (i = 0; i < ctl->val_notify_num; ++i, ++req) { | 248 | for (i = 0; i < ctl->val_notify_num; ++i, ++req) { |
248 | if (id->val >= req->first && | 249 | if (id->val >= req->first && |
249 | id->val < req->first + req->range) { | 250 | id->val < req->first + req->range) { |
250 | val_found = 1; | 251 | val_found = 1; |
251 | break; | 252 | break; |
@@ -459,7 +460,7 @@ static int __devinit cn_init(void) | |||
459 | netlink_kernel_release(dev->nls); | 460 | netlink_kernel_release(dev->nls); |
460 | return -EINVAL; | 461 | return -EINVAL; |
461 | } | 462 | } |
462 | 463 | ||
463 | cn_already_initialized = 1; | 464 | cn_already_initialized = 1; |
464 | 465 | ||
465 | err = cn_add_callback(&dev->id, "connector", &cn_callback); | 466 | err = cn_add_callback(&dev->id, "connector", &cn_callback); |
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c index 4dcf08b3fd83..11efd3528ce4 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.c +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c | |||
@@ -701,6 +701,9 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry, | |||
701 | u32 stag_idx; | 701 | u32 stag_idx; |
702 | u32 wptr; | 702 | u32 wptr; |
703 | 703 | ||
704 | if (rdev_p->flags) | ||
705 | return -EIO; | ||
706 | |||
704 | stag_state = stag_state > 0; | 707 | stag_state = stag_state > 0; |
705 | stag_idx = (*stag) >> 8; | 708 | stag_idx = (*stag) >> 8; |
706 | 709 | ||
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h index 656fe47bc84f..9ed65b055171 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.h +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h | |||
@@ -108,6 +108,8 @@ struct cxio_rdev { | |||
108 | struct gen_pool *pbl_pool; | 108 | struct gen_pool *pbl_pool; |
109 | struct gen_pool *rqt_pool; | 109 | struct gen_pool *rqt_pool; |
110 | struct list_head entry; | 110 | struct list_head entry; |
111 | u32 flags; | ||
112 | #define CXIO_ERROR_FATAL 1 | ||
111 | }; | 113 | }; |
112 | 114 | ||
113 | static inline int cxio_num_stags(struct cxio_rdev *rdev_p) | 115 | static inline int cxio_num_stags(struct cxio_rdev *rdev_p) |
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c index 4489c89d6710..37a4fc264a07 100644 --- a/drivers/infiniband/hw/cxgb3/iwch.c +++ b/drivers/infiniband/hw/cxgb3/iwch.c | |||
@@ -51,13 +51,15 @@ cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS]; | |||
51 | 51 | ||
52 | static void open_rnic_dev(struct t3cdev *); | 52 | static void open_rnic_dev(struct t3cdev *); |
53 | static void close_rnic_dev(struct t3cdev *); | 53 | static void close_rnic_dev(struct t3cdev *); |
54 | static void iwch_err_handler(struct t3cdev *, u32, u32); | ||
54 | 55 | ||
55 | struct cxgb3_client t3c_client = { | 56 | struct cxgb3_client t3c_client = { |
56 | .name = "iw_cxgb3", | 57 | .name = "iw_cxgb3", |
57 | .add = open_rnic_dev, | 58 | .add = open_rnic_dev, |
58 | .remove = close_rnic_dev, | 59 | .remove = close_rnic_dev, |
59 | .handlers = t3c_handlers, | 60 | .handlers = t3c_handlers, |
60 | .redirect = iwch_ep_redirect | 61 | .redirect = iwch_ep_redirect, |
62 | .err_handler = iwch_err_handler | ||
61 | }; | 63 | }; |
62 | 64 | ||
63 | static LIST_HEAD(dev_list); | 65 | static LIST_HEAD(dev_list); |
@@ -160,6 +162,17 @@ static void close_rnic_dev(struct t3cdev *tdev) | |||
160 | mutex_unlock(&dev_mutex); | 162 | mutex_unlock(&dev_mutex); |
161 | } | 163 | } |
162 | 164 | ||
165 | static void iwch_err_handler(struct t3cdev *tdev, u32 status, u32 error) | ||
166 | { | ||
167 | struct cxio_rdev *rdev = tdev->ulp; | ||
168 | |||
169 | if (status == OFFLOAD_STATUS_DOWN) | ||
170 | rdev->flags = CXIO_ERROR_FATAL; | ||
171 | |||
172 | return; | ||
173 | |||
174 | } | ||
175 | |||
163 | static int __init iwch_init_module(void) | 176 | static int __init iwch_init_module(void) |
164 | { | 177 | { |
165 | int err; | 178 | int err; |
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 5d139db1b771..53df9de23423 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c | |||
@@ -2541,7 +2541,7 @@ static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic | |||
2541 | { | 2541 | { |
2542 | struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq); | 2542 | struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq); |
2543 | 2543 | ||
2544 | netif_rx_schedule(&nesvnic->napi); | 2544 | napi_schedule(&nesvnic->napi); |
2545 | } | 2545 | } |
2546 | 2546 | ||
2547 | 2547 | ||
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 57a47cf7e513..f5484ad1279b 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c | |||
@@ -111,7 +111,7 @@ static int nes_netdev_poll(struct napi_struct *napi, int budget) | |||
111 | nes_nic_ce_handler(nesdev, nescq); | 111 | nes_nic_ce_handler(nesdev, nescq); |
112 | 112 | ||
113 | if (nescq->cqes_pending == 0) { | 113 | if (nescq->cqes_pending == 0) { |
114 | netif_rx_complete(napi); | 114 | napi_complete(napi); |
115 | /* clear out completed cqes and arm */ | 115 | /* clear out completed cqes and arm */ |
116 | nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT | | 116 | nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT | |
117 | nescq->cq_number | (nescq->cqe_allocs_pending << 16)); | 117 | nescq->cq_number | (nescq->cqe_allocs_pending << 16)); |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index a1925810be3c..da6082739839 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
@@ -446,11 +446,11 @@ poll_more: | |||
446 | if (dev->features & NETIF_F_LRO) | 446 | if (dev->features & NETIF_F_LRO) |
447 | lro_flush_all(&priv->lro.lro_mgr); | 447 | lro_flush_all(&priv->lro.lro_mgr); |
448 | 448 | ||
449 | netif_rx_complete(napi); | 449 | napi_complete(napi); |
450 | if (unlikely(ib_req_notify_cq(priv->recv_cq, | 450 | if (unlikely(ib_req_notify_cq(priv->recv_cq, |
451 | IB_CQ_NEXT_COMP | | 451 | IB_CQ_NEXT_COMP | |
452 | IB_CQ_REPORT_MISSED_EVENTS)) && | 452 | IB_CQ_REPORT_MISSED_EVENTS)) && |
453 | netif_rx_reschedule(napi)) | 453 | napi_reschedule(napi)) |
454 | goto poll_more; | 454 | goto poll_more; |
455 | } | 455 | } |
456 | 456 | ||
@@ -462,7 +462,7 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr) | |||
462 | struct net_device *dev = dev_ptr; | 462 | struct net_device *dev = dev_ptr; |
463 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 463 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
464 | 464 | ||
465 | netif_rx_schedule(&priv->napi); | 465 | napi_schedule(&priv->napi); |
466 | } | 466 | } |
467 | 467 | ||
468 | static void drain_tx_cq(struct net_device *dev) | 468 | static void drain_tx_cq(struct net_device *dev) |
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c index 3d1318a3e688..1c5344aa57cc 100644 --- a/drivers/net/3c501.c +++ b/drivers/net/3c501.c | |||
@@ -197,6 +197,17 @@ out: | |||
197 | return ERR_PTR(err); | 197 | return ERR_PTR(err); |
198 | } | 198 | } |
199 | 199 | ||
200 | static const struct net_device_ops el_netdev_ops = { | ||
201 | .ndo_open = el_open, | ||
202 | .ndo_stop = el1_close, | ||
203 | .ndo_start_xmit = el_start_xmit, | ||
204 | .ndo_tx_timeout = el_timeout, | ||
205 | .ndo_set_multicast_list = set_multicast_list, | ||
206 | .ndo_change_mtu = eth_change_mtu, | ||
207 | .ndo_set_mac_address = eth_mac_addr, | ||
208 | .ndo_validate_addr = eth_validate_addr, | ||
209 | }; | ||
210 | |||
200 | /** | 211 | /** |
201 | * el1_probe1: | 212 | * el1_probe1: |
202 | * @dev: The device structure to use | 213 | * @dev: The device structure to use |
@@ -305,12 +316,8 @@ static int __init el1_probe1(struct net_device *dev, int ioaddr) | |||
305 | * The EL1-specific entries in the device structure. | 316 | * The EL1-specific entries in the device structure. |
306 | */ | 317 | */ |
307 | 318 | ||
308 | dev->open = &el_open; | 319 | dev->netdev_ops = &el_netdev_ops; |
309 | dev->hard_start_xmit = &el_start_xmit; | ||
310 | dev->tx_timeout = &el_timeout; | ||
311 | dev->watchdog_timeo = HZ; | 320 | dev->watchdog_timeo = HZ; |
312 | dev->stop = &el1_close; | ||
313 | dev->set_multicast_list = &set_multicast_list; | ||
314 | dev->ethtool_ops = &netdev_ethtool_ops; | 321 | dev->ethtool_ops = &netdev_ethtool_ops; |
315 | return 0; | 322 | return 0; |
316 | } | 323 | } |
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c index 6124605bef05..ea1ad8ce8836 100644 --- a/drivers/net/3c505.c +++ b/drivers/net/3c505.c | |||
@@ -1348,6 +1348,17 @@ static int __init elp_autodetect(struct net_device *dev) | |||
1348 | return 0; /* Because of this, the layer above will return -ENODEV */ | 1348 | return 0; /* Because of this, the layer above will return -ENODEV */ |
1349 | } | 1349 | } |
1350 | 1350 | ||
1351 | static const struct net_device_ops elp_netdev_ops = { | ||
1352 | .ndo_open = elp_open, | ||
1353 | .ndo_stop = elp_close, | ||
1354 | .ndo_get_stats = elp_get_stats, | ||
1355 | .ndo_start_xmit = elp_start_xmit, | ||
1356 | .ndo_tx_timeout = elp_timeout, | ||
1357 | .ndo_set_multicast_list = elp_set_mc_list, | ||
1358 | .ndo_change_mtu = eth_change_mtu, | ||
1359 | .ndo_set_mac_address = eth_mac_addr, | ||
1360 | .ndo_validate_addr = eth_validate_addr, | ||
1361 | }; | ||
1351 | 1362 | ||
1352 | /****************************************************** | 1363 | /****************************************************** |
1353 | * | 1364 | * |
@@ -1552,13 +1563,8 @@ static int __init elplus_setup(struct net_device *dev) | |||
1552 | printk(KERN_ERR "%s: adapter configuration failed\n", dev->name); | 1563 | printk(KERN_ERR "%s: adapter configuration failed\n", dev->name); |
1553 | } | 1564 | } |
1554 | 1565 | ||
1555 | dev->open = elp_open; /* local */ | 1566 | dev->netdev_ops = &elp_netdev_ops; |
1556 | dev->stop = elp_close; /* local */ | ||
1557 | dev->get_stats = elp_get_stats; /* local */ | ||
1558 | dev->hard_start_xmit = elp_start_xmit; /* local */ | ||
1559 | dev->tx_timeout = elp_timeout; /* local */ | ||
1560 | dev->watchdog_timeo = 10*HZ; | 1567 | dev->watchdog_timeo = 10*HZ; |
1561 | dev->set_multicast_list = elp_set_mc_list; /* local */ | ||
1562 | dev->ethtool_ops = &netdev_ethtool_ops; /* local */ | 1568 | dev->ethtool_ops = &netdev_ethtool_ops; /* local */ |
1563 | 1569 | ||
1564 | dev->mem_start = dev->mem_end = 0; | 1570 | dev->mem_start = dev->mem_end = 0; |
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c index 423e65d0ba73..fbbaf826deff 100644 --- a/drivers/net/3c507.c +++ b/drivers/net/3c507.c | |||
@@ -352,6 +352,16 @@ out: | |||
352 | return ERR_PTR(err); | 352 | return ERR_PTR(err); |
353 | } | 353 | } |
354 | 354 | ||
355 | static const struct net_device_ops netdev_ops = { | ||
356 | .ndo_open = el16_open, | ||
357 | .ndo_stop = el16_close, | ||
358 | .ndo_start_xmit = el16_send_packet, | ||
359 | .ndo_tx_timeout = el16_tx_timeout, | ||
360 | .ndo_change_mtu = eth_change_mtu, | ||
361 | .ndo_set_mac_address = eth_mac_addr, | ||
362 | .ndo_validate_addr = eth_validate_addr, | ||
363 | }; | ||
364 | |||
355 | static int __init el16_probe1(struct net_device *dev, int ioaddr) | 365 | static int __init el16_probe1(struct net_device *dev, int ioaddr) |
356 | { | 366 | { |
357 | static unsigned char init_ID_done, version_printed; | 367 | static unsigned char init_ID_done, version_printed; |
@@ -449,10 +459,7 @@ static int __init el16_probe1(struct net_device *dev, int ioaddr) | |||
449 | goto out1; | 459 | goto out1; |
450 | } | 460 | } |
451 | 461 | ||
452 | dev->open = el16_open; | 462 | dev->netdev_ops = &netdev_ops; |
453 | dev->stop = el16_close; | ||
454 | dev->hard_start_xmit = el16_send_packet; | ||
455 | dev->tx_timeout = el16_tx_timeout; | ||
456 | dev->watchdog_timeo = TX_TIMEOUT; | 463 | dev->watchdog_timeo = TX_TIMEOUT; |
457 | dev->ethtool_ops = &netdev_ethtool_ops; | 464 | dev->ethtool_ops = &netdev_ethtool_ops; |
458 | dev->flags &= ~IFF_MULTICAST; /* Multicast doesn't work */ | 465 | dev->flags &= ~IFF_MULTICAST; /* Multicast doesn't work */ |
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c index 535c234286ea..d58919c7032e 100644 --- a/drivers/net/3c509.c +++ b/drivers/net/3c509.c | |||
@@ -537,6 +537,21 @@ static struct mca_driver el3_mca_driver = { | |||
537 | static int mca_registered; | 537 | static int mca_registered; |
538 | #endif /* CONFIG_MCA */ | 538 | #endif /* CONFIG_MCA */ |
539 | 539 | ||
540 | static const struct net_device_ops netdev_ops = { | ||
541 | .ndo_open = el3_open, | ||
542 | .ndo_stop = el3_close, | ||
543 | .ndo_start_xmit = el3_start_xmit, | ||
544 | .ndo_get_stats = el3_get_stats, | ||
545 | .ndo_set_multicast_list = set_multicast_list, | ||
546 | .ndo_tx_timeout = el3_tx_timeout, | ||
547 | .ndo_change_mtu = eth_change_mtu, | ||
548 | .ndo_set_mac_address = eth_mac_addr, | ||
549 | .ndo_validate_addr = eth_validate_addr, | ||
550 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
551 | .ndo_poll_controller = el3_poll_controller, | ||
552 | #endif | ||
553 | }; | ||
554 | |||
540 | static int __devinit el3_common_init(struct net_device *dev) | 555 | static int __devinit el3_common_init(struct net_device *dev) |
541 | { | 556 | { |
542 | struct el3_private *lp = netdev_priv(dev); | 557 | struct el3_private *lp = netdev_priv(dev); |
@@ -553,16 +568,8 @@ static int __devinit el3_common_init(struct net_device *dev) | |||
553 | } | 568 | } |
554 | 569 | ||
555 | /* The EL3-specific entries in the device structure. */ | 570 | /* The EL3-specific entries in the device structure. */ |
556 | dev->open = &el3_open; | 571 | dev->netdev_ops = &netdev_ops; |
557 | dev->hard_start_xmit = &el3_start_xmit; | ||
558 | dev->stop = &el3_close; | ||
559 | dev->get_stats = &el3_get_stats; | ||
560 | dev->set_multicast_list = &set_multicast_list; | ||
561 | dev->tx_timeout = el3_tx_timeout; | ||
562 | dev->watchdog_timeo = TX_TIMEOUT; | 572 | dev->watchdog_timeo = TX_TIMEOUT; |
563 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
564 | dev->poll_controller = el3_poll_controller; | ||
565 | #endif | ||
566 | SET_ETHTOOL_OPS(dev, ðtool_ops); | 573 | SET_ETHTOOL_OPS(dev, ðtool_ops); |
567 | 574 | ||
568 | err = register_netdev(dev); | 575 | err = register_netdev(dev); |
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c index 39ac12233aa7..167bf23066ea 100644 --- a/drivers/net/3c515.c +++ b/drivers/net/3c515.c | |||
@@ -563,6 +563,20 @@ no_pnp: | |||
563 | return NULL; | 563 | return NULL; |
564 | } | 564 | } |
565 | 565 | ||
566 | |||
567 | static const struct net_device_ops netdev_ops = { | ||
568 | .ndo_open = corkscrew_open, | ||
569 | .ndo_stop = corkscrew_close, | ||
570 | .ndo_start_xmit = corkscrew_start_xmit, | ||
571 | .ndo_tx_timeout = corkscrew_timeout, | ||
572 | .ndo_get_stats = corkscrew_get_stats, | ||
573 | .ndo_set_multicast_list = set_rx_mode, | ||
574 | .ndo_change_mtu = eth_change_mtu, | ||
575 | .ndo_set_mac_address = eth_mac_addr, | ||
576 | .ndo_validate_addr = eth_validate_addr, | ||
577 | }; | ||
578 | |||
579 | |||
566 | static int corkscrew_setup(struct net_device *dev, int ioaddr, | 580 | static int corkscrew_setup(struct net_device *dev, int ioaddr, |
567 | struct pnp_dev *idev, int card_number) | 581 | struct pnp_dev *idev, int card_number) |
568 | { | 582 | { |
@@ -681,13 +695,8 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr, | |||
681 | vp->full_bus_master_rx = (vp->capabilities & 0x20) ? 1 : 0; | 695 | vp->full_bus_master_rx = (vp->capabilities & 0x20) ? 1 : 0; |
682 | 696 | ||
683 | /* The 3c51x-specific entries in the device structure. */ | 697 | /* The 3c51x-specific entries in the device structure. */ |
684 | dev->open = &corkscrew_open; | 698 | dev->netdev_ops = &netdev_ops; |
685 | dev->hard_start_xmit = &corkscrew_start_xmit; | ||
686 | dev->tx_timeout = &corkscrew_timeout; | ||
687 | dev->watchdog_timeo = (400 * HZ) / 1000; | 699 | dev->watchdog_timeo = (400 * HZ) / 1000; |
688 | dev->stop = &corkscrew_close; | ||
689 | dev->get_stats = &corkscrew_get_stats; | ||
690 | dev->set_multicast_list = &set_rx_mode; | ||
691 | dev->ethtool_ops = &netdev_ethtool_ops; | 700 | dev->ethtool_ops = &netdev_ethtool_ops; |
692 | 701 | ||
693 | return register_netdev(dev); | 702 | return register_netdev(dev); |
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c index ff41e1ff5603..8f734d74b513 100644 --- a/drivers/net/3c523.c +++ b/drivers/net/3c523.c | |||
@@ -403,6 +403,20 @@ static int elmc_getinfo(char *buf, int slot, void *d) | |||
403 | return len; | 403 | return len; |
404 | } /* elmc_getinfo() */ | 404 | } /* elmc_getinfo() */ |
405 | 405 | ||
406 | static const struct net_device_ops netdev_ops = { | ||
407 | .ndo_open = elmc_open, | ||
408 | .ndo_stop = elmc_close, | ||
409 | .ndo_get_stats = elmc_get_stats, | ||
410 | .ndo_start_xmit = elmc_send_packet, | ||
411 | .ndo_tx_timeout = elmc_timeout, | ||
412 | #ifdef ELMC_MULTICAST | ||
413 | .ndo_set_multicast_list = set_multicast_list, | ||
414 | #endif | ||
415 | .ndo_change_mtu = eth_change_mtu, | ||
416 | .ndo_set_mac_address = eth_mac_addr, | ||
417 | .ndo_validate_addr = eth_validate_addr, | ||
418 | }; | ||
419 | |||
406 | /*****************************************************************/ | 420 | /*****************************************************************/ |
407 | 421 | ||
408 | static int __init do_elmc_probe(struct net_device *dev) | 422 | static int __init do_elmc_probe(struct net_device *dev) |
@@ -544,17 +558,8 @@ static int __init do_elmc_probe(struct net_device *dev) | |||
544 | printk(KERN_INFO "%s: hardware address %pM\n", | 558 | printk(KERN_INFO "%s: hardware address %pM\n", |
545 | dev->name, dev->dev_addr); | 559 | dev->name, dev->dev_addr); |
546 | 560 | ||
547 | dev->open = &elmc_open; | 561 | dev->netdev_ops = &netdev_ops; |
548 | dev->stop = &elmc_close; | ||
549 | dev->get_stats = &elmc_get_stats; | ||
550 | dev->hard_start_xmit = &elmc_send_packet; | ||
551 | dev->tx_timeout = &elmc_timeout; | ||
552 | dev->watchdog_timeo = HZ; | 562 | dev->watchdog_timeo = HZ; |
553 | #ifdef ELMC_MULTICAST | ||
554 | dev->set_multicast_list = &set_multicast_list; | ||
555 | #else | ||
556 | dev->set_multicast_list = NULL; | ||
557 | #endif | ||
558 | dev->ethtool_ops = &netdev_ethtool_ops; | 563 | dev->ethtool_ops = &netdev_ethtool_ops; |
559 | 564 | ||
560 | /* note that we haven't actually requested the IRQ from the kernel. | 565 | /* note that we haven't actually requested the IRQ from the kernel. |
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c index 2df3af3b9b20..b61073c42bf8 100644 --- a/drivers/net/3c527.c +++ b/drivers/net/3c527.c | |||
@@ -288,6 +288,18 @@ struct net_device *__init mc32_probe(int unit) | |||
288 | return ERR_PTR(-ENODEV); | 288 | return ERR_PTR(-ENODEV); |
289 | } | 289 | } |
290 | 290 | ||
291 | static const struct net_device_ops netdev_ops = { | ||
292 | .ndo_open = mc32_open, | ||
293 | .ndo_stop = mc32_close, | ||
294 | .ndo_start_xmit = mc32_send_packet, | ||
295 | .ndo_get_stats = mc32_get_stats, | ||
296 | .ndo_set_multicast_list = mc32_set_multicast_list, | ||
297 | .ndo_tx_timeout = mc32_timeout, | ||
298 | .ndo_change_mtu = eth_change_mtu, | ||
299 | .ndo_set_mac_address = eth_mac_addr, | ||
300 | .ndo_validate_addr = eth_validate_addr, | ||
301 | }; | ||
302 | |||
291 | /** | 303 | /** |
292 | * mc32_probe1 - Check a given slot for a board and test the card | 304 | * mc32_probe1 - Check a given slot for a board and test the card |
293 | * @dev: Device structure to fill in | 305 | * @dev: Device structure to fill in |
@@ -518,12 +530,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot) | |||
518 | printk("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n", | 530 | printk("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n", |
519 | dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base); | 531 | dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base); |
520 | 532 | ||
521 | dev->open = mc32_open; | 533 | dev->netdev_ops = &netdev_ops; |
522 | dev->stop = mc32_close; | ||
523 | dev->hard_start_xmit = mc32_send_packet; | ||
524 | dev->get_stats = mc32_get_stats; | ||
525 | dev->set_multicast_list = mc32_set_multicast_list; | ||
526 | dev->tx_timeout = mc32_timeout; | ||
527 | dev->watchdog_timeo = HZ*5; /* Board does all the work */ | 534 | dev->watchdog_timeo = HZ*5; /* Board does all the work */ |
528 | dev->ethtool_ops = &netdev_ethtool_ops; | 535 | dev->ethtool_ops = &netdev_ethtool_ops; |
529 | 536 | ||
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index cdbbb6226fc5..b2563d384cf2 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c | |||
@@ -992,6 +992,42 @@ out: | |||
992 | return rc; | 992 | return rc; |
993 | } | 993 | } |
994 | 994 | ||
995 | static const struct net_device_ops boomrang_netdev_ops = { | ||
996 | .ndo_open = vortex_open, | ||
997 | .ndo_stop = vortex_close, | ||
998 | .ndo_start_xmit = boomerang_start_xmit, | ||
999 | .ndo_tx_timeout = vortex_tx_timeout, | ||
1000 | .ndo_get_stats = vortex_get_stats, | ||
1001 | #ifdef CONFIG_PCI | ||
1002 | .ndo_do_ioctl = vortex_ioctl, | ||
1003 | #endif | ||
1004 | .ndo_set_multicast_list = set_rx_mode, | ||
1005 | .ndo_change_mtu = eth_change_mtu, | ||
1006 | .ndo_set_mac_address = eth_mac_addr, | ||
1007 | .ndo_validate_addr = eth_validate_addr, | ||
1008 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1009 | .ndo_poll_controller = poll_vortex, | ||
1010 | #endif | ||
1011 | }; | ||
1012 | |||
1013 | static const struct net_device_ops vortex_netdev_ops = { | ||
1014 | .ndo_open = vortex_open, | ||
1015 | .ndo_stop = vortex_close, | ||
1016 | .ndo_start_xmit = vortex_start_xmit, | ||
1017 | .ndo_tx_timeout = vortex_tx_timeout, | ||
1018 | .ndo_get_stats = vortex_get_stats, | ||
1019 | #ifdef CONFIG_PCI | ||
1020 | .ndo_do_ioctl = vortex_ioctl, | ||
1021 | #endif | ||
1022 | .ndo_set_multicast_list = set_rx_mode, | ||
1023 | .ndo_change_mtu = eth_change_mtu, | ||
1024 | .ndo_set_mac_address = eth_mac_addr, | ||
1025 | .ndo_validate_addr = eth_validate_addr, | ||
1026 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1027 | .ndo_poll_controller = poll_vortex, | ||
1028 | #endif | ||
1029 | }; | ||
1030 | |||
995 | /* | 1031 | /* |
996 | * Start up the PCI/EISA device which is described by *gendev. | 1032 | * Start up the PCI/EISA device which is described by *gendev. |
997 | * Return 0 on success. | 1033 | * Return 0 on success. |
@@ -1366,18 +1402,16 @@ static int __devinit vortex_probe1(struct device *gendev, | |||
1366 | } | 1402 | } |
1367 | 1403 | ||
1368 | /* The 3c59x-specific entries in the device structure. */ | 1404 | /* The 3c59x-specific entries in the device structure. */ |
1369 | dev->open = vortex_open; | ||
1370 | if (vp->full_bus_master_tx) { | 1405 | if (vp->full_bus_master_tx) { |
1371 | dev->hard_start_xmit = boomerang_start_xmit; | 1406 | dev->netdev_ops = &boomrang_netdev_ops; |
1372 | /* Actually, it still should work with iommu. */ | 1407 | /* Actually, it still should work with iommu. */ |
1373 | if (card_idx < MAX_UNITS && | 1408 | if (card_idx < MAX_UNITS && |
1374 | ((hw_checksums[card_idx] == -1 && (vp->drv_flags & HAS_HWCKSM)) || | 1409 | ((hw_checksums[card_idx] == -1 && (vp->drv_flags & HAS_HWCKSM)) || |
1375 | hw_checksums[card_idx] == 1)) { | 1410 | hw_checksums[card_idx] == 1)) { |
1376 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; | 1411 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; |
1377 | } | 1412 | } |
1378 | } else { | 1413 | } else |
1379 | dev->hard_start_xmit = vortex_start_xmit; | 1414 | dev->netdev_ops = &vortex_netdev_ops; |
1380 | } | ||
1381 | 1415 | ||
1382 | if (print_info) { | 1416 | if (print_info) { |
1383 | printk(KERN_INFO "%s: scatter/gather %sabled. h/w checksums %sabled\n", | 1417 | printk(KERN_INFO "%s: scatter/gather %sabled. h/w checksums %sabled\n", |
@@ -1386,18 +1420,9 @@ static int __devinit vortex_probe1(struct device *gendev, | |||
1386 | (dev->features & NETIF_F_IP_CSUM) ? "en":"dis"); | 1420 | (dev->features & NETIF_F_IP_CSUM) ? "en":"dis"); |
1387 | } | 1421 | } |
1388 | 1422 | ||
1389 | dev->stop = vortex_close; | ||
1390 | dev->get_stats = vortex_get_stats; | ||
1391 | #ifdef CONFIG_PCI | ||
1392 | dev->do_ioctl = vortex_ioctl; | ||
1393 | #endif | ||
1394 | dev->ethtool_ops = &vortex_ethtool_ops; | 1423 | dev->ethtool_ops = &vortex_ethtool_ops; |
1395 | dev->set_multicast_list = set_rx_mode; | ||
1396 | dev->tx_timeout = vortex_tx_timeout; | ||
1397 | dev->watchdog_timeo = (watchdog * HZ) / 1000; | 1424 | dev->watchdog_timeo = (watchdog * HZ) / 1000; |
1398 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1425 | |
1399 | dev->poll_controller = poll_vortex; | ||
1400 | #endif | ||
1401 | if (pdev) { | 1426 | if (pdev) { |
1402 | vp->pm_state_valid = 1; | 1427 | vp->pm_state_valid = 1; |
1403 | pci_save_state(VORTEX_PCI(vp)); | 1428 | pci_save_state(VORTEX_PCI(vp)); |
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c index 4e19ae3ce6be..35517b06ec3f 100644 --- a/drivers/net/8139cp.c +++ b/drivers/net/8139cp.c | |||
@@ -604,7 +604,7 @@ rx_next: | |||
604 | 604 | ||
605 | spin_lock_irqsave(&cp->lock, flags); | 605 | spin_lock_irqsave(&cp->lock, flags); |
606 | cpw16_f(IntrMask, cp_intr_mask); | 606 | cpw16_f(IntrMask, cp_intr_mask); |
607 | __netif_rx_complete(napi); | 607 | __napi_complete(napi); |
608 | spin_unlock_irqrestore(&cp->lock, flags); | 608 | spin_unlock_irqrestore(&cp->lock, flags); |
609 | } | 609 | } |
610 | 610 | ||
@@ -641,9 +641,9 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance) | |||
641 | } | 641 | } |
642 | 642 | ||
643 | if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr)) | 643 | if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr)) |
644 | if (netif_rx_schedule_prep(&cp->napi)) { | 644 | if (napi_schedule_prep(&cp->napi)) { |
645 | cpw16_f(IntrMask, cp_norx_intr_mask); | 645 | cpw16_f(IntrMask, cp_norx_intr_mask); |
646 | __netif_rx_schedule(&cp->napi); | 646 | __napi_schedule(&cp->napi); |
647 | } | 647 | } |
648 | 648 | ||
649 | if (status & (TxOK | TxErr | TxEmpty | SWInt)) | 649 | if (status & (TxOK | TxErr | TxEmpty | SWInt)) |
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c index a5b24202d564..5341da604e84 100644 --- a/drivers/net/8139too.c +++ b/drivers/net/8139too.c | |||
@@ -2128,7 +2128,7 @@ static int rtl8139_poll(struct napi_struct *napi, int budget) | |||
2128 | */ | 2128 | */ |
2129 | spin_lock_irqsave(&tp->lock, flags); | 2129 | spin_lock_irqsave(&tp->lock, flags); |
2130 | RTL_W16_F(IntrMask, rtl8139_intr_mask); | 2130 | RTL_W16_F(IntrMask, rtl8139_intr_mask); |
2131 | __netif_rx_complete(napi); | 2131 | __napi_complete(napi); |
2132 | spin_unlock_irqrestore(&tp->lock, flags); | 2132 | spin_unlock_irqrestore(&tp->lock, flags); |
2133 | } | 2133 | } |
2134 | spin_unlock(&tp->rx_lock); | 2134 | spin_unlock(&tp->rx_lock); |
@@ -2178,9 +2178,9 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance) | |||
2178 | /* Receive packets are processed by poll routine. | 2178 | /* Receive packets are processed by poll routine. |
2179 | If not running start it now. */ | 2179 | If not running start it now. */ |
2180 | if (status & RxAckBits){ | 2180 | if (status & RxAckBits){ |
2181 | if (netif_rx_schedule_prep(&tp->napi)) { | 2181 | if (napi_schedule_prep(&tp->napi)) { |
2182 | RTL_W16_F (IntrMask, rtl8139_norx_intr_mask); | 2182 | RTL_W16_F (IntrMask, rtl8139_norx_intr_mask); |
2183 | __netif_rx_schedule(&tp->napi); | 2183 | __napi_schedule(&tp->napi); |
2184 | } | 2184 | } |
2185 | } | 2185 | } |
2186 | 2186 | ||
diff --git a/drivers/net/82596.c b/drivers/net/82596.c index b273596368e3..cca94b9c08ae 100644 --- a/drivers/net/82596.c +++ b/drivers/net/82596.c | |||
@@ -1122,6 +1122,17 @@ static void print_eth(unsigned char *add, char *str) | |||
1122 | static int io = 0x300; | 1122 | static int io = 0x300; |
1123 | static int irq = 10; | 1123 | static int irq = 10; |
1124 | 1124 | ||
1125 | static const struct net_device_ops i596_netdev_ops = { | ||
1126 | .ndo_open = i596_open, | ||
1127 | .ndo_stop = i596_close, | ||
1128 | .ndo_start_xmit = i596_start_xmit, | ||
1129 | .ndo_set_multicast_list = set_multicast_list, | ||
1130 | .ndo_tx_timeout = i596_tx_timeout, | ||
1131 | .ndo_change_mtu = eth_change_mtu, | ||
1132 | .ndo_set_mac_address = eth_mac_addr, | ||
1133 | .ndo_validate_addr = eth_validate_addr, | ||
1134 | }; | ||
1135 | |||
1125 | struct net_device * __init i82596_probe(int unit) | 1136 | struct net_device * __init i82596_probe(int unit) |
1126 | { | 1137 | { |
1127 | struct net_device *dev; | 1138 | struct net_device *dev; |
@@ -1232,11 +1243,7 @@ found: | |||
1232 | DEB(DEB_PROBE,printk(KERN_INFO "%s", version)); | 1243 | DEB(DEB_PROBE,printk(KERN_INFO "%s", version)); |
1233 | 1244 | ||
1234 | /* The 82596-specific entries in the device structure. */ | 1245 | /* The 82596-specific entries in the device structure. */ |
1235 | dev->open = i596_open; | 1246 | dev->netdev_ops = &i596_netdev_ops; |
1236 | dev->stop = i596_close; | ||
1237 | dev->hard_start_xmit = i596_start_xmit; | ||
1238 | dev->set_multicast_list = set_multicast_list; | ||
1239 | dev->tx_timeout = i596_tx_timeout; | ||
1240 | dev->watchdog_timeo = TX_TIMEOUT; | 1247 | dev->watchdog_timeo = TX_TIMEOUT; |
1241 | 1248 | ||
1242 | dev->ml_priv = (void *)(dev->mem_start); | 1249 | dev->ml_priv = (void *)(dev->mem_start); |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 6bdfd47d679d..49f4d50abc56 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -1829,10 +1829,10 @@ config 68360_ENET | |||
1829 | 1829 | ||
1830 | config FEC | 1830 | config FEC |
1831 | bool "FEC ethernet controller (of ColdFire CPUs)" | 1831 | bool "FEC ethernet controller (of ColdFire CPUs)" |
1832 | depends on M523x || M527x || M5272 || M528x || M520x || M532x | 1832 | depends on M523x || M527x || M5272 || M528x || M520x || M532x || MACH_MX27 |
1833 | help | 1833 | help |
1834 | Say Y here if you want to use the built-in 10/100 Fast ethernet | 1834 | Say Y here if you want to use the built-in 10/100 Fast ethernet |
1835 | controller on some Motorola ColdFire processors. | 1835 | controller on some Motorola ColdFire and Freescale i.MX processors. |
1836 | 1836 | ||
1837 | config FEC2 | 1837 | config FEC2 |
1838 | bool "Second FEC ethernet controller (on some ColdFire CPUs)" | 1838 | bool "Second FEC ethernet controller (on some ColdFire CPUs)" |
@@ -2022,7 +2022,6 @@ config IGB | |||
2022 | config IGB_LRO | 2022 | config IGB_LRO |
2023 | bool "Use software LRO" | 2023 | bool "Use software LRO" |
2024 | depends on IGB && INET | 2024 | depends on IGB && INET |
2025 | select INET_LRO | ||
2026 | ---help--- | 2025 | ---help--- |
2027 | Say Y here if you want to use large receive offload. | 2026 | Say Y here if you want to use large receive offload. |
2028 | 2027 | ||
@@ -2408,7 +2407,6 @@ config CHELSIO_T3 | |||
2408 | tristate "Chelsio Communications T3 10Gb Ethernet support" | 2407 | tristate "Chelsio Communications T3 10Gb Ethernet support" |
2409 | depends on CHELSIO_T3_DEPENDS | 2408 | depends on CHELSIO_T3_DEPENDS |
2410 | select FW_LOADER | 2409 | select FW_LOADER |
2411 | select INET_LRO | ||
2412 | help | 2410 | help |
2413 | This driver supports Chelsio T3-based gigabit and 10Gb Ethernet | 2411 | This driver supports Chelsio T3-based gigabit and 10Gb Ethernet |
2414 | adapters. | 2412 | adapters. |
@@ -2444,7 +2442,6 @@ config ENIC | |||
2444 | config IXGBE | 2442 | config IXGBE |
2445 | tristate "Intel(R) 10GbE PCI Express adapters support" | 2443 | tristate "Intel(R) 10GbE PCI Express adapters support" |
2446 | depends on PCI && INET | 2444 | depends on PCI && INET |
2447 | select INET_LRO | ||
2448 | ---help--- | 2445 | ---help--- |
2449 | This driver supports Intel(R) 10GbE PCI Express family of | 2446 | This driver supports Intel(R) 10GbE PCI Express family of |
2450 | adapters. For more information on how to identify your adapter, go | 2447 | adapters. For more information on how to identify your adapter, go |
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c index 7709992bb6bf..cb9c95d3ed0a 100644 --- a/drivers/net/amd8111e.c +++ b/drivers/net/amd8111e.c | |||
@@ -831,7 +831,7 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget) | |||
831 | if (rx_pkt_limit > 0) { | 831 | if (rx_pkt_limit > 0) { |
832 | /* Receive descriptor is empty now */ | 832 | /* Receive descriptor is empty now */ |
833 | spin_lock_irqsave(&lp->lock, flags); | 833 | spin_lock_irqsave(&lp->lock, flags); |
834 | __netif_rx_complete(napi); | 834 | __napi_complete(napi); |
835 | writel(VAL0|RINTEN0, mmio + INTEN0); | 835 | writel(VAL0|RINTEN0, mmio + INTEN0); |
836 | writel(VAL2 | RDMD0, mmio + CMD0); | 836 | writel(VAL2 | RDMD0, mmio + CMD0); |
837 | spin_unlock_irqrestore(&lp->lock, flags); | 837 | spin_unlock_irqrestore(&lp->lock, flags); |
@@ -1170,11 +1170,11 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id) | |||
1170 | 1170 | ||
1171 | /* Check if Receive Interrupt has occurred. */ | 1171 | /* Check if Receive Interrupt has occurred. */ |
1172 | if (intr0 & RINT0) { | 1172 | if (intr0 & RINT0) { |
1173 | if (netif_rx_schedule_prep(&lp->napi)) { | 1173 | if (napi_schedule_prep(&lp->napi)) { |
1174 | /* Disable receive interupts */ | 1174 | /* Disable receive interupts */ |
1175 | writel(RINTEN0, mmio + INTEN0); | 1175 | writel(RINTEN0, mmio + INTEN0); |
1176 | /* Schedule a polling routine */ | 1176 | /* Schedule a polling routine */ |
1177 | __netif_rx_schedule(&lp->napi); | 1177 | __napi_schedule(&lp->napi); |
1178 | } else if (intren0 & RINTEN0) { | 1178 | } else if (intren0 & RINTEN0) { |
1179 | printk("************Driver bug! \ | 1179 | printk("************Driver bug! \ |
1180 | interrupt while in poll\n"); | 1180 | interrupt while in poll\n"); |
diff --git a/drivers/net/arcnet/arc-rawmode.c b/drivers/net/arcnet/arc-rawmode.c index 3ff9affb1a91..646dfc5f50c9 100644 --- a/drivers/net/arcnet/arc-rawmode.c +++ b/drivers/net/arcnet/arc-rawmode.c | |||
@@ -102,7 +102,7 @@ static void rx(struct net_device *dev, int bufnum, | |||
102 | skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC); | 102 | skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC); |
103 | if (skb == NULL) { | 103 | if (skb == NULL) { |
104 | BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n"); | 104 | BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n"); |
105 | lp->stats.rx_dropped++; | 105 | dev->stats.rx_dropped++; |
106 | return; | 106 | return; |
107 | } | 107 | } |
108 | skb_put(skb, length + ARC_HDR_SIZE); | 108 | skb_put(skb, length + ARC_HDR_SIZE); |
@@ -122,7 +122,7 @@ static void rx(struct net_device *dev, int bufnum, | |||
122 | 122 | ||
123 | BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx"); | 123 | BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx"); |
124 | 124 | ||
125 | skb->protocol = __constant_htons(ETH_P_ARCNET); | 125 | skb->protocol = cpu_to_be16(ETH_P_ARCNET); |
126 | ; | 126 | ; |
127 | netif_rx(skb); | 127 | netif_rx(skb); |
128 | } | 128 | } |
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index 6b53e5ed125c..a80d4a30a464 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c | |||
@@ -95,17 +95,16 @@ EXPORT_SYMBOL(arcnet_unregister_proto); | |||
95 | EXPORT_SYMBOL(arcnet_debug); | 95 | EXPORT_SYMBOL(arcnet_debug); |
96 | EXPORT_SYMBOL(alloc_arcdev); | 96 | EXPORT_SYMBOL(alloc_arcdev); |
97 | EXPORT_SYMBOL(arcnet_interrupt); | 97 | EXPORT_SYMBOL(arcnet_interrupt); |
98 | EXPORT_SYMBOL(arcnet_open); | ||
99 | EXPORT_SYMBOL(arcnet_close); | ||
100 | EXPORT_SYMBOL(arcnet_send_packet); | ||
101 | EXPORT_SYMBOL(arcnet_timeout); | ||
98 | 102 | ||
99 | /* Internal function prototypes */ | 103 | /* Internal function prototypes */ |
100 | static int arcnet_open(struct net_device *dev); | ||
101 | static int arcnet_close(struct net_device *dev); | ||
102 | static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev); | ||
103 | static void arcnet_timeout(struct net_device *dev); | ||
104 | static int arcnet_header(struct sk_buff *skb, struct net_device *dev, | 104 | static int arcnet_header(struct sk_buff *skb, struct net_device *dev, |
105 | unsigned short type, const void *daddr, | 105 | unsigned short type, const void *daddr, |
106 | const void *saddr, unsigned len); | 106 | const void *saddr, unsigned len); |
107 | static int arcnet_rebuild_header(struct sk_buff *skb); | 107 | static int arcnet_rebuild_header(struct sk_buff *skb); |
108 | static struct net_device_stats *arcnet_get_stats(struct net_device *dev); | ||
109 | static int go_tx(struct net_device *dev); | 108 | static int go_tx(struct net_device *dev); |
110 | 109 | ||
111 | static int debug = ARCNET_DEBUG; | 110 | static int debug = ARCNET_DEBUG; |
@@ -322,11 +321,18 @@ static const struct header_ops arcnet_header_ops = { | |||
322 | .rebuild = arcnet_rebuild_header, | 321 | .rebuild = arcnet_rebuild_header, |
323 | }; | 322 | }; |
324 | 323 | ||
324 | static const struct net_device_ops arcnet_netdev_ops = { | ||
325 | .ndo_open = arcnet_open, | ||
326 | .ndo_stop = arcnet_close, | ||
327 | .ndo_start_xmit = arcnet_send_packet, | ||
328 | .ndo_tx_timeout = arcnet_timeout, | ||
329 | }; | ||
325 | 330 | ||
326 | /* Setup a struct device for ARCnet. */ | 331 | /* Setup a struct device for ARCnet. */ |
327 | static void arcdev_setup(struct net_device *dev) | 332 | static void arcdev_setup(struct net_device *dev) |
328 | { | 333 | { |
329 | dev->type = ARPHRD_ARCNET; | 334 | dev->type = ARPHRD_ARCNET; |
335 | dev->netdev_ops = &arcnet_netdev_ops; | ||
330 | dev->header_ops = &arcnet_header_ops; | 336 | dev->header_ops = &arcnet_header_ops; |
331 | dev->hard_header_len = sizeof(struct archdr); | 337 | dev->hard_header_len = sizeof(struct archdr); |
332 | dev->mtu = choose_mtu(); | 338 | dev->mtu = choose_mtu(); |
@@ -339,18 +345,9 @@ static void arcdev_setup(struct net_device *dev) | |||
339 | /* New-style flags. */ | 345 | /* New-style flags. */ |
340 | dev->flags = IFF_BROADCAST; | 346 | dev->flags = IFF_BROADCAST; |
341 | 347 | ||
342 | /* | ||
343 | * Put in this stuff here, so we don't have to export the symbols to | ||
344 | * the chipset drivers. | ||
345 | */ | ||
346 | dev->open = arcnet_open; | ||
347 | dev->stop = arcnet_close; | ||
348 | dev->hard_start_xmit = arcnet_send_packet; | ||
349 | dev->tx_timeout = arcnet_timeout; | ||
350 | dev->get_stats = arcnet_get_stats; | ||
351 | } | 348 | } |
352 | 349 | ||
353 | struct net_device *alloc_arcdev(char *name) | 350 | struct net_device *alloc_arcdev(const char *name) |
354 | { | 351 | { |
355 | struct net_device *dev; | 352 | struct net_device *dev; |
356 | 353 | ||
@@ -372,7 +369,7 @@ struct net_device *alloc_arcdev(char *name) | |||
372 | * that "should" only need to be set once at boot, so that there is | 369 | * that "should" only need to be set once at boot, so that there is |
373 | * non-reboot way to recover if something goes wrong. | 370 | * non-reboot way to recover if something goes wrong. |
374 | */ | 371 | */ |
375 | static int arcnet_open(struct net_device *dev) | 372 | int arcnet_open(struct net_device *dev) |
376 | { | 373 | { |
377 | struct arcnet_local *lp = netdev_priv(dev); | 374 | struct arcnet_local *lp = netdev_priv(dev); |
378 | int count, newmtu, error; | 375 | int count, newmtu, error; |
@@ -472,7 +469,7 @@ static int arcnet_open(struct net_device *dev) | |||
472 | 469 | ||
473 | 470 | ||
474 | /* The inverse routine to arcnet_open - shuts down the card. */ | 471 | /* The inverse routine to arcnet_open - shuts down the card. */ |
475 | static int arcnet_close(struct net_device *dev) | 472 | int arcnet_close(struct net_device *dev) |
476 | { | 473 | { |
477 | struct arcnet_local *lp = netdev_priv(dev); | 474 | struct arcnet_local *lp = netdev_priv(dev); |
478 | 475 | ||
@@ -583,8 +580,8 @@ static int arcnet_rebuild_header(struct sk_buff *skb) | |||
583 | } else { | 580 | } else { |
584 | BUGMSG(D_NORMAL, | 581 | BUGMSG(D_NORMAL, |
585 | "I don't understand ethernet protocol %Xh addresses!\n", type); | 582 | "I don't understand ethernet protocol %Xh addresses!\n", type); |
586 | lp->stats.tx_errors++; | 583 | dev->stats.tx_errors++; |
587 | lp->stats.tx_aborted_errors++; | 584 | dev->stats.tx_aborted_errors++; |
588 | } | 585 | } |
589 | 586 | ||
590 | /* if we couldn't resolve the address... give up. */ | 587 | /* if we couldn't resolve the address... give up. */ |
@@ -601,7 +598,7 @@ static int arcnet_rebuild_header(struct sk_buff *skb) | |||
601 | 598 | ||
602 | 599 | ||
603 | /* Called by the kernel in order to transmit a packet. */ | 600 | /* Called by the kernel in order to transmit a packet. */ |
604 | static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev) | 601 | int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev) |
605 | { | 602 | { |
606 | struct arcnet_local *lp = netdev_priv(dev); | 603 | struct arcnet_local *lp = netdev_priv(dev); |
607 | struct archdr *pkt; | 604 | struct archdr *pkt; |
@@ -645,7 +642,7 @@ static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev) | |||
645 | !proto->ack_tx) { | 642 | !proto->ack_tx) { |
646 | /* done right away and we don't want to acknowledge | 643 | /* done right away and we don't want to acknowledge |
647 | the package later - forget about it now */ | 644 | the package later - forget about it now */ |
648 | lp->stats.tx_bytes += skb->len; | 645 | dev->stats.tx_bytes += skb->len; |
649 | freeskb = 1; | 646 | freeskb = 1; |
650 | } else { | 647 | } else { |
651 | /* do it the 'split' way */ | 648 | /* do it the 'split' way */ |
@@ -709,7 +706,7 @@ static int go_tx(struct net_device *dev) | |||
709 | /* start sending */ | 706 | /* start sending */ |
710 | ACOMMAND(TXcmd | (lp->cur_tx << 3)); | 707 | ACOMMAND(TXcmd | (lp->cur_tx << 3)); |
711 | 708 | ||
712 | lp->stats.tx_packets++; | 709 | dev->stats.tx_packets++; |
713 | lp->lasttrans_dest = lp->lastload_dest; | 710 | lp->lasttrans_dest = lp->lastload_dest; |
714 | lp->lastload_dest = 0; | 711 | lp->lastload_dest = 0; |
715 | lp->excnak_pending = 0; | 712 | lp->excnak_pending = 0; |
@@ -720,7 +717,7 @@ static int go_tx(struct net_device *dev) | |||
720 | 717 | ||
721 | 718 | ||
722 | /* Called by the kernel when transmit times out */ | 719 | /* Called by the kernel when transmit times out */ |
723 | static void arcnet_timeout(struct net_device *dev) | 720 | void arcnet_timeout(struct net_device *dev) |
724 | { | 721 | { |
725 | unsigned long flags; | 722 | unsigned long flags; |
726 | struct arcnet_local *lp = netdev_priv(dev); | 723 | struct arcnet_local *lp = netdev_priv(dev); |
@@ -732,11 +729,11 @@ static void arcnet_timeout(struct net_device *dev) | |||
732 | msg = " - missed IRQ?"; | 729 | msg = " - missed IRQ?"; |
733 | } else { | 730 | } else { |
734 | msg = ""; | 731 | msg = ""; |
735 | lp->stats.tx_aborted_errors++; | 732 | dev->stats.tx_aborted_errors++; |
736 | lp->timed_out = 1; | 733 | lp->timed_out = 1; |
737 | ACOMMAND(NOTXcmd | (lp->cur_tx << 3)); | 734 | ACOMMAND(NOTXcmd | (lp->cur_tx << 3)); |
738 | } | 735 | } |
739 | lp->stats.tx_errors++; | 736 | dev->stats.tx_errors++; |
740 | 737 | ||
741 | /* make sure we didn't miss a TX or a EXC NAK IRQ */ | 738 | /* make sure we didn't miss a TX or a EXC NAK IRQ */ |
742 | AINTMASK(0); | 739 | AINTMASK(0); |
@@ -865,8 +862,8 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) | |||
865 | "transmit was not acknowledged! " | 862 | "transmit was not acknowledged! " |
866 | "(status=%Xh, dest=%02Xh)\n", | 863 | "(status=%Xh, dest=%02Xh)\n", |
867 | status, lp->lasttrans_dest); | 864 | status, lp->lasttrans_dest); |
868 | lp->stats.tx_errors++; | 865 | dev->stats.tx_errors++; |
869 | lp->stats.tx_carrier_errors++; | 866 | dev->stats.tx_carrier_errors++; |
870 | } else { | 867 | } else { |
871 | BUGMSG(D_DURING, | 868 | BUGMSG(D_DURING, |
872 | "broadcast was not acknowledged; that's normal " | 869 | "broadcast was not acknowledged; that's normal " |
@@ -905,7 +902,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) | |||
905 | if (txbuf != -1) { | 902 | if (txbuf != -1) { |
906 | if (lp->outgoing.proto->continue_tx(dev, txbuf)) { | 903 | if (lp->outgoing.proto->continue_tx(dev, txbuf)) { |
907 | /* that was the last segment */ | 904 | /* that was the last segment */ |
908 | lp->stats.tx_bytes += lp->outgoing.skb->len; | 905 | dev->stats.tx_bytes += lp->outgoing.skb->len; |
909 | if(!lp->outgoing.proto->ack_tx) | 906 | if(!lp->outgoing.proto->ack_tx) |
910 | { | 907 | { |
911 | dev_kfree_skb_irq(lp->outgoing.skb); | 908 | dev_kfree_skb_irq(lp->outgoing.skb); |
@@ -930,7 +927,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) | |||
930 | } | 927 | } |
931 | if (status & lp->intmask & RECONflag) { | 928 | if (status & lp->intmask & RECONflag) { |
932 | ACOMMAND(CFLAGScmd | CONFIGclear); | 929 | ACOMMAND(CFLAGScmd | CONFIGclear); |
933 | lp->stats.tx_carrier_errors++; | 930 | dev->stats.tx_carrier_errors++; |
934 | 931 | ||
935 | BUGMSG(D_RECON, "Network reconfiguration detected (status=%Xh)\n", | 932 | BUGMSG(D_RECON, "Network reconfiguration detected (status=%Xh)\n", |
936 | status); | 933 | status); |
@@ -1038,8 +1035,8 @@ static void arcnet_rx(struct net_device *dev, int bufnum) | |||
1038 | "(%d+4 bytes)\n", | 1035 | "(%d+4 bytes)\n", |
1039 | bufnum, pkt.hard.source, pkt.hard.dest, length); | 1036 | bufnum, pkt.hard.source, pkt.hard.dest, length); |
1040 | 1037 | ||
1041 | lp->stats.rx_packets++; | 1038 | dev->stats.rx_packets++; |
1042 | lp->stats.rx_bytes += length + ARC_HDR_SIZE; | 1039 | dev->stats.rx_bytes += length + ARC_HDR_SIZE; |
1043 | 1040 | ||
1044 | /* call the right receiver for the protocol */ | 1041 | /* call the right receiver for the protocol */ |
1045 | if (arc_proto_map[soft->proto]->is_ip) { | 1042 | if (arc_proto_map[soft->proto]->is_ip) { |
@@ -1067,18 +1064,6 @@ static void arcnet_rx(struct net_device *dev, int bufnum) | |||
1067 | } | 1064 | } |
1068 | 1065 | ||
1069 | 1066 | ||
1070 | |||
1071 | /* | ||
1072 | * Get the current statistics. This may be called with the card open or | ||
1073 | * closed. | ||
1074 | */ | ||
1075 | static struct net_device_stats *arcnet_get_stats(struct net_device *dev) | ||
1076 | { | ||
1077 | struct arcnet_local *lp = netdev_priv(dev); | ||
1078 | return &lp->stats; | ||
1079 | } | ||
1080 | |||
1081 | |||
1082 | static void null_rx(struct net_device *dev, int bufnum, | 1067 | static void null_rx(struct net_device *dev, int bufnum, |
1083 | struct archdr *pkthdr, int length) | 1068 | struct archdr *pkthdr, int length) |
1084 | { | 1069 | { |
diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c index 30580bbe252d..083e21094b20 100644 --- a/drivers/net/arcnet/capmode.c +++ b/drivers/net/arcnet/capmode.c | |||
@@ -119,7 +119,7 @@ static void rx(struct net_device *dev, int bufnum, | |||
119 | skb = alloc_skb(length + ARC_HDR_SIZE + sizeof(int), GFP_ATOMIC); | 119 | skb = alloc_skb(length + ARC_HDR_SIZE + sizeof(int), GFP_ATOMIC); |
120 | if (skb == NULL) { | 120 | if (skb == NULL) { |
121 | BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n"); | 121 | BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n"); |
122 | lp->stats.rx_dropped++; | 122 | dev->stats.rx_dropped++; |
123 | return; | 123 | return; |
124 | } | 124 | } |
125 | skb_put(skb, length + ARC_HDR_SIZE + sizeof(int)); | 125 | skb_put(skb, length + ARC_HDR_SIZE + sizeof(int)); |
@@ -148,7 +148,7 @@ static void rx(struct net_device *dev, int bufnum, | |||
148 | 148 | ||
149 | BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx"); | 149 | BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx"); |
150 | 150 | ||
151 | skb->protocol = __constant_htons(ETH_P_ARCNET); | 151 | skb->protocol = cpu_to_be16(ETH_P_ARCNET); |
152 | ; | 152 | ; |
153 | netif_rx(skb); | 153 | netif_rx(skb); |
154 | } | 154 | } |
@@ -282,7 +282,7 @@ static int ack_tx(struct net_device *dev, int acked) | |||
282 | BUGMSG(D_PROTO, "Ackknowledge for cap packet %x.\n", | 282 | BUGMSG(D_PROTO, "Ackknowledge for cap packet %x.\n", |
283 | *((int*)&ackpkt->soft.cap.cookie[0])); | 283 | *((int*)&ackpkt->soft.cap.cookie[0])); |
284 | 284 | ||
285 | ackskb->protocol = __constant_htons(ETH_P_ARCNET); | 285 | ackskb->protocol = cpu_to_be16(ETH_P_ARCNET); |
286 | 286 | ||
287 | BUGLVL(D_SKB) arcnet_dump_skb(dev, ackskb, "ack_tx_recv"); | 287 | BUGLVL(D_SKB) arcnet_dump_skb(dev, ackskb, "ack_tx_recv"); |
288 | netif_rx(ackskb); | 288 | netif_rx(ackskb); |
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c index ea53a940272f..db08fc24047a 100644 --- a/drivers/net/arcnet/com20020-isa.c +++ b/drivers/net/arcnet/com20020-isa.c | |||
@@ -151,6 +151,8 @@ static int __init com20020_init(void) | |||
151 | if (node && node != 0xff) | 151 | if (node && node != 0xff) |
152 | dev->dev_addr[0] = node; | 152 | dev->dev_addr[0] = node; |
153 | 153 | ||
154 | dev->netdev_ops = &com20020_netdev_ops; | ||
155 | |||
154 | lp = netdev_priv(dev); | 156 | lp = netdev_priv(dev); |
155 | lp->backplane = backplane; | 157 | lp->backplane = backplane; |
156 | lp->clockp = clockp & 7; | 158 | lp->clockp = clockp & 7; |
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index 8b51f632581d..dbf4de39754d 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c | |||
@@ -72,6 +72,9 @@ static int __devinit com20020pci_probe(struct pci_dev *pdev, const struct pci_de | |||
72 | dev = alloc_arcdev(device); | 72 | dev = alloc_arcdev(device); |
73 | if (!dev) | 73 | if (!dev) |
74 | return -ENOMEM; | 74 | return -ENOMEM; |
75 | |||
76 | dev->netdev_ops = &com20020_netdev_ops; | ||
77 | |||
75 | lp = netdev_priv(dev); | 78 | lp = netdev_priv(dev); |
76 | 79 | ||
77 | pci_set_drvdata(pdev, dev); | 80 | pci_set_drvdata(pdev, dev); |
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c index 103688358fb8..651275a5f3d2 100644 --- a/drivers/net/arcnet/com20020.c +++ b/drivers/net/arcnet/com20020.c | |||
@@ -149,6 +149,14 @@ int com20020_check(struct net_device *dev) | |||
149 | return 0; | 149 | return 0; |
150 | } | 150 | } |
151 | 151 | ||
152 | const struct net_device_ops com20020_netdev_ops = { | ||
153 | .ndo_open = arcnet_open, | ||
154 | .ndo_stop = arcnet_close, | ||
155 | .ndo_start_xmit = arcnet_send_packet, | ||
156 | .ndo_tx_timeout = arcnet_timeout, | ||
157 | .ndo_set_multicast_list = com20020_set_mc_list, | ||
158 | }; | ||
159 | |||
152 | /* Set up the struct net_device associated with this card. Called after | 160 | /* Set up the struct net_device associated with this card. Called after |
153 | * probing succeeds. | 161 | * probing succeeds. |
154 | */ | 162 | */ |
@@ -170,8 +178,6 @@ int com20020_found(struct net_device *dev, int shared) | |||
170 | lp->hw.copy_from_card = com20020_copy_from_card; | 178 | lp->hw.copy_from_card = com20020_copy_from_card; |
171 | lp->hw.close = com20020_close; | 179 | lp->hw.close = com20020_close; |
172 | 180 | ||
173 | dev->set_multicast_list = com20020_set_mc_list; | ||
174 | |||
175 | if (!dev->dev_addr[0]) | 181 | if (!dev->dev_addr[0]) |
176 | dev->dev_addr[0] = inb(ioaddr + BUS_ALIGN*8); /* FIXME: do this some other way! */ | 182 | dev->dev_addr[0] = inb(ioaddr + BUS_ALIGN*8); /* FIXME: do this some other way! */ |
177 | 183 | ||
@@ -342,6 +348,7 @@ static void com20020_set_mc_list(struct net_device *dev) | |||
342 | defined(CONFIG_ARCNET_COM20020_CS_MODULE) | 348 | defined(CONFIG_ARCNET_COM20020_CS_MODULE) |
343 | EXPORT_SYMBOL(com20020_check); | 349 | EXPORT_SYMBOL(com20020_check); |
344 | EXPORT_SYMBOL(com20020_found); | 350 | EXPORT_SYMBOL(com20020_found); |
351 | EXPORT_SYMBOL(com20020_netdev_ops); | ||
345 | #endif | 352 | #endif |
346 | 353 | ||
347 | MODULE_LICENSE("GPL"); | 354 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/net/arcnet/rfc1051.c b/drivers/net/arcnet/rfc1051.c index 49d39a9cb696..06f8fa2f8f2f 100644 --- a/drivers/net/arcnet/rfc1051.c +++ b/drivers/net/arcnet/rfc1051.c | |||
@@ -88,7 +88,6 @@ MODULE_LICENSE("GPL"); | |||
88 | */ | 88 | */ |
89 | static __be16 type_trans(struct sk_buff *skb, struct net_device *dev) | 89 | static __be16 type_trans(struct sk_buff *skb, struct net_device *dev) |
90 | { | 90 | { |
91 | struct arcnet_local *lp = netdev_priv(dev); | ||
92 | struct archdr *pkt = (struct archdr *) skb->data; | 91 | struct archdr *pkt = (struct archdr *) skb->data; |
93 | struct arc_rfc1051 *soft = &pkt->soft.rfc1051; | 92 | struct arc_rfc1051 *soft = &pkt->soft.rfc1051; |
94 | int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE; | 93 | int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE; |
@@ -112,8 +111,8 @@ static __be16 type_trans(struct sk_buff *skb, struct net_device *dev) | |||
112 | return htons(ETH_P_ARP); | 111 | return htons(ETH_P_ARP); |
113 | 112 | ||
114 | default: | 113 | default: |
115 | lp->stats.rx_errors++; | 114 | dev->stats.rx_errors++; |
116 | lp->stats.rx_crc_errors++; | 115 | dev->stats.rx_crc_errors++; |
117 | return 0; | 116 | return 0; |
118 | } | 117 | } |
119 | 118 | ||
@@ -140,7 +139,7 @@ static void rx(struct net_device *dev, int bufnum, | |||
140 | skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC); | 139 | skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC); |
141 | if (skb == NULL) { | 140 | if (skb == NULL) { |
142 | BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n"); | 141 | BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n"); |
143 | lp->stats.rx_dropped++; | 142 | dev->stats.rx_dropped++; |
144 | return; | 143 | return; |
145 | } | 144 | } |
146 | skb_put(skb, length + ARC_HDR_SIZE); | 145 | skb_put(skb, length + ARC_HDR_SIZE); |
@@ -168,7 +167,6 @@ static void rx(struct net_device *dev, int bufnum, | |||
168 | static int build_header(struct sk_buff *skb, struct net_device *dev, | 167 | static int build_header(struct sk_buff *skb, struct net_device *dev, |
169 | unsigned short type, uint8_t daddr) | 168 | unsigned short type, uint8_t daddr) |
170 | { | 169 | { |
171 | struct arcnet_local *lp = netdev_priv(dev); | ||
172 | int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE; | 170 | int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE; |
173 | struct archdr *pkt = (struct archdr *) skb_push(skb, hdr_size); | 171 | struct archdr *pkt = (struct archdr *) skb_push(skb, hdr_size); |
174 | struct arc_rfc1051 *soft = &pkt->soft.rfc1051; | 172 | struct arc_rfc1051 *soft = &pkt->soft.rfc1051; |
@@ -184,8 +182,8 @@ static int build_header(struct sk_buff *skb, struct net_device *dev, | |||
184 | default: | 182 | default: |
185 | BUGMSG(D_NORMAL, "RFC1051: I don't understand protocol %d (%Xh)\n", | 183 | BUGMSG(D_NORMAL, "RFC1051: I don't understand protocol %d (%Xh)\n", |
186 | type, type); | 184 | type, type); |
187 | lp->stats.tx_errors++; | 185 | dev->stats.tx_errors++; |
188 | lp->stats.tx_aborted_errors++; | 186 | dev->stats.tx_aborted_errors++; |
189 | return 0; | 187 | return 0; |
190 | } | 188 | } |
191 | 189 | ||
diff --git a/drivers/net/arcnet/rfc1201.c b/drivers/net/arcnet/rfc1201.c index 2303d3a1f4b6..745530651c45 100644 --- a/drivers/net/arcnet/rfc1201.c +++ b/drivers/net/arcnet/rfc1201.c | |||
@@ -92,7 +92,6 @@ static __be16 type_trans(struct sk_buff *skb, struct net_device *dev) | |||
92 | { | 92 | { |
93 | struct archdr *pkt = (struct archdr *) skb->data; | 93 | struct archdr *pkt = (struct archdr *) skb->data; |
94 | struct arc_rfc1201 *soft = &pkt->soft.rfc1201; | 94 | struct arc_rfc1201 *soft = &pkt->soft.rfc1201; |
95 | struct arcnet_local *lp = netdev_priv(dev); | ||
96 | int hdr_size = ARC_HDR_SIZE + RFC1201_HDR_SIZE; | 95 | int hdr_size = ARC_HDR_SIZE + RFC1201_HDR_SIZE; |
97 | 96 | ||
98 | /* Pull off the arcnet header. */ | 97 | /* Pull off the arcnet header. */ |
@@ -121,8 +120,8 @@ static __be16 type_trans(struct sk_buff *skb, struct net_device *dev) | |||
121 | case ARC_P_NOVELL_EC: | 120 | case ARC_P_NOVELL_EC: |
122 | return htons(ETH_P_802_3); | 121 | return htons(ETH_P_802_3); |
123 | default: | 122 | default: |
124 | lp->stats.rx_errors++; | 123 | dev->stats.rx_errors++; |
125 | lp->stats.rx_crc_errors++; | 124 | dev->stats.rx_crc_errors++; |
126 | return 0; | 125 | return 0; |
127 | } | 126 | } |
128 | 127 | ||
@@ -172,8 +171,8 @@ static void rx(struct net_device *dev, int bufnum, | |||
172 | in->sequence, soft->split_flag, soft->sequence); | 171 | in->sequence, soft->split_flag, soft->sequence); |
173 | lp->rfc1201.aborted_seq = soft->sequence; | 172 | lp->rfc1201.aborted_seq = soft->sequence; |
174 | dev_kfree_skb_irq(in->skb); | 173 | dev_kfree_skb_irq(in->skb); |
175 | lp->stats.rx_errors++; | 174 | dev->stats.rx_errors++; |
176 | lp->stats.rx_missed_errors++; | 175 | dev->stats.rx_missed_errors++; |
177 | in->skb = NULL; | 176 | in->skb = NULL; |
178 | } | 177 | } |
179 | in->sequence = soft->sequence; | 178 | in->sequence = soft->sequence; |
@@ -181,7 +180,7 @@ static void rx(struct net_device *dev, int bufnum, | |||
181 | skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC); | 180 | skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC); |
182 | if (skb == NULL) { | 181 | if (skb == NULL) { |
183 | BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n"); | 182 | BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n"); |
184 | lp->stats.rx_dropped++; | 183 | dev->stats.rx_dropped++; |
185 | return; | 184 | return; |
186 | } | 185 | } |
187 | skb_put(skb, length + ARC_HDR_SIZE); | 186 | skb_put(skb, length + ARC_HDR_SIZE); |
@@ -213,7 +212,7 @@ static void rx(struct net_device *dev, int bufnum, | |||
213 | BUGMSG(D_EXTRA, | 212 | BUGMSG(D_EXTRA, |
214 | "ARP source address was 00h, set to %02Xh.\n", | 213 | "ARP source address was 00h, set to %02Xh.\n", |
215 | saddr); | 214 | saddr); |
216 | lp->stats.rx_crc_errors++; | 215 | dev->stats.rx_crc_errors++; |
217 | *cptr = saddr; | 216 | *cptr = saddr; |
218 | } else { | 217 | } else { |
219 | BUGMSG(D_DURING, "ARP source address (%Xh) is fine.\n", | 218 | BUGMSG(D_DURING, "ARP source address (%Xh) is fine.\n", |
@@ -222,8 +221,8 @@ static void rx(struct net_device *dev, int bufnum, | |||
222 | } else { | 221 | } else { |
223 | BUGMSG(D_NORMAL, "funny-shaped ARP packet. (%Xh, %Xh)\n", | 222 | BUGMSG(D_NORMAL, "funny-shaped ARP packet. (%Xh, %Xh)\n", |
224 | arp->ar_hln, arp->ar_pln); | 223 | arp->ar_hln, arp->ar_pln); |
225 | lp->stats.rx_errors++; | 224 | dev->stats.rx_errors++; |
226 | lp->stats.rx_crc_errors++; | 225 | dev->stats.rx_crc_errors++; |
227 | } | 226 | } |
228 | } | 227 | } |
229 | BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx"); | 228 | BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx"); |
@@ -257,8 +256,8 @@ static void rx(struct net_device *dev, int bufnum, | |||
257 | soft->split_flag); | 256 | soft->split_flag); |
258 | dev_kfree_skb_irq(in->skb); | 257 | dev_kfree_skb_irq(in->skb); |
259 | in->skb = NULL; | 258 | in->skb = NULL; |
260 | lp->stats.rx_errors++; | 259 | dev->stats.rx_errors++; |
261 | lp->stats.rx_missed_errors++; | 260 | dev->stats.rx_missed_errors++; |
262 | in->lastpacket = in->numpackets = 0; | 261 | in->lastpacket = in->numpackets = 0; |
263 | } | 262 | } |
264 | if (soft->split_flag & 1) { /* first packet in split */ | 263 | if (soft->split_flag & 1) { /* first packet in split */ |
@@ -269,8 +268,8 @@ static void rx(struct net_device *dev, int bufnum, | |||
269 | "(splitflag=%d, seq=%d)\n", | 268 | "(splitflag=%d, seq=%d)\n", |
270 | in->sequence, soft->split_flag, | 269 | in->sequence, soft->split_flag, |
271 | soft->sequence); | 270 | soft->sequence); |
272 | lp->stats.rx_errors++; | 271 | dev->stats.rx_errors++; |
273 | lp->stats.rx_missed_errors++; | 272 | dev->stats.rx_missed_errors++; |
274 | dev_kfree_skb_irq(in->skb); | 273 | dev_kfree_skb_irq(in->skb); |
275 | } | 274 | } |
276 | in->sequence = soft->sequence; | 275 | in->sequence = soft->sequence; |
@@ -281,8 +280,8 @@ static void rx(struct net_device *dev, int bufnum, | |||
281 | BUGMSG(D_EXTRA, "incoming packet more than 16 segments; dropping. (splitflag=%d)\n", | 280 | BUGMSG(D_EXTRA, "incoming packet more than 16 segments; dropping. (splitflag=%d)\n", |
282 | soft->split_flag); | 281 | soft->split_flag); |
283 | lp->rfc1201.aborted_seq = soft->sequence; | 282 | lp->rfc1201.aborted_seq = soft->sequence; |
284 | lp->stats.rx_errors++; | 283 | dev->stats.rx_errors++; |
285 | lp->stats.rx_length_errors++; | 284 | dev->stats.rx_length_errors++; |
286 | return; | 285 | return; |
287 | } | 286 | } |
288 | in->skb = skb = alloc_skb(508 * in->numpackets + ARC_HDR_SIZE, | 287 | in->skb = skb = alloc_skb(508 * in->numpackets + ARC_HDR_SIZE, |
@@ -290,7 +289,7 @@ static void rx(struct net_device *dev, int bufnum, | |||
290 | if (skb == NULL) { | 289 | if (skb == NULL) { |
291 | BUGMSG(D_NORMAL, "(split) memory squeeze, dropping packet.\n"); | 290 | BUGMSG(D_NORMAL, "(split) memory squeeze, dropping packet.\n"); |
292 | lp->rfc1201.aborted_seq = soft->sequence; | 291 | lp->rfc1201.aborted_seq = soft->sequence; |
293 | lp->stats.rx_dropped++; | 292 | dev->stats.rx_dropped++; |
294 | return; | 293 | return; |
295 | } | 294 | } |
296 | skb->dev = dev; | 295 | skb->dev = dev; |
@@ -314,8 +313,8 @@ static void rx(struct net_device *dev, int bufnum, | |||
314 | "first! (splitflag=%d, seq=%d, aborted=%d)\n", | 313 | "first! (splitflag=%d, seq=%d, aborted=%d)\n", |
315 | soft->split_flag, soft->sequence, | 314 | soft->split_flag, soft->sequence, |
316 | lp->rfc1201.aborted_seq); | 315 | lp->rfc1201.aborted_seq); |
317 | lp->stats.rx_errors++; | 316 | dev->stats.rx_errors++; |
318 | lp->stats.rx_missed_errors++; | 317 | dev->stats.rx_missed_errors++; |
319 | } | 318 | } |
320 | return; | 319 | return; |
321 | } | 320 | } |
@@ -325,8 +324,8 @@ static void rx(struct net_device *dev, int bufnum, | |||
325 | if (packetnum <= in->lastpacket - 1) { | 324 | if (packetnum <= in->lastpacket - 1) { |
326 | BUGMSG(D_EXTRA, "duplicate splitpacket ignored! (splitflag=%d)\n", | 325 | BUGMSG(D_EXTRA, "duplicate splitpacket ignored! (splitflag=%d)\n", |
327 | soft->split_flag); | 326 | soft->split_flag); |
328 | lp->stats.rx_errors++; | 327 | dev->stats.rx_errors++; |
329 | lp->stats.rx_frame_errors++; | 328 | dev->stats.rx_frame_errors++; |
330 | return; | 329 | return; |
331 | } | 330 | } |
332 | /* "bad" duplicate, kill reassembly */ | 331 | /* "bad" duplicate, kill reassembly */ |
@@ -336,8 +335,8 @@ static void rx(struct net_device *dev, int bufnum, | |||
336 | lp->rfc1201.aborted_seq = soft->sequence; | 335 | lp->rfc1201.aborted_seq = soft->sequence; |
337 | dev_kfree_skb_irq(in->skb); | 336 | dev_kfree_skb_irq(in->skb); |
338 | in->skb = NULL; | 337 | in->skb = NULL; |
339 | lp->stats.rx_errors++; | 338 | dev->stats.rx_errors++; |
340 | lp->stats.rx_missed_errors++; | 339 | dev->stats.rx_missed_errors++; |
341 | in->lastpacket = in->numpackets = 0; | 340 | in->lastpacket = in->numpackets = 0; |
342 | return; | 341 | return; |
343 | } | 342 | } |
@@ -404,8 +403,8 @@ static int build_header(struct sk_buff *skb, struct net_device *dev, | |||
404 | default: | 403 | default: |
405 | BUGMSG(D_NORMAL, "RFC1201: I don't understand protocol %d (%Xh)\n", | 404 | BUGMSG(D_NORMAL, "RFC1201: I don't understand protocol %d (%Xh)\n", |
406 | type, type); | 405 | type, type); |
407 | lp->stats.tx_errors++; | 406 | dev->stats.tx_errors++; |
408 | lp->stats.tx_aborted_errors++; | 407 | dev->stats.tx_aborted_errors++; |
409 | return 0; | 408 | return 0; |
410 | } | 409 | } |
411 | 410 | ||
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c index 3ec20cc18b0c..cc7708775da0 100644 --- a/drivers/net/arm/ep93xx_eth.c +++ b/drivers/net/arm/ep93xx_eth.c | |||
@@ -298,7 +298,7 @@ poll_some_more: | |||
298 | int more = 0; | 298 | int more = 0; |
299 | 299 | ||
300 | spin_lock_irq(&ep->rx_lock); | 300 | spin_lock_irq(&ep->rx_lock); |
301 | __netif_rx_complete(napi); | 301 | __napi_complete(napi); |
302 | wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX); | 302 | wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX); |
303 | if (ep93xx_have_more_rx(ep)) { | 303 | if (ep93xx_have_more_rx(ep)) { |
304 | wrl(ep, REG_INTEN, REG_INTEN_TX); | 304 | wrl(ep, REG_INTEN, REG_INTEN_TX); |
@@ -307,7 +307,7 @@ poll_some_more: | |||
307 | } | 307 | } |
308 | spin_unlock_irq(&ep->rx_lock); | 308 | spin_unlock_irq(&ep->rx_lock); |
309 | 309 | ||
310 | if (more && netif_rx_reschedule(napi)) | 310 | if (more && napi_reschedule(napi)) |
311 | goto poll_some_more; | 311 | goto poll_some_more; |
312 | } | 312 | } |
313 | 313 | ||
@@ -415,9 +415,9 @@ static irqreturn_t ep93xx_irq(int irq, void *dev_id) | |||
415 | 415 | ||
416 | if (status & REG_INTSTS_RX) { | 416 | if (status & REG_INTSTS_RX) { |
417 | spin_lock(&ep->rx_lock); | 417 | spin_lock(&ep->rx_lock); |
418 | if (likely(netif_rx_schedule_prep(&ep->napi))) { | 418 | if (likely(napi_schedule_prep(&ep->napi))) { |
419 | wrl(ep, REG_INTEN, REG_INTEN_TX); | 419 | wrl(ep, REG_INTEN, REG_INTEN_TX); |
420 | __netif_rx_schedule(&ep->napi); | 420 | __napi_schedule(&ep->napi); |
421 | } | 421 | } |
422 | spin_unlock(&ep->rx_lock); | 422 | spin_unlock(&ep->rx_lock); |
423 | } | 423 | } |
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c index 5fce1d5c1a1a..5fe17d5eaa54 100644 --- a/drivers/net/arm/ixp4xx_eth.c +++ b/drivers/net/arm/ixp4xx_eth.c | |||
@@ -473,7 +473,7 @@ static void eth_rx_irq(void *pdev) | |||
473 | printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name); | 473 | printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name); |
474 | #endif | 474 | #endif |
475 | qmgr_disable_irq(port->plat->rxq); | 475 | qmgr_disable_irq(port->plat->rxq); |
476 | netif_rx_schedule(&port->napi); | 476 | napi_schedule(&port->napi); |
477 | } | 477 | } |
478 | 478 | ||
479 | static int eth_poll(struct napi_struct *napi, int budget) | 479 | static int eth_poll(struct napi_struct *napi, int budget) |
@@ -498,16 +498,16 @@ static int eth_poll(struct napi_struct *napi, int budget) | |||
498 | 498 | ||
499 | if ((n = queue_get_desc(rxq, port, 0)) < 0) { | 499 | if ((n = queue_get_desc(rxq, port, 0)) < 0) { |
500 | #if DEBUG_RX | 500 | #if DEBUG_RX |
501 | printk(KERN_DEBUG "%s: eth_poll netif_rx_complete\n", | 501 | printk(KERN_DEBUG "%s: eth_poll napi_complete\n", |
502 | dev->name); | 502 | dev->name); |
503 | #endif | 503 | #endif |
504 | netif_rx_complete(napi); | 504 | napi_complete(napi); |
505 | qmgr_enable_irq(rxq); | 505 | qmgr_enable_irq(rxq); |
506 | if (!qmgr_stat_empty(rxq) && | 506 | if (!qmgr_stat_empty(rxq) && |
507 | netif_rx_reschedule(napi)) { | 507 | napi_reschedule(napi)) { |
508 | #if DEBUG_RX | 508 | #if DEBUG_RX |
509 | printk(KERN_DEBUG "%s: eth_poll" | 509 | printk(KERN_DEBUG "%s: eth_poll" |
510 | " netif_rx_reschedule successed\n", | 510 | " napi_reschedule successed\n", |
511 | dev->name); | 511 | dev->name); |
512 | #endif | 512 | #endif |
513 | qmgr_disable_irq(rxq); | 513 | qmgr_disable_irq(rxq); |
@@ -1036,7 +1036,7 @@ static int eth_open(struct net_device *dev) | |||
1036 | } | 1036 | } |
1037 | ports_open++; | 1037 | ports_open++; |
1038 | /* we may already have RX data, enables IRQ */ | 1038 | /* we may already have RX data, enables IRQ */ |
1039 | netif_rx_schedule(&port->napi); | 1039 | napi_schedule(&port->napi); |
1040 | return 0; | 1040 | return 0; |
1041 | } | 1041 | } |
1042 | 1042 | ||
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c index 1cf2f949c0b4..b39210cf4fb3 100644 --- a/drivers/net/arm/ks8695net.c +++ b/drivers/net/arm/ks8695net.c | |||
@@ -1059,7 +1059,7 @@ ks8695_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) | |||
1059 | { | 1059 | { |
1060 | strlcpy(info->driver, MODULENAME, sizeof(info->driver)); | 1060 | strlcpy(info->driver, MODULENAME, sizeof(info->driver)); |
1061 | strlcpy(info->version, MODULEVERSION, sizeof(info->version)); | 1061 | strlcpy(info->version, MODULEVERSION, sizeof(info->version)); |
1062 | strlcpy(info->bus_info, ndev->dev.parent->bus_id, | 1062 | strlcpy(info->bus_info, dev_name(ndev->dev.parent), |
1063 | sizeof(info->bus_info)); | 1063 | sizeof(info->bus_info)); |
1064 | } | 1064 | } |
1065 | 1065 | ||
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c index bb9094d4cbc9..c758884728a5 100644 --- a/drivers/net/atl1e/atl1e_main.c +++ b/drivers/net/atl1e/atl1e_main.c | |||
@@ -1326,9 +1326,9 @@ static irqreturn_t atl1e_intr(int irq, void *data) | |||
1326 | AT_WRITE_REG(hw, REG_IMR, | 1326 | AT_WRITE_REG(hw, REG_IMR, |
1327 | IMR_NORMAL_MASK & ~ISR_RX_EVENT); | 1327 | IMR_NORMAL_MASK & ~ISR_RX_EVENT); |
1328 | AT_WRITE_FLUSH(hw); | 1328 | AT_WRITE_FLUSH(hw); |
1329 | if (likely(netif_rx_schedule_prep( | 1329 | if (likely(napi_schedule_prep( |
1330 | &adapter->napi))) | 1330 | &adapter->napi))) |
1331 | __netif_rx_schedule(&adapter->napi); | 1331 | __napi_schedule(&adapter->napi); |
1332 | } | 1332 | } |
1333 | } while (--max_ints > 0); | 1333 | } while (--max_ints > 0); |
1334 | /* re-enable Interrupt*/ | 1334 | /* re-enable Interrupt*/ |
@@ -1514,7 +1514,7 @@ static int atl1e_clean(struct napi_struct *napi, int budget) | |||
1514 | /* If no Tx and not enough Rx work done, exit the polling mode */ | 1514 | /* If no Tx and not enough Rx work done, exit the polling mode */ |
1515 | if (work_done < budget) { | 1515 | if (work_done < budget) { |
1516 | quit_polling: | 1516 | quit_polling: |
1517 | netif_rx_complete(napi); | 1517 | napi_complete(napi); |
1518 | imr_data = AT_READ_REG(&adapter->hw, REG_IMR); | 1518 | imr_data = AT_READ_REG(&adapter->hw, REG_IMR); |
1519 | AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT); | 1519 | AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT); |
1520 | /* test debug */ | 1520 | /* test debug */ |
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index 9c875bb3f76c..4274e4ac963b 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c | |||
@@ -81,24 +81,6 @@ MODULE_AUTHOR(DRV_AUTHOR); | |||
81 | MODULE_DESCRIPTION(DRV_DESC); | 81 | MODULE_DESCRIPTION(DRV_DESC); |
82 | MODULE_LICENSE("GPL"); | 82 | MODULE_LICENSE("GPL"); |
83 | 83 | ||
84 | // prototypes | ||
85 | static void hard_stop(struct net_device *); | ||
86 | static void enable_rx_tx(struct net_device *dev); | ||
87 | static struct net_device * au1000_probe(int port_num); | ||
88 | static int au1000_init(struct net_device *); | ||
89 | static int au1000_open(struct net_device *); | ||
90 | static int au1000_close(struct net_device *); | ||
91 | static int au1000_tx(struct sk_buff *, struct net_device *); | ||
92 | static int au1000_rx(struct net_device *); | ||
93 | static irqreturn_t au1000_interrupt(int, void *); | ||
94 | static void au1000_tx_timeout(struct net_device *); | ||
95 | static void set_rx_mode(struct net_device *); | ||
96 | static int au1000_ioctl(struct net_device *, struct ifreq *, int); | ||
97 | static int au1000_mdio_read(struct net_device *, int, int); | ||
98 | static void au1000_mdio_write(struct net_device *, int, int, u16); | ||
99 | static void au1000_adjust_link(struct net_device *); | ||
100 | static void enable_mac(struct net_device *, int); | ||
101 | |||
102 | /* | 84 | /* |
103 | * Theory of operation | 85 | * Theory of operation |
104 | * | 86 | * |
@@ -188,6 +170,26 @@ struct au1000_private *au_macs[NUM_ETH_INTERFACES]; | |||
188 | # error MAC0-associated PHY attached 2nd MACs MII bus not supported yet | 170 | # error MAC0-associated PHY attached 2nd MACs MII bus not supported yet |
189 | #endif | 171 | #endif |
190 | 172 | ||
173 | static void enable_mac(struct net_device *dev, int force_reset) | ||
174 | { | ||
175 | unsigned long flags; | ||
176 | struct au1000_private *aup = netdev_priv(dev); | ||
177 | |||
178 | spin_lock_irqsave(&aup->lock, flags); | ||
179 | |||
180 | if(force_reset || (!aup->mac_enabled)) { | ||
181 | *aup->enable = MAC_EN_CLOCK_ENABLE; | ||
182 | au_sync_delay(2); | ||
183 | *aup->enable = (MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2 | ||
184 | | MAC_EN_CLOCK_ENABLE); | ||
185 | au_sync_delay(2); | ||
186 | |||
187 | aup->mac_enabled = 1; | ||
188 | } | ||
189 | |||
190 | spin_unlock_irqrestore(&aup->lock, flags); | ||
191 | } | ||
192 | |||
191 | /* | 193 | /* |
192 | * MII operations | 194 | * MII operations |
193 | */ | 195 | */ |
@@ -281,6 +283,107 @@ static int au1000_mdiobus_reset(struct mii_bus *bus) | |||
281 | return 0; | 283 | return 0; |
282 | } | 284 | } |
283 | 285 | ||
286 | static void hard_stop(struct net_device *dev) | ||
287 | { | ||
288 | struct au1000_private *aup = netdev_priv(dev); | ||
289 | |||
290 | if (au1000_debug > 4) | ||
291 | printk(KERN_INFO "%s: hard stop\n", dev->name); | ||
292 | |||
293 | aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE); | ||
294 | au_sync_delay(10); | ||
295 | } | ||
296 | |||
297 | static void enable_rx_tx(struct net_device *dev) | ||
298 | { | ||
299 | struct au1000_private *aup = netdev_priv(dev); | ||
300 | |||
301 | if (au1000_debug > 4) | ||
302 | printk(KERN_INFO "%s: enable_rx_tx\n", dev->name); | ||
303 | |||
304 | aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE); | ||
305 | au_sync_delay(10); | ||
306 | } | ||
307 | |||
308 | static void | ||
309 | au1000_adjust_link(struct net_device *dev) | ||
310 | { | ||
311 | struct au1000_private *aup = netdev_priv(dev); | ||
312 | struct phy_device *phydev = aup->phy_dev; | ||
313 | unsigned long flags; | ||
314 | |||
315 | int status_change = 0; | ||
316 | |||
317 | BUG_ON(!aup->phy_dev); | ||
318 | |||
319 | spin_lock_irqsave(&aup->lock, flags); | ||
320 | |||
321 | if (phydev->link && (aup->old_speed != phydev->speed)) { | ||
322 | // speed changed | ||
323 | |||
324 | switch(phydev->speed) { | ||
325 | case SPEED_10: | ||
326 | case SPEED_100: | ||
327 | break; | ||
328 | default: | ||
329 | printk(KERN_WARNING | ||
330 | "%s: Speed (%d) is not 10/100 ???\n", | ||
331 | dev->name, phydev->speed); | ||
332 | break; | ||
333 | } | ||
334 | |||
335 | aup->old_speed = phydev->speed; | ||
336 | |||
337 | status_change = 1; | ||
338 | } | ||
339 | |||
340 | if (phydev->link && (aup->old_duplex != phydev->duplex)) { | ||
341 | // duplex mode changed | ||
342 | |||
343 | /* switching duplex mode requires to disable rx and tx! */ | ||
344 | hard_stop(dev); | ||
345 | |||
346 | if (DUPLEX_FULL == phydev->duplex) | ||
347 | aup->mac->control = ((aup->mac->control | ||
348 | | MAC_FULL_DUPLEX) | ||
349 | & ~MAC_DISABLE_RX_OWN); | ||
350 | else | ||
351 | aup->mac->control = ((aup->mac->control | ||
352 | & ~MAC_FULL_DUPLEX) | ||
353 | | MAC_DISABLE_RX_OWN); | ||
354 | au_sync_delay(1); | ||
355 | |||
356 | enable_rx_tx(dev); | ||
357 | aup->old_duplex = phydev->duplex; | ||
358 | |||
359 | status_change = 1; | ||
360 | } | ||
361 | |||
362 | if(phydev->link != aup->old_link) { | ||
363 | // link state changed | ||
364 | |||
365 | if (!phydev->link) { | ||
366 | /* link went down */ | ||
367 | aup->old_speed = 0; | ||
368 | aup->old_duplex = -1; | ||
369 | } | ||
370 | |||
371 | aup->old_link = phydev->link; | ||
372 | status_change = 1; | ||
373 | } | ||
374 | |||
375 | spin_unlock_irqrestore(&aup->lock, flags); | ||
376 | |||
377 | if (status_change) { | ||
378 | if (phydev->link) | ||
379 | printk(KERN_INFO "%s: link up (%d/%s)\n", | ||
380 | dev->name, phydev->speed, | ||
381 | DUPLEX_FULL == phydev->duplex ? "Full" : "Half"); | ||
382 | else | ||
383 | printk(KERN_INFO "%s: link down\n", dev->name); | ||
384 | } | ||
385 | } | ||
386 | |||
284 | static int mii_probe (struct net_device *dev) | 387 | static int mii_probe (struct net_device *dev) |
285 | { | 388 | { |
286 | struct au1000_private *const aup = netdev_priv(dev); | 389 | struct au1000_private *const aup = netdev_priv(dev); |
@@ -355,8 +458,8 @@ static int mii_probe (struct net_device *dev) | |||
355 | /* now we are supposed to have a proper phydev, to attach to... */ | 458 | /* now we are supposed to have a proper phydev, to attach to... */ |
356 | BUG_ON(phydev->attached_dev); | 459 | BUG_ON(phydev->attached_dev); |
357 | 460 | ||
358 | phydev = phy_connect(dev, phydev->dev.bus_id, &au1000_adjust_link, 0, | 461 | phydev = phy_connect(dev, dev_name(&phydev->dev), &au1000_adjust_link, |
359 | PHY_INTERFACE_MODE_MII); | 462 | 0, PHY_INTERFACE_MODE_MII); |
360 | 463 | ||
361 | if (IS_ERR(phydev)) { | 464 | if (IS_ERR(phydev)) { |
362 | printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); | 465 | printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); |
@@ -381,8 +484,8 @@ static int mii_probe (struct net_device *dev) | |||
381 | aup->phy_dev = phydev; | 484 | aup->phy_dev = phydev; |
382 | 485 | ||
383 | printk(KERN_INFO "%s: attached PHY driver [%s] " | 486 | printk(KERN_INFO "%s: attached PHY driver [%s] " |
384 | "(mii_bus:phy_addr=%s, irq=%d)\n", | 487 | "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name, |
385 | dev->name, phydev->drv->name, phydev->dev.bus_id, phydev->irq); | 488 | phydev->drv->name, dev_name(&phydev->dev), phydev->irq); |
386 | 489 | ||
387 | return 0; | 490 | return 0; |
388 | } | 491 | } |
@@ -412,48 +515,6 @@ void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB) | |||
412 | aup->pDBfree = pDB; | 515 | aup->pDBfree = pDB; |
413 | } | 516 | } |
414 | 517 | ||
415 | static void enable_rx_tx(struct net_device *dev) | ||
416 | { | ||
417 | struct au1000_private *aup = netdev_priv(dev); | ||
418 | |||
419 | if (au1000_debug > 4) | ||
420 | printk(KERN_INFO "%s: enable_rx_tx\n", dev->name); | ||
421 | |||
422 | aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE); | ||
423 | au_sync_delay(10); | ||
424 | } | ||
425 | |||
426 | static void hard_stop(struct net_device *dev) | ||
427 | { | ||
428 | struct au1000_private *aup = netdev_priv(dev); | ||
429 | |||
430 | if (au1000_debug > 4) | ||
431 | printk(KERN_INFO "%s: hard stop\n", dev->name); | ||
432 | |||
433 | aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE); | ||
434 | au_sync_delay(10); | ||
435 | } | ||
436 | |||
437 | static void enable_mac(struct net_device *dev, int force_reset) | ||
438 | { | ||
439 | unsigned long flags; | ||
440 | struct au1000_private *aup = netdev_priv(dev); | ||
441 | |||
442 | spin_lock_irqsave(&aup->lock, flags); | ||
443 | |||
444 | if(force_reset || (!aup->mac_enabled)) { | ||
445 | *aup->enable = MAC_EN_CLOCK_ENABLE; | ||
446 | au_sync_delay(2); | ||
447 | *aup->enable = (MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2 | ||
448 | | MAC_EN_CLOCK_ENABLE); | ||
449 | au_sync_delay(2); | ||
450 | |||
451 | aup->mac_enabled = 1; | ||
452 | } | ||
453 | |||
454 | spin_unlock_irqrestore(&aup->lock, flags); | ||
455 | } | ||
456 | |||
457 | static void reset_mac_unlocked(struct net_device *dev) | 518 | static void reset_mac_unlocked(struct net_device *dev) |
458 | { | 519 | { |
459 | struct au1000_private *const aup = netdev_priv(dev); | 520 | struct au1000_private *const aup = netdev_priv(dev); |
@@ -542,30 +603,6 @@ static struct { | |||
542 | static int num_ifs; | 603 | static int num_ifs; |
543 | 604 | ||
544 | /* | 605 | /* |
545 | * Setup the base address and interrupt of the Au1xxx ethernet macs | ||
546 | * based on cpu type and whether the interface is enabled in sys_pinfunc | ||
547 | * register. The last interface is enabled if SYS_PF_NI2 (bit 4) is 0. | ||
548 | */ | ||
549 | static int __init au1000_init_module(void) | ||
550 | { | ||
551 | int ni = (int)((au_readl(SYS_PINFUNC) & (u32)(SYS_PF_NI2)) >> 4); | ||
552 | struct net_device *dev; | ||
553 | int i, found_one = 0; | ||
554 | |||
555 | num_ifs = NUM_ETH_INTERFACES - ni; | ||
556 | |||
557 | for(i = 0; i < num_ifs; i++) { | ||
558 | dev = au1000_probe(i); | ||
559 | iflist[i].dev = dev; | ||
560 | if (dev) | ||
561 | found_one++; | ||
562 | } | ||
563 | if (!found_one) | ||
564 | return -ENODEV; | ||
565 | return 0; | ||
566 | } | ||
567 | |||
568 | /* | ||
569 | * ethtool operations | 606 | * ethtool operations |
570 | */ | 607 | */ |
571 | 608 | ||
@@ -611,199 +648,6 @@ static const struct ethtool_ops au1000_ethtool_ops = { | |||
611 | .get_link = ethtool_op_get_link, | 648 | .get_link = ethtool_op_get_link, |
612 | }; | 649 | }; |
613 | 650 | ||
614 | static struct net_device * au1000_probe(int port_num) | ||
615 | { | ||
616 | static unsigned version_printed = 0; | ||
617 | struct au1000_private *aup = NULL; | ||
618 | struct net_device *dev = NULL; | ||
619 | db_dest_t *pDB, *pDBfree; | ||
620 | char ethaddr[6]; | ||
621 | int irq, i, err; | ||
622 | u32 base, macen; | ||
623 | |||
624 | if (port_num >= NUM_ETH_INTERFACES) | ||
625 | return NULL; | ||
626 | |||
627 | base = CPHYSADDR(iflist[port_num].base_addr ); | ||
628 | macen = CPHYSADDR(iflist[port_num].macen_addr); | ||
629 | irq = iflist[port_num].irq; | ||
630 | |||
631 | if (!request_mem_region( base, MAC_IOSIZE, "Au1x00 ENET") || | ||
632 | !request_mem_region(macen, 4, "Au1x00 ENET")) | ||
633 | return NULL; | ||
634 | |||
635 | if (version_printed++ == 0) | ||
636 | printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR); | ||
637 | |||
638 | dev = alloc_etherdev(sizeof(struct au1000_private)); | ||
639 | if (!dev) { | ||
640 | printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME); | ||
641 | return NULL; | ||
642 | } | ||
643 | |||
644 | if ((err = register_netdev(dev)) != 0) { | ||
645 | printk(KERN_ERR "%s: Cannot register net device, error %d\n", | ||
646 | DRV_NAME, err); | ||
647 | free_netdev(dev); | ||
648 | return NULL; | ||
649 | } | ||
650 | |||
651 | printk("%s: Au1xx0 Ethernet found at 0x%x, irq %d\n", | ||
652 | dev->name, base, irq); | ||
653 | |||
654 | aup = netdev_priv(dev); | ||
655 | |||
656 | spin_lock_init(&aup->lock); | ||
657 | |||
658 | /* Allocate the data buffers */ | ||
659 | /* Snooping works fine with eth on all au1xxx */ | ||
660 | aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE * | ||
661 | (NUM_TX_BUFFS + NUM_RX_BUFFS), | ||
662 | &aup->dma_addr, 0); | ||
663 | if (!aup->vaddr) { | ||
664 | free_netdev(dev); | ||
665 | release_mem_region( base, MAC_IOSIZE); | ||
666 | release_mem_region(macen, 4); | ||
667 | return NULL; | ||
668 | } | ||
669 | |||
670 | /* aup->mac is the base address of the MAC's registers */ | ||
671 | aup->mac = (volatile mac_reg_t *)iflist[port_num].base_addr; | ||
672 | |||
673 | /* Setup some variables for quick register address access */ | ||
674 | aup->enable = (volatile u32 *)iflist[port_num].macen_addr; | ||
675 | aup->mac_id = port_num; | ||
676 | au_macs[port_num] = aup; | ||
677 | |||
678 | if (port_num == 0) { | ||
679 | if (prom_get_ethernet_addr(ethaddr) == 0) | ||
680 | memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr)); | ||
681 | else { | ||
682 | printk(KERN_INFO "%s: No MAC address found\n", | ||
683 | dev->name); | ||
684 | /* Use the hard coded MAC addresses */ | ||
685 | } | ||
686 | |||
687 | setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR); | ||
688 | } else if (port_num == 1) | ||
689 | setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR); | ||
690 | |||
691 | /* | ||
692 | * Assign to the Ethernet ports two consecutive MAC addresses | ||
693 | * to match those that are printed on their stickers | ||
694 | */ | ||
695 | memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr)); | ||
696 | dev->dev_addr[5] += port_num; | ||
697 | |||
698 | *aup->enable = 0; | ||
699 | aup->mac_enabled = 0; | ||
700 | |||
701 | aup->mii_bus = mdiobus_alloc(); | ||
702 | if (aup->mii_bus == NULL) | ||
703 | goto err_out; | ||
704 | |||
705 | aup->mii_bus->priv = dev; | ||
706 | aup->mii_bus->read = au1000_mdiobus_read; | ||
707 | aup->mii_bus->write = au1000_mdiobus_write; | ||
708 | aup->mii_bus->reset = au1000_mdiobus_reset; | ||
709 | aup->mii_bus->name = "au1000_eth_mii"; | ||
710 | snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%x", aup->mac_id); | ||
711 | aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); | ||
712 | for(i = 0; i < PHY_MAX_ADDR; ++i) | ||
713 | aup->mii_bus->irq[i] = PHY_POLL; | ||
714 | |||
715 | /* if known, set corresponding PHY IRQs */ | ||
716 | #if defined(AU1XXX_PHY_STATIC_CONFIG) | ||
717 | # if defined(AU1XXX_PHY0_IRQ) | ||
718 | if (AU1XXX_PHY0_BUSID == aup->mac_id) | ||
719 | aup->mii_bus->irq[AU1XXX_PHY0_ADDR] = AU1XXX_PHY0_IRQ; | ||
720 | # endif | ||
721 | # if defined(AU1XXX_PHY1_IRQ) | ||
722 | if (AU1XXX_PHY1_BUSID == aup->mac_id) | ||
723 | aup->mii_bus->irq[AU1XXX_PHY1_ADDR] = AU1XXX_PHY1_IRQ; | ||
724 | # endif | ||
725 | #endif | ||
726 | mdiobus_register(aup->mii_bus); | ||
727 | |||
728 | if (mii_probe(dev) != 0) { | ||
729 | goto err_out; | ||
730 | } | ||
731 | |||
732 | pDBfree = NULL; | ||
733 | /* setup the data buffer descriptors and attach a buffer to each one */ | ||
734 | pDB = aup->db; | ||
735 | for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) { | ||
736 | pDB->pnext = pDBfree; | ||
737 | pDBfree = pDB; | ||
738 | pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i); | ||
739 | pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr); | ||
740 | pDB++; | ||
741 | } | ||
742 | aup->pDBfree = pDBfree; | ||
743 | |||
744 | for (i = 0; i < NUM_RX_DMA; i++) { | ||
745 | pDB = GetFreeDB(aup); | ||
746 | if (!pDB) { | ||
747 | goto err_out; | ||
748 | } | ||
749 | aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; | ||
750 | aup->rx_db_inuse[i] = pDB; | ||
751 | } | ||
752 | for (i = 0; i < NUM_TX_DMA; i++) { | ||
753 | pDB = GetFreeDB(aup); | ||
754 | if (!pDB) { | ||
755 | goto err_out; | ||
756 | } | ||
757 | aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; | ||
758 | aup->tx_dma_ring[i]->len = 0; | ||
759 | aup->tx_db_inuse[i] = pDB; | ||
760 | } | ||
761 | |||
762 | dev->base_addr = base; | ||
763 | dev->irq = irq; | ||
764 | dev->open = au1000_open; | ||
765 | dev->hard_start_xmit = au1000_tx; | ||
766 | dev->stop = au1000_close; | ||
767 | dev->set_multicast_list = &set_rx_mode; | ||
768 | dev->do_ioctl = &au1000_ioctl; | ||
769 | SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops); | ||
770 | dev->tx_timeout = au1000_tx_timeout; | ||
771 | dev->watchdog_timeo = ETH_TX_TIMEOUT; | ||
772 | |||
773 | /* | ||
774 | * The boot code uses the ethernet controller, so reset it to start | ||
775 | * fresh. au1000_init() expects that the device is in reset state. | ||
776 | */ | ||
777 | reset_mac(dev); | ||
778 | |||
779 | return dev; | ||
780 | |||
781 | err_out: | ||
782 | if (aup->mii_bus != NULL) { | ||
783 | mdiobus_unregister(aup->mii_bus); | ||
784 | mdiobus_free(aup->mii_bus); | ||
785 | } | ||
786 | |||
787 | /* here we should have a valid dev plus aup-> register addresses | ||
788 | * so we can reset the mac properly.*/ | ||
789 | reset_mac(dev); | ||
790 | |||
791 | for (i = 0; i < NUM_RX_DMA; i++) { | ||
792 | if (aup->rx_db_inuse[i]) | ||
793 | ReleaseDB(aup, aup->rx_db_inuse[i]); | ||
794 | } | ||
795 | for (i = 0; i < NUM_TX_DMA; i++) { | ||
796 | if (aup->tx_db_inuse[i]) | ||
797 | ReleaseDB(aup, aup->tx_db_inuse[i]); | ||
798 | } | ||
799 | dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), | ||
800 | (void *)aup->vaddr, aup->dma_addr); | ||
801 | unregister_netdev(dev); | ||
802 | free_netdev(dev); | ||
803 | release_mem_region( base, MAC_IOSIZE); | ||
804 | release_mem_region(macen, 4); | ||
805 | return NULL; | ||
806 | } | ||
807 | 651 | ||
808 | /* | 652 | /* |
809 | * Initialize the interface. | 653 | * Initialize the interface. |
@@ -864,83 +708,170 @@ static int au1000_init(struct net_device *dev) | |||
864 | return 0; | 708 | return 0; |
865 | } | 709 | } |
866 | 710 | ||
867 | static void | 711 | static inline void update_rx_stats(struct net_device *dev, u32 status) |
868 | au1000_adjust_link(struct net_device *dev) | ||
869 | { | 712 | { |
870 | struct au1000_private *aup = netdev_priv(dev); | 713 | struct au1000_private *aup = netdev_priv(dev); |
871 | struct phy_device *phydev = aup->phy_dev; | 714 | struct net_device_stats *ps = &dev->stats; |
872 | unsigned long flags; | ||
873 | 715 | ||
874 | int status_change = 0; | 716 | ps->rx_packets++; |
717 | if (status & RX_MCAST_FRAME) | ||
718 | ps->multicast++; | ||
875 | 719 | ||
876 | BUG_ON(!aup->phy_dev); | 720 | if (status & RX_ERROR) { |
721 | ps->rx_errors++; | ||
722 | if (status & RX_MISSED_FRAME) | ||
723 | ps->rx_missed_errors++; | ||
724 | if (status & (RX_OVERLEN | RX_OVERLEN | RX_LEN_ERROR)) | ||
725 | ps->rx_length_errors++; | ||
726 | if (status & RX_CRC_ERROR) | ||
727 | ps->rx_crc_errors++; | ||
728 | if (status & RX_COLL) | ||
729 | ps->collisions++; | ||
730 | } | ||
731 | else | ||
732 | ps->rx_bytes += status & RX_FRAME_LEN_MASK; | ||
877 | 733 | ||
878 | spin_lock_irqsave(&aup->lock, flags); | 734 | } |
879 | 735 | ||
880 | if (phydev->link && (aup->old_speed != phydev->speed)) { | 736 | /* |
881 | // speed changed | 737 | * Au1000 receive routine. |
738 | */ | ||
739 | static int au1000_rx(struct net_device *dev) | ||
740 | { | ||
741 | struct au1000_private *aup = netdev_priv(dev); | ||
742 | struct sk_buff *skb; | ||
743 | volatile rx_dma_t *prxd; | ||
744 | u32 buff_stat, status; | ||
745 | db_dest_t *pDB; | ||
746 | u32 frmlen; | ||
882 | 747 | ||
883 | switch(phydev->speed) { | 748 | if (au1000_debug > 5) |
884 | case SPEED_10: | 749 | printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head); |
885 | case SPEED_100: | ||
886 | break; | ||
887 | default: | ||
888 | printk(KERN_WARNING | ||
889 | "%s: Speed (%d) is not 10/100 ???\n", | ||
890 | dev->name, phydev->speed); | ||
891 | break; | ||
892 | } | ||
893 | 750 | ||
894 | aup->old_speed = phydev->speed; | 751 | prxd = aup->rx_dma_ring[aup->rx_head]; |
752 | buff_stat = prxd->buff_stat; | ||
753 | while (buff_stat & RX_T_DONE) { | ||
754 | status = prxd->status; | ||
755 | pDB = aup->rx_db_inuse[aup->rx_head]; | ||
756 | update_rx_stats(dev, status); | ||
757 | if (!(status & RX_ERROR)) { | ||
895 | 758 | ||
896 | status_change = 1; | 759 | /* good frame */ |
760 | frmlen = (status & RX_FRAME_LEN_MASK); | ||
761 | frmlen -= 4; /* Remove FCS */ | ||
762 | skb = dev_alloc_skb(frmlen + 2); | ||
763 | if (skb == NULL) { | ||
764 | printk(KERN_ERR | ||
765 | "%s: Memory squeeze, dropping packet.\n", | ||
766 | dev->name); | ||
767 | dev->stats.rx_dropped++; | ||
768 | continue; | ||
769 | } | ||
770 | skb_reserve(skb, 2); /* 16 byte IP header align */ | ||
771 | skb_copy_to_linear_data(skb, | ||
772 | (unsigned char *)pDB->vaddr, frmlen); | ||
773 | skb_put(skb, frmlen); | ||
774 | skb->protocol = eth_type_trans(skb, dev); | ||
775 | netif_rx(skb); /* pass the packet to upper layers */ | ||
776 | } | ||
777 | else { | ||
778 | if (au1000_debug > 4) { | ||
779 | if (status & RX_MISSED_FRAME) | ||
780 | printk("rx miss\n"); | ||
781 | if (status & RX_WDOG_TIMER) | ||
782 | printk("rx wdog\n"); | ||
783 | if (status & RX_RUNT) | ||
784 | printk("rx runt\n"); | ||
785 | if (status & RX_OVERLEN) | ||
786 | printk("rx overlen\n"); | ||
787 | if (status & RX_COLL) | ||
788 | printk("rx coll\n"); | ||
789 | if (status & RX_MII_ERROR) | ||
790 | printk("rx mii error\n"); | ||
791 | if (status & RX_CRC_ERROR) | ||
792 | printk("rx crc error\n"); | ||
793 | if (status & RX_LEN_ERROR) | ||
794 | printk("rx len error\n"); | ||
795 | if (status & RX_U_CNTRL_FRAME) | ||
796 | printk("rx u control frame\n"); | ||
797 | if (status & RX_MISSED_FRAME) | ||
798 | printk("rx miss\n"); | ||
799 | } | ||
800 | } | ||
801 | prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE); | ||
802 | aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1); | ||
803 | au_sync(); | ||
804 | |||
805 | /* next descriptor */ | ||
806 | prxd = aup->rx_dma_ring[aup->rx_head]; | ||
807 | buff_stat = prxd->buff_stat; | ||
897 | } | 808 | } |
809 | return 0; | ||
810 | } | ||
898 | 811 | ||
899 | if (phydev->link && (aup->old_duplex != phydev->duplex)) { | 812 | static void update_tx_stats(struct net_device *dev, u32 status) |
900 | // duplex mode changed | 813 | { |
814 | struct au1000_private *aup = netdev_priv(dev); | ||
815 | struct net_device_stats *ps = &dev->stats; | ||
901 | 816 | ||
902 | /* switching duplex mode requires to disable rx and tx! */ | 817 | if (status & TX_FRAME_ABORTED) { |
903 | hard_stop(dev); | 818 | if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) { |
819 | if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) { | ||
820 | /* any other tx errors are only valid | ||
821 | * in half duplex mode */ | ||
822 | ps->tx_errors++; | ||
823 | ps->tx_aborted_errors++; | ||
824 | } | ||
825 | } | ||
826 | else { | ||
827 | ps->tx_errors++; | ||
828 | ps->tx_aborted_errors++; | ||
829 | if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER)) | ||
830 | ps->tx_carrier_errors++; | ||
831 | } | ||
832 | } | ||
833 | } | ||
904 | 834 | ||
905 | if (DUPLEX_FULL == phydev->duplex) | 835 | /* |
906 | aup->mac->control = ((aup->mac->control | 836 | * Called from the interrupt service routine to acknowledge |
907 | | MAC_FULL_DUPLEX) | 837 | * the TX DONE bits. This is a must if the irq is setup as |
908 | & ~MAC_DISABLE_RX_OWN); | 838 | * edge triggered. |
909 | else | 839 | */ |
910 | aup->mac->control = ((aup->mac->control | 840 | static void au1000_tx_ack(struct net_device *dev) |
911 | & ~MAC_FULL_DUPLEX) | 841 | { |
912 | | MAC_DISABLE_RX_OWN); | 842 | struct au1000_private *aup = netdev_priv(dev); |
913 | au_sync_delay(1); | 843 | volatile tx_dma_t *ptxd; |
914 | 844 | ||
915 | enable_rx_tx(dev); | 845 | ptxd = aup->tx_dma_ring[aup->tx_tail]; |
916 | aup->old_duplex = phydev->duplex; | ||
917 | 846 | ||
918 | status_change = 1; | 847 | while (ptxd->buff_stat & TX_T_DONE) { |
919 | } | 848 | update_tx_stats(dev, ptxd->status); |
849 | ptxd->buff_stat &= ~TX_T_DONE; | ||
850 | ptxd->len = 0; | ||
851 | au_sync(); | ||
920 | 852 | ||
921 | if(phydev->link != aup->old_link) { | 853 | aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1); |
922 | // link state changed | 854 | ptxd = aup->tx_dma_ring[aup->tx_tail]; |
923 | 855 | ||
924 | if (!phydev->link) { | 856 | if (aup->tx_full) { |
925 | /* link went down */ | 857 | aup->tx_full = 0; |
926 | aup->old_speed = 0; | 858 | netif_wake_queue(dev); |
927 | aup->old_duplex = -1; | ||
928 | } | 859 | } |
929 | |||
930 | aup->old_link = phydev->link; | ||
931 | status_change = 1; | ||
932 | } | 860 | } |
861 | } | ||
933 | 862 | ||
934 | spin_unlock_irqrestore(&aup->lock, flags); | 863 | /* |
864 | * Au1000 interrupt service routine. | ||
865 | */ | ||
866 | static irqreturn_t au1000_interrupt(int irq, void *dev_id) | ||
867 | { | ||
868 | struct net_device *dev = dev_id; | ||
935 | 869 | ||
936 | if (status_change) { | 870 | /* Handle RX interrupts first to minimize chance of overrun */ |
937 | if (phydev->link) | 871 | |
938 | printk(KERN_INFO "%s: link up (%d/%s)\n", | 872 | au1000_rx(dev); |
939 | dev->name, phydev->speed, | 873 | au1000_tx_ack(dev); |
940 | DUPLEX_FULL == phydev->duplex ? "Full" : "Half"); | 874 | return IRQ_RETVAL(1); |
941 | else | ||
942 | printk(KERN_INFO "%s: link down\n", dev->name); | ||
943 | } | ||
944 | } | 875 | } |
945 | 876 | ||
946 | static int au1000_open(struct net_device *dev) | 877 | static int au1000_open(struct net_device *dev) |
@@ -1003,88 +934,6 @@ static int au1000_close(struct net_device *dev) | |||
1003 | return 0; | 934 | return 0; |
1004 | } | 935 | } |
1005 | 936 | ||
1006 | static void __exit au1000_cleanup_module(void) | ||
1007 | { | ||
1008 | int i, j; | ||
1009 | struct net_device *dev; | ||
1010 | struct au1000_private *aup; | ||
1011 | |||
1012 | for (i = 0; i < num_ifs; i++) { | ||
1013 | dev = iflist[i].dev; | ||
1014 | if (dev) { | ||
1015 | aup = netdev_priv(dev); | ||
1016 | unregister_netdev(dev); | ||
1017 | mdiobus_unregister(aup->mii_bus); | ||
1018 | mdiobus_free(aup->mii_bus); | ||
1019 | for (j = 0; j < NUM_RX_DMA; j++) | ||
1020 | if (aup->rx_db_inuse[j]) | ||
1021 | ReleaseDB(aup, aup->rx_db_inuse[j]); | ||
1022 | for (j = 0; j < NUM_TX_DMA; j++) | ||
1023 | if (aup->tx_db_inuse[j]) | ||
1024 | ReleaseDB(aup, aup->tx_db_inuse[j]); | ||
1025 | dma_free_noncoherent(NULL, MAX_BUF_SIZE * | ||
1026 | (NUM_TX_BUFFS + NUM_RX_BUFFS), | ||
1027 | (void *)aup->vaddr, aup->dma_addr); | ||
1028 | release_mem_region(dev->base_addr, MAC_IOSIZE); | ||
1029 | release_mem_region(CPHYSADDR(iflist[i].macen_addr), 4); | ||
1030 | free_netdev(dev); | ||
1031 | } | ||
1032 | } | ||
1033 | } | ||
1034 | |||
1035 | static void update_tx_stats(struct net_device *dev, u32 status) | ||
1036 | { | ||
1037 | struct au1000_private *aup = netdev_priv(dev); | ||
1038 | struct net_device_stats *ps = &dev->stats; | ||
1039 | |||
1040 | if (status & TX_FRAME_ABORTED) { | ||
1041 | if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) { | ||
1042 | if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) { | ||
1043 | /* any other tx errors are only valid | ||
1044 | * in half duplex mode */ | ||
1045 | ps->tx_errors++; | ||
1046 | ps->tx_aborted_errors++; | ||
1047 | } | ||
1048 | } | ||
1049 | else { | ||
1050 | ps->tx_errors++; | ||
1051 | ps->tx_aborted_errors++; | ||
1052 | if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER)) | ||
1053 | ps->tx_carrier_errors++; | ||
1054 | } | ||
1055 | } | ||
1056 | } | ||
1057 | |||
1058 | |||
1059 | /* | ||
1060 | * Called from the interrupt service routine to acknowledge | ||
1061 | * the TX DONE bits. This is a must if the irq is setup as | ||
1062 | * edge triggered. | ||
1063 | */ | ||
1064 | static void au1000_tx_ack(struct net_device *dev) | ||
1065 | { | ||
1066 | struct au1000_private *aup = netdev_priv(dev); | ||
1067 | volatile tx_dma_t *ptxd; | ||
1068 | |||
1069 | ptxd = aup->tx_dma_ring[aup->tx_tail]; | ||
1070 | |||
1071 | while (ptxd->buff_stat & TX_T_DONE) { | ||
1072 | update_tx_stats(dev, ptxd->status); | ||
1073 | ptxd->buff_stat &= ~TX_T_DONE; | ||
1074 | ptxd->len = 0; | ||
1075 | au_sync(); | ||
1076 | |||
1077 | aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1); | ||
1078 | ptxd = aup->tx_dma_ring[aup->tx_tail]; | ||
1079 | |||
1080 | if (aup->tx_full) { | ||
1081 | aup->tx_full = 0; | ||
1082 | netif_wake_queue(dev); | ||
1083 | } | ||
1084 | } | ||
1085 | } | ||
1086 | |||
1087 | |||
1088 | /* | 937 | /* |
1089 | * Au1000 transmit routine. | 938 | * Au1000 transmit routine. |
1090 | */ | 939 | */ |
@@ -1142,123 +991,6 @@ static int au1000_tx(struct sk_buff *skb, struct net_device *dev) | |||
1142 | return 0; | 991 | return 0; |
1143 | } | 992 | } |
1144 | 993 | ||
1145 | static inline void update_rx_stats(struct net_device *dev, u32 status) | ||
1146 | { | ||
1147 | struct au1000_private *aup = netdev_priv(dev); | ||
1148 | struct net_device_stats *ps = &dev->stats; | ||
1149 | |||
1150 | ps->rx_packets++; | ||
1151 | if (status & RX_MCAST_FRAME) | ||
1152 | ps->multicast++; | ||
1153 | |||
1154 | if (status & RX_ERROR) { | ||
1155 | ps->rx_errors++; | ||
1156 | if (status & RX_MISSED_FRAME) | ||
1157 | ps->rx_missed_errors++; | ||
1158 | if (status & (RX_OVERLEN | RX_OVERLEN | RX_LEN_ERROR)) | ||
1159 | ps->rx_length_errors++; | ||
1160 | if (status & RX_CRC_ERROR) | ||
1161 | ps->rx_crc_errors++; | ||
1162 | if (status & RX_COLL) | ||
1163 | ps->collisions++; | ||
1164 | } | ||
1165 | else | ||
1166 | ps->rx_bytes += status & RX_FRAME_LEN_MASK; | ||
1167 | |||
1168 | } | ||
1169 | |||
1170 | /* | ||
1171 | * Au1000 receive routine. | ||
1172 | */ | ||
1173 | static int au1000_rx(struct net_device *dev) | ||
1174 | { | ||
1175 | struct au1000_private *aup = netdev_priv(dev); | ||
1176 | struct sk_buff *skb; | ||
1177 | volatile rx_dma_t *prxd; | ||
1178 | u32 buff_stat, status; | ||
1179 | db_dest_t *pDB; | ||
1180 | u32 frmlen; | ||
1181 | |||
1182 | if (au1000_debug > 5) | ||
1183 | printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head); | ||
1184 | |||
1185 | prxd = aup->rx_dma_ring[aup->rx_head]; | ||
1186 | buff_stat = prxd->buff_stat; | ||
1187 | while (buff_stat & RX_T_DONE) { | ||
1188 | status = prxd->status; | ||
1189 | pDB = aup->rx_db_inuse[aup->rx_head]; | ||
1190 | update_rx_stats(dev, status); | ||
1191 | if (!(status & RX_ERROR)) { | ||
1192 | |||
1193 | /* good frame */ | ||
1194 | frmlen = (status & RX_FRAME_LEN_MASK); | ||
1195 | frmlen -= 4; /* Remove FCS */ | ||
1196 | skb = dev_alloc_skb(frmlen + 2); | ||
1197 | if (skb == NULL) { | ||
1198 | printk(KERN_ERR | ||
1199 | "%s: Memory squeeze, dropping packet.\n", | ||
1200 | dev->name); | ||
1201 | dev->stats.rx_dropped++; | ||
1202 | continue; | ||
1203 | } | ||
1204 | skb_reserve(skb, 2); /* 16 byte IP header align */ | ||
1205 | skb_copy_to_linear_data(skb, | ||
1206 | (unsigned char *)pDB->vaddr, frmlen); | ||
1207 | skb_put(skb, frmlen); | ||
1208 | skb->protocol = eth_type_trans(skb, dev); | ||
1209 | netif_rx(skb); /* pass the packet to upper layers */ | ||
1210 | } | ||
1211 | else { | ||
1212 | if (au1000_debug > 4) { | ||
1213 | if (status & RX_MISSED_FRAME) | ||
1214 | printk("rx miss\n"); | ||
1215 | if (status & RX_WDOG_TIMER) | ||
1216 | printk("rx wdog\n"); | ||
1217 | if (status & RX_RUNT) | ||
1218 | printk("rx runt\n"); | ||
1219 | if (status & RX_OVERLEN) | ||
1220 | printk("rx overlen\n"); | ||
1221 | if (status & RX_COLL) | ||
1222 | printk("rx coll\n"); | ||
1223 | if (status & RX_MII_ERROR) | ||
1224 | printk("rx mii error\n"); | ||
1225 | if (status & RX_CRC_ERROR) | ||
1226 | printk("rx crc error\n"); | ||
1227 | if (status & RX_LEN_ERROR) | ||
1228 | printk("rx len error\n"); | ||
1229 | if (status & RX_U_CNTRL_FRAME) | ||
1230 | printk("rx u control frame\n"); | ||
1231 | if (status & RX_MISSED_FRAME) | ||
1232 | printk("rx miss\n"); | ||
1233 | } | ||
1234 | } | ||
1235 | prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE); | ||
1236 | aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1); | ||
1237 | au_sync(); | ||
1238 | |||
1239 | /* next descriptor */ | ||
1240 | prxd = aup->rx_dma_ring[aup->rx_head]; | ||
1241 | buff_stat = prxd->buff_stat; | ||
1242 | } | ||
1243 | return 0; | ||
1244 | } | ||
1245 | |||
1246 | |||
1247 | /* | ||
1248 | * Au1000 interrupt service routine. | ||
1249 | */ | ||
1250 | static irqreturn_t au1000_interrupt(int irq, void *dev_id) | ||
1251 | { | ||
1252 | struct net_device *dev = dev_id; | ||
1253 | |||
1254 | /* Handle RX interrupts first to minimize chance of overrun */ | ||
1255 | |||
1256 | au1000_rx(dev); | ||
1257 | au1000_tx_ack(dev); | ||
1258 | return IRQ_RETVAL(1); | ||
1259 | } | ||
1260 | |||
1261 | |||
1262 | /* | 994 | /* |
1263 | * The Tx ring has been full longer than the watchdog timeout | 995 | * The Tx ring has been full longer than the watchdog timeout |
1264 | * value. The transmitter must be hung? | 996 | * value. The transmitter must be hung? |
@@ -1315,5 +1047,252 @@ static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
1315 | return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd); | 1047 | return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd); |
1316 | } | 1048 | } |
1317 | 1049 | ||
1050 | static struct net_device * au1000_probe(int port_num) | ||
1051 | { | ||
1052 | static unsigned version_printed = 0; | ||
1053 | struct au1000_private *aup = NULL; | ||
1054 | struct net_device *dev = NULL; | ||
1055 | db_dest_t *pDB, *pDBfree; | ||
1056 | char ethaddr[6]; | ||
1057 | int irq, i, err; | ||
1058 | u32 base, macen; | ||
1059 | |||
1060 | if (port_num >= NUM_ETH_INTERFACES) | ||
1061 | return NULL; | ||
1062 | |||
1063 | base = CPHYSADDR(iflist[port_num].base_addr ); | ||
1064 | macen = CPHYSADDR(iflist[port_num].macen_addr); | ||
1065 | irq = iflist[port_num].irq; | ||
1066 | |||
1067 | if (!request_mem_region( base, MAC_IOSIZE, "Au1x00 ENET") || | ||
1068 | !request_mem_region(macen, 4, "Au1x00 ENET")) | ||
1069 | return NULL; | ||
1070 | |||
1071 | if (version_printed++ == 0) | ||
1072 | printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR); | ||
1073 | |||
1074 | dev = alloc_etherdev(sizeof(struct au1000_private)); | ||
1075 | if (!dev) { | ||
1076 | printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME); | ||
1077 | return NULL; | ||
1078 | } | ||
1079 | |||
1080 | if ((err = register_netdev(dev)) != 0) { | ||
1081 | printk(KERN_ERR "%s: Cannot register net device, error %d\n", | ||
1082 | DRV_NAME, err); | ||
1083 | free_netdev(dev); | ||
1084 | return NULL; | ||
1085 | } | ||
1086 | |||
1087 | printk("%s: Au1xx0 Ethernet found at 0x%x, irq %d\n", | ||
1088 | dev->name, base, irq); | ||
1089 | |||
1090 | aup = netdev_priv(dev); | ||
1091 | |||
1092 | spin_lock_init(&aup->lock); | ||
1093 | |||
1094 | /* Allocate the data buffers */ | ||
1095 | /* Snooping works fine with eth on all au1xxx */ | ||
1096 | aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE * | ||
1097 | (NUM_TX_BUFFS + NUM_RX_BUFFS), | ||
1098 | &aup->dma_addr, 0); | ||
1099 | if (!aup->vaddr) { | ||
1100 | free_netdev(dev); | ||
1101 | release_mem_region( base, MAC_IOSIZE); | ||
1102 | release_mem_region(macen, 4); | ||
1103 | return NULL; | ||
1104 | } | ||
1105 | |||
1106 | /* aup->mac is the base address of the MAC's registers */ | ||
1107 | aup->mac = (volatile mac_reg_t *)iflist[port_num].base_addr; | ||
1108 | |||
1109 | /* Setup some variables for quick register address access */ | ||
1110 | aup->enable = (volatile u32 *)iflist[port_num].macen_addr; | ||
1111 | aup->mac_id = port_num; | ||
1112 | au_macs[port_num] = aup; | ||
1113 | |||
1114 | if (port_num == 0) { | ||
1115 | if (prom_get_ethernet_addr(ethaddr) == 0) | ||
1116 | memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr)); | ||
1117 | else { | ||
1118 | printk(KERN_INFO "%s: No MAC address found\n", | ||
1119 | dev->name); | ||
1120 | /* Use the hard coded MAC addresses */ | ||
1121 | } | ||
1122 | |||
1123 | setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR); | ||
1124 | } else if (port_num == 1) | ||
1125 | setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR); | ||
1126 | |||
1127 | /* | ||
1128 | * Assign to the Ethernet ports two consecutive MAC addresses | ||
1129 | * to match those that are printed on their stickers | ||
1130 | */ | ||
1131 | memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr)); | ||
1132 | dev->dev_addr[5] += port_num; | ||
1133 | |||
1134 | *aup->enable = 0; | ||
1135 | aup->mac_enabled = 0; | ||
1136 | |||
1137 | aup->mii_bus = mdiobus_alloc(); | ||
1138 | if (aup->mii_bus == NULL) | ||
1139 | goto err_out; | ||
1140 | |||
1141 | aup->mii_bus->priv = dev; | ||
1142 | aup->mii_bus->read = au1000_mdiobus_read; | ||
1143 | aup->mii_bus->write = au1000_mdiobus_write; | ||
1144 | aup->mii_bus->reset = au1000_mdiobus_reset; | ||
1145 | aup->mii_bus->name = "au1000_eth_mii"; | ||
1146 | snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%x", aup->mac_id); | ||
1147 | aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); | ||
1148 | for(i = 0; i < PHY_MAX_ADDR; ++i) | ||
1149 | aup->mii_bus->irq[i] = PHY_POLL; | ||
1150 | |||
1151 | /* if known, set corresponding PHY IRQs */ | ||
1152 | #if defined(AU1XXX_PHY_STATIC_CONFIG) | ||
1153 | # if defined(AU1XXX_PHY0_IRQ) | ||
1154 | if (AU1XXX_PHY0_BUSID == aup->mac_id) | ||
1155 | aup->mii_bus->irq[AU1XXX_PHY0_ADDR] = AU1XXX_PHY0_IRQ; | ||
1156 | # endif | ||
1157 | # if defined(AU1XXX_PHY1_IRQ) | ||
1158 | if (AU1XXX_PHY1_BUSID == aup->mac_id) | ||
1159 | aup->mii_bus->irq[AU1XXX_PHY1_ADDR] = AU1XXX_PHY1_IRQ; | ||
1160 | # endif | ||
1161 | #endif | ||
1162 | mdiobus_register(aup->mii_bus); | ||
1163 | |||
1164 | if (mii_probe(dev) != 0) { | ||
1165 | goto err_out; | ||
1166 | } | ||
1167 | |||
1168 | pDBfree = NULL; | ||
1169 | /* setup the data buffer descriptors and attach a buffer to each one */ | ||
1170 | pDB = aup->db; | ||
1171 | for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) { | ||
1172 | pDB->pnext = pDBfree; | ||
1173 | pDBfree = pDB; | ||
1174 | pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i); | ||
1175 | pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr); | ||
1176 | pDB++; | ||
1177 | } | ||
1178 | aup->pDBfree = pDBfree; | ||
1179 | |||
1180 | for (i = 0; i < NUM_RX_DMA; i++) { | ||
1181 | pDB = GetFreeDB(aup); | ||
1182 | if (!pDB) { | ||
1183 | goto err_out; | ||
1184 | } | ||
1185 | aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; | ||
1186 | aup->rx_db_inuse[i] = pDB; | ||
1187 | } | ||
1188 | for (i = 0; i < NUM_TX_DMA; i++) { | ||
1189 | pDB = GetFreeDB(aup); | ||
1190 | if (!pDB) { | ||
1191 | goto err_out; | ||
1192 | } | ||
1193 | aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; | ||
1194 | aup->tx_dma_ring[i]->len = 0; | ||
1195 | aup->tx_db_inuse[i] = pDB; | ||
1196 | } | ||
1197 | |||
1198 | dev->base_addr = base; | ||
1199 | dev->irq = irq; | ||
1200 | dev->open = au1000_open; | ||
1201 | dev->hard_start_xmit = au1000_tx; | ||
1202 | dev->stop = au1000_close; | ||
1203 | dev->set_multicast_list = &set_rx_mode; | ||
1204 | dev->do_ioctl = &au1000_ioctl; | ||
1205 | SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops); | ||
1206 | dev->tx_timeout = au1000_tx_timeout; | ||
1207 | dev->watchdog_timeo = ETH_TX_TIMEOUT; | ||
1208 | |||
1209 | /* | ||
1210 | * The boot code uses the ethernet controller, so reset it to start | ||
1211 | * fresh. au1000_init() expects that the device is in reset state. | ||
1212 | */ | ||
1213 | reset_mac(dev); | ||
1214 | |||
1215 | return dev; | ||
1216 | |||
1217 | err_out: | ||
1218 | if (aup->mii_bus != NULL) { | ||
1219 | mdiobus_unregister(aup->mii_bus); | ||
1220 | mdiobus_free(aup->mii_bus); | ||
1221 | } | ||
1222 | |||
1223 | /* here we should have a valid dev plus aup-> register addresses | ||
1224 | * so we can reset the mac properly.*/ | ||
1225 | reset_mac(dev); | ||
1226 | |||
1227 | for (i = 0; i < NUM_RX_DMA; i++) { | ||
1228 | if (aup->rx_db_inuse[i]) | ||
1229 | ReleaseDB(aup, aup->rx_db_inuse[i]); | ||
1230 | } | ||
1231 | for (i = 0; i < NUM_TX_DMA; i++) { | ||
1232 | if (aup->tx_db_inuse[i]) | ||
1233 | ReleaseDB(aup, aup->tx_db_inuse[i]); | ||
1234 | } | ||
1235 | dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), | ||
1236 | (void *)aup->vaddr, aup->dma_addr); | ||
1237 | unregister_netdev(dev); | ||
1238 | free_netdev(dev); | ||
1239 | release_mem_region( base, MAC_IOSIZE); | ||
1240 | release_mem_region(macen, 4); | ||
1241 | return NULL; | ||
1242 | } | ||
1243 | |||
1244 | /* | ||
1245 | * Setup the base address and interrupt of the Au1xxx ethernet macs | ||
1246 | * based on cpu type and whether the interface is enabled in sys_pinfunc | ||
1247 | * register. The last interface is enabled if SYS_PF_NI2 (bit 4) is 0. | ||
1248 | */ | ||
1249 | static int __init au1000_init_module(void) | ||
1250 | { | ||
1251 | int ni = (int)((au_readl(SYS_PINFUNC) & (u32)(SYS_PF_NI2)) >> 4); | ||
1252 | struct net_device *dev; | ||
1253 | int i, found_one = 0; | ||
1254 | |||
1255 | num_ifs = NUM_ETH_INTERFACES - ni; | ||
1256 | |||
1257 | for(i = 0; i < num_ifs; i++) { | ||
1258 | dev = au1000_probe(i); | ||
1259 | iflist[i].dev = dev; | ||
1260 | if (dev) | ||
1261 | found_one++; | ||
1262 | } | ||
1263 | if (!found_one) | ||
1264 | return -ENODEV; | ||
1265 | return 0; | ||
1266 | } | ||
1267 | |||
1268 | static void __exit au1000_cleanup_module(void) | ||
1269 | { | ||
1270 | int i, j; | ||
1271 | struct net_device *dev; | ||
1272 | struct au1000_private *aup; | ||
1273 | |||
1274 | for (i = 0; i < num_ifs; i++) { | ||
1275 | dev = iflist[i].dev; | ||
1276 | if (dev) { | ||
1277 | aup = netdev_priv(dev); | ||
1278 | unregister_netdev(dev); | ||
1279 | mdiobus_unregister(aup->mii_bus); | ||
1280 | mdiobus_free(aup->mii_bus); | ||
1281 | for (j = 0; j < NUM_RX_DMA; j++) | ||
1282 | if (aup->rx_db_inuse[j]) | ||
1283 | ReleaseDB(aup, aup->rx_db_inuse[j]); | ||
1284 | for (j = 0; j < NUM_TX_DMA; j++) | ||
1285 | if (aup->tx_db_inuse[j]) | ||
1286 | ReleaseDB(aup, aup->tx_db_inuse[j]); | ||
1287 | dma_free_noncoherent(NULL, MAX_BUF_SIZE * | ||
1288 | (NUM_TX_BUFFS + NUM_RX_BUFFS), | ||
1289 | (void *)aup->vaddr, aup->dma_addr); | ||
1290 | release_mem_region(dev->base_addr, MAC_IOSIZE); | ||
1291 | release_mem_region(CPHYSADDR(iflist[i].macen_addr), 4); | ||
1292 | free_netdev(dev); | ||
1293 | } | ||
1294 | } | ||
1295 | } | ||
1296 | |||
1318 | module_init(au1000_init_module); | 1297 | module_init(au1000_init_module); |
1319 | module_exit(au1000_cleanup_module); | 1298 | module_exit(au1000_cleanup_module); |
diff --git a/drivers/net/b44.c b/drivers/net/b44.c index c38512ebcea6..92aaaa1ee9f1 100644 --- a/drivers/net/b44.c +++ b/drivers/net/b44.c | |||
@@ -874,7 +874,7 @@ static int b44_poll(struct napi_struct *napi, int budget) | |||
874 | } | 874 | } |
875 | 875 | ||
876 | if (work_done < budget) { | 876 | if (work_done < budget) { |
877 | netif_rx_complete(napi); | 877 | napi_complete(napi); |
878 | b44_enable_ints(bp); | 878 | b44_enable_ints(bp); |
879 | } | 879 | } |
880 | 880 | ||
@@ -906,13 +906,13 @@ static irqreturn_t b44_interrupt(int irq, void *dev_id) | |||
906 | goto irq_ack; | 906 | goto irq_ack; |
907 | } | 907 | } |
908 | 908 | ||
909 | if (netif_rx_schedule_prep(&bp->napi)) { | 909 | if (napi_schedule_prep(&bp->napi)) { |
910 | /* NOTE: These writes are posted by the readback of | 910 | /* NOTE: These writes are posted by the readback of |
911 | * the ISTAT register below. | 911 | * the ISTAT register below. |
912 | */ | 912 | */ |
913 | bp->istat = istat; | 913 | bp->istat = istat; |
914 | __b44_disable_ints(bp); | 914 | __b44_disable_ints(bp); |
915 | __netif_rx_schedule(&bp->napi); | 915 | __napi_schedule(&bp->napi); |
916 | } else { | 916 | } else { |
917 | printk(KERN_ERR PFX "%s: Error, poll already scheduled\n", | 917 | printk(KERN_ERR PFX "%s: Error, poll already scheduled\n", |
918 | dev->name); | 918 | dev->name); |
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c index 78e31aa861e0..9afe8092dfc4 100644 --- a/drivers/net/bfin_mac.c +++ b/drivers/net/bfin_mac.c | |||
@@ -415,11 +415,11 @@ static int mii_probe(struct net_device *dev) | |||
415 | } | 415 | } |
416 | 416 | ||
417 | #if defined(CONFIG_BFIN_MAC_RMII) | 417 | #if defined(CONFIG_BFIN_MAC_RMII) |
418 | phydev = phy_connect(dev, phydev->dev.bus_id, &bfin_mac_adjust_link, 0, | 418 | phydev = phy_connect(dev, dev_name(&phydev->dev), &bfin_mac_adjust_link, |
419 | PHY_INTERFACE_MODE_RMII); | 419 | 0, PHY_INTERFACE_MODE_RMII); |
420 | #else | 420 | #else |
421 | phydev = phy_connect(dev, phydev->dev.bus_id, &bfin_mac_adjust_link, 0, | 421 | phydev = phy_connect(dev, dev_name(&phydev->dev), &bfin_mac_adjust_link, |
422 | PHY_INTERFACE_MODE_MII); | 422 | 0, PHY_INTERFACE_MODE_MII); |
423 | #endif | 423 | #endif |
424 | 424 | ||
425 | if (IS_ERR(phydev)) { | 425 | if (IS_ERR(phydev)) { |
@@ -447,7 +447,7 @@ static int mii_probe(struct net_device *dev) | |||
447 | printk(KERN_INFO "%s: attached PHY driver [%s] " | 447 | printk(KERN_INFO "%s: attached PHY driver [%s] " |
448 | "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)" | 448 | "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)" |
449 | "@sclk=%dMHz)\n", | 449 | "@sclk=%dMHz)\n", |
450 | DRV_NAME, phydev->drv->name, phydev->dev.bus_id, phydev->irq, | 450 | DRV_NAME, phydev->drv->name, dev_name(&phydev->dev), phydev->irq, |
451 | MDC_CLK, mdc_div, sclk/1000000); | 451 | MDC_CLK, mdc_div, sclk/1000000); |
452 | 452 | ||
453 | return 0; | 453 | return 0; |
@@ -488,7 +488,7 @@ static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev, | |||
488 | strcpy(info->driver, DRV_NAME); | 488 | strcpy(info->driver, DRV_NAME); |
489 | strcpy(info->version, DRV_VERSION); | 489 | strcpy(info->version, DRV_VERSION); |
490 | strcpy(info->fw_version, "N/A"); | 490 | strcpy(info->fw_version, "N/A"); |
491 | strcpy(info->bus_info, dev->dev.bus_id); | 491 | strcpy(info->bus_info, dev_name(&dev->dev)); |
492 | } | 492 | } |
493 | 493 | ||
494 | static struct ethtool_ops bfin_mac_ethtool_ops = { | 494 | static struct ethtool_ops bfin_mac_ethtool_ops = { |
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c index 8a546a33d581..1ab58375d061 100644 --- a/drivers/net/bmac.c +++ b/drivers/net/bmac.c | |||
@@ -1240,7 +1240,7 @@ static void bmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *inf | |||
1240 | { | 1240 | { |
1241 | struct bmac_data *bp = netdev_priv(dev); | 1241 | struct bmac_data *bp = netdev_priv(dev); |
1242 | strcpy(info->driver, "bmac"); | 1242 | strcpy(info->driver, "bmac"); |
1243 | strcpy(info->bus_info, bp->mdev->ofdev.dev.bus_id); | 1243 | strcpy(info->bus_info, dev_name(&bp->mdev->ofdev.dev)); |
1244 | } | 1244 | } |
1245 | 1245 | ||
1246 | static const struct ethtool_ops bmac_ethtool_ops = { | 1246 | static const struct ethtool_ops bmac_ethtool_ops = { |
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index d4a3dac21dcf..49e0e51a9dfc 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c | |||
@@ -1497,6 +1497,8 @@ static int bnx2_fw_sync(struct bnx2 *, u32, int, int); | |||
1497 | 1497 | ||
1498 | static int | 1498 | static int |
1499 | bnx2_setup_remote_phy(struct bnx2 *bp, u8 port) | 1499 | bnx2_setup_remote_phy(struct bnx2 *bp, u8 port) |
1500 | __releases(&bp->phy_lock) | ||
1501 | __acquires(&bp->phy_lock) | ||
1500 | { | 1502 | { |
1501 | u32 speed_arg = 0, pause_adv; | 1503 | u32 speed_arg = 0, pause_adv; |
1502 | 1504 | ||
@@ -1554,6 +1556,8 @@ bnx2_setup_remote_phy(struct bnx2 *bp, u8 port) | |||
1554 | 1556 | ||
1555 | static int | 1557 | static int |
1556 | bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port) | 1558 | bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port) |
1559 | __releases(&bp->phy_lock) | ||
1560 | __acquires(&bp->phy_lock) | ||
1557 | { | 1561 | { |
1558 | u32 adv, bmcr; | 1562 | u32 adv, bmcr; |
1559 | u32 new_adv = 0; | 1563 | u32 new_adv = 0; |
@@ -1866,6 +1870,8 @@ bnx2_set_remote_link(struct bnx2 *bp) | |||
1866 | 1870 | ||
1867 | static int | 1871 | static int |
1868 | bnx2_setup_copper_phy(struct bnx2 *bp) | 1872 | bnx2_setup_copper_phy(struct bnx2 *bp) |
1873 | __releases(&bp->phy_lock) | ||
1874 | __acquires(&bp->phy_lock) | ||
1869 | { | 1875 | { |
1870 | u32 bmcr; | 1876 | u32 bmcr; |
1871 | u32 new_bmcr; | 1877 | u32 new_bmcr; |
@@ -1963,6 +1969,8 @@ bnx2_setup_copper_phy(struct bnx2 *bp) | |||
1963 | 1969 | ||
1964 | static int | 1970 | static int |
1965 | bnx2_setup_phy(struct bnx2 *bp, u8 port) | 1971 | bnx2_setup_phy(struct bnx2 *bp, u8 port) |
1972 | __releases(&bp->phy_lock) | ||
1973 | __acquires(&bp->phy_lock) | ||
1966 | { | 1974 | { |
1967 | if (bp->loopback == MAC_LOOPBACK) | 1975 | if (bp->loopback == MAC_LOOPBACK) |
1968 | return 0; | 1976 | return 0; |
@@ -2176,6 +2184,8 @@ bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy) | |||
2176 | 2184 | ||
2177 | static int | 2185 | static int |
2178 | bnx2_init_phy(struct bnx2 *bp, int reset_phy) | 2186 | bnx2_init_phy(struct bnx2 *bp, int reset_phy) |
2187 | __releases(&bp->phy_lock) | ||
2188 | __acquires(&bp->phy_lock) | ||
2179 | { | 2189 | { |
2180 | u32 val; | 2190 | u32 val; |
2181 | int rc = 0; | 2191 | int rc = 0; |
@@ -2997,6 +3007,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) | |||
2997 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 3007 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
2998 | } | 3008 | } |
2999 | 3009 | ||
3010 | skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]); | ||
3011 | |||
3000 | #ifdef BCM_VLAN | 3012 | #ifdef BCM_VLAN |
3001 | if (hw_vlan) | 3013 | if (hw_vlan) |
3002 | vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag); | 3014 | vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag); |
@@ -3053,7 +3065,7 @@ bnx2_msi(int irq, void *dev_instance) | |||
3053 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) | 3065 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) |
3054 | return IRQ_HANDLED; | 3066 | return IRQ_HANDLED; |
3055 | 3067 | ||
3056 | netif_rx_schedule(&bnapi->napi); | 3068 | napi_schedule(&bnapi->napi); |
3057 | 3069 | ||
3058 | return IRQ_HANDLED; | 3070 | return IRQ_HANDLED; |
3059 | } | 3071 | } |
@@ -3070,7 +3082,7 @@ bnx2_msi_1shot(int irq, void *dev_instance) | |||
3070 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) | 3082 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) |
3071 | return IRQ_HANDLED; | 3083 | return IRQ_HANDLED; |
3072 | 3084 | ||
3073 | netif_rx_schedule(&bnapi->napi); | 3085 | napi_schedule(&bnapi->napi); |
3074 | 3086 | ||
3075 | return IRQ_HANDLED; | 3087 | return IRQ_HANDLED; |
3076 | } | 3088 | } |
@@ -3106,9 +3118,9 @@ bnx2_interrupt(int irq, void *dev_instance) | |||
3106 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) | 3118 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) |
3107 | return IRQ_HANDLED; | 3119 | return IRQ_HANDLED; |
3108 | 3120 | ||
3109 | if (netif_rx_schedule_prep(&bnapi->napi)) { | 3121 | if (napi_schedule_prep(&bnapi->napi)) { |
3110 | bnapi->last_status_idx = sblk->status_idx; | 3122 | bnapi->last_status_idx = sblk->status_idx; |
3111 | __netif_rx_schedule(&bnapi->napi); | 3123 | __napi_schedule(&bnapi->napi); |
3112 | } | 3124 | } |
3113 | 3125 | ||
3114 | return IRQ_HANDLED; | 3126 | return IRQ_HANDLED; |
@@ -3218,7 +3230,7 @@ static int bnx2_poll_msix(struct napi_struct *napi, int budget) | |||
3218 | rmb(); | 3230 | rmb(); |
3219 | if (likely(!bnx2_has_fast_work(bnapi))) { | 3231 | if (likely(!bnx2_has_fast_work(bnapi))) { |
3220 | 3232 | ||
3221 | netif_rx_complete(napi); | 3233 | napi_complete(napi); |
3222 | REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | | 3234 | REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | |
3223 | BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | | 3235 | BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | |
3224 | bnapi->last_status_idx); | 3236 | bnapi->last_status_idx); |
@@ -3251,7 +3263,7 @@ static int bnx2_poll(struct napi_struct *napi, int budget) | |||
3251 | 3263 | ||
3252 | rmb(); | 3264 | rmb(); |
3253 | if (likely(!bnx2_has_work(bnapi))) { | 3265 | if (likely(!bnx2_has_work(bnapi))) { |
3254 | netif_rx_complete(napi); | 3266 | napi_complete(napi); |
3255 | if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) { | 3267 | if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) { |
3256 | REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, | 3268 | REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, |
3257 | BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | | 3269 | BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | |
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c index d3e7775a9ccf..88da14c141f4 100644 --- a/drivers/net/bnx2x_main.c +++ b/drivers/net/bnx2x_main.c | |||
@@ -1325,6 +1325,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
1325 | 1325 | ||
1326 | skb->protocol = eth_type_trans(skb, bp->dev); | 1326 | skb->protocol = eth_type_trans(skb, bp->dev); |
1327 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 1327 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1328 | skb_record_rx_queue(skb, queue); | ||
1328 | 1329 | ||
1329 | { | 1330 | { |
1330 | struct iphdr *iph; | 1331 | struct iphdr *iph; |
@@ -1654,7 +1655,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) | |||
1654 | prefetch(&fp->status_blk->c_status_block.status_block_index); | 1655 | prefetch(&fp->status_blk->c_status_block.status_block_index); |
1655 | prefetch(&fp->status_blk->u_status_block.status_block_index); | 1656 | prefetch(&fp->status_blk->u_status_block.status_block_index); |
1656 | 1657 | ||
1657 | netif_rx_schedule(&bnx2x_fp(bp, index, napi)); | 1658 | napi_schedule(&bnx2x_fp(bp, index, napi)); |
1658 | 1659 | ||
1659 | return IRQ_HANDLED; | 1660 | return IRQ_HANDLED; |
1660 | } | 1661 | } |
@@ -1693,7 +1694,7 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) | |||
1693 | prefetch(&fp->status_blk->c_status_block.status_block_index); | 1694 | prefetch(&fp->status_blk->c_status_block.status_block_index); |
1694 | prefetch(&fp->status_blk->u_status_block.status_block_index); | 1695 | prefetch(&fp->status_blk->u_status_block.status_block_index); |
1695 | 1696 | ||
1696 | netif_rx_schedule(&bnx2x_fp(bp, 0, napi)); | 1697 | napi_schedule(&bnx2x_fp(bp, 0, napi)); |
1697 | 1698 | ||
1698 | status &= ~mask; | 1699 | status &= ~mask; |
1699 | } | 1700 | } |
@@ -9374,7 +9375,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget) | |||
9374 | #ifdef BNX2X_STOP_ON_ERROR | 9375 | #ifdef BNX2X_STOP_ON_ERROR |
9375 | poll_panic: | 9376 | poll_panic: |
9376 | #endif | 9377 | #endif |
9377 | netif_rx_complete(napi); | 9378 | napi_complete(napi); |
9378 | 9379 | ||
9379 | bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, | 9380 | bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, |
9380 | le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1); | 9381 | le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1); |
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h index 8a83eb283c21..a306230381c8 100644 --- a/drivers/net/bonding/bond_3ad.h +++ b/drivers/net/bonding/bond_3ad.h | |||
@@ -29,7 +29,7 @@ | |||
29 | 29 | ||
30 | // General definitions | 30 | // General definitions |
31 | #define BOND_ETH_P_LACPDU 0x8809 | 31 | #define BOND_ETH_P_LACPDU 0x8809 |
32 | #define PKT_TYPE_LACPDU __constant_htons(BOND_ETH_P_LACPDU) | 32 | #define PKT_TYPE_LACPDU cpu_to_be16(BOND_ETH_P_LACPDU) |
33 | #define AD_TIMER_INTERVAL 100 /*msec*/ | 33 | #define AD_TIMER_INTERVAL 100 /*msec*/ |
34 | 34 | ||
35 | #define MULTICAST_LACPDU_ADDR {0x01, 0x80, 0xC2, 0x00, 0x00, 0x02} | 35 | #define MULTICAST_LACPDU_ADDR {0x01, 0x80, 0xC2, 0x00, 0x00, 0x02} |
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 27fb7f5c21cf..409b14074275 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c | |||
@@ -822,7 +822,7 @@ static int rlb_initialize(struct bonding *bond) | |||
822 | _unlock_rx_hashtbl(bond); | 822 | _unlock_rx_hashtbl(bond); |
823 | 823 | ||
824 | /*initialize packet type*/ | 824 | /*initialize packet type*/ |
825 | pk_type->type = __constant_htons(ETH_P_ARP); | 825 | pk_type->type = cpu_to_be16(ETH_P_ARP); |
826 | pk_type->dev = NULL; | 826 | pk_type->dev = NULL; |
827 | pk_type->func = rlb_arp_recv; | 827 | pk_type->func = rlb_arp_recv; |
828 | 828 | ||
@@ -892,7 +892,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]) | |||
892 | memset(&pkt, 0, size); | 892 | memset(&pkt, 0, size); |
893 | memcpy(pkt.mac_dst, mac_addr, ETH_ALEN); | 893 | memcpy(pkt.mac_dst, mac_addr, ETH_ALEN); |
894 | memcpy(pkt.mac_src, mac_addr, ETH_ALEN); | 894 | memcpy(pkt.mac_src, mac_addr, ETH_ALEN); |
895 | pkt.type = __constant_htons(ETH_P_LOOP); | 895 | pkt.type = cpu_to_be16(ETH_P_LOOP); |
896 | 896 | ||
897 | for (i = 0; i < MAX_LP_BURST; i++) { | 897 | for (i = 0; i < MAX_LP_BURST; i++) { |
898 | struct sk_buff *skb; | 898 | struct sk_buff *skb; |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 9fb388388fb7..21bce2c0fde2 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -3369,7 +3369,7 @@ static int bond_info_seq_show(struct seq_file *seq, void *v) | |||
3369 | return 0; | 3369 | return 0; |
3370 | } | 3370 | } |
3371 | 3371 | ||
3372 | static struct seq_operations bond_info_seq_ops = { | 3372 | static const struct seq_operations bond_info_seq_ops = { |
3373 | .start = bond_info_seq_start, | 3373 | .start = bond_info_seq_start, |
3374 | .next = bond_info_seq_next, | 3374 | .next = bond_info_seq_next, |
3375 | .stop = bond_info_seq_stop, | 3375 | .stop = bond_info_seq_stop, |
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c index bbbc3bb08aa5..0effefa1b882 100644 --- a/drivers/net/cassini.c +++ b/drivers/net/cassini.c | |||
@@ -2506,7 +2506,7 @@ static irqreturn_t cas_interruptN(int irq, void *dev_id) | |||
2506 | if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ | 2506 | if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ |
2507 | #ifdef USE_NAPI | 2507 | #ifdef USE_NAPI |
2508 | cas_mask_intr(cp); | 2508 | cas_mask_intr(cp); |
2509 | netif_rx_schedule(&cp->napi); | 2509 | napi_schedule(&cp->napi); |
2510 | #else | 2510 | #else |
2511 | cas_rx_ringN(cp, ring, 0); | 2511 | cas_rx_ringN(cp, ring, 0); |
2512 | #endif | 2512 | #endif |
@@ -2557,7 +2557,7 @@ static irqreturn_t cas_interrupt1(int irq, void *dev_id) | |||
2557 | if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ | 2557 | if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ |
2558 | #ifdef USE_NAPI | 2558 | #ifdef USE_NAPI |
2559 | cas_mask_intr(cp); | 2559 | cas_mask_intr(cp); |
2560 | netif_rx_schedule(&cp->napi); | 2560 | napi_schedule(&cp->napi); |
2561 | #else | 2561 | #else |
2562 | cas_rx_ringN(cp, 1, 0); | 2562 | cas_rx_ringN(cp, 1, 0); |
2563 | #endif | 2563 | #endif |
@@ -2613,7 +2613,7 @@ static irqreturn_t cas_interrupt(int irq, void *dev_id) | |||
2613 | if (status & INTR_RX_DONE) { | 2613 | if (status & INTR_RX_DONE) { |
2614 | #ifdef USE_NAPI | 2614 | #ifdef USE_NAPI |
2615 | cas_mask_intr(cp); | 2615 | cas_mask_intr(cp); |
2616 | netif_rx_schedule(&cp->napi); | 2616 | napi_schedule(&cp->napi); |
2617 | #else | 2617 | #else |
2618 | cas_rx_ringN(cp, 0, 0); | 2618 | cas_rx_ringN(cp, 0, 0); |
2619 | #endif | 2619 | #endif |
@@ -2691,7 +2691,7 @@ rx_comp: | |||
2691 | #endif | 2691 | #endif |
2692 | spin_unlock_irqrestore(&cp->lock, flags); | 2692 | spin_unlock_irqrestore(&cp->lock, flags); |
2693 | if (enable_intr) { | 2693 | if (enable_intr) { |
2694 | netif_rx_complete(napi); | 2694 | napi_complete(napi); |
2695 | cas_unmask_intr(cp); | 2695 | cas_unmask_intr(cp); |
2696 | } | 2696 | } |
2697 | return credits; | 2697 | return credits; |
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c index d984b7995763..840da83fb3cf 100644 --- a/drivers/net/chelsio/sge.c +++ b/drivers/net/chelsio/sge.c | |||
@@ -1612,7 +1612,7 @@ int t1_poll(struct napi_struct *napi, int budget) | |||
1612 | int work_done = process_responses(adapter, budget); | 1612 | int work_done = process_responses(adapter, budget); |
1613 | 1613 | ||
1614 | if (likely(work_done < budget)) { | 1614 | if (likely(work_done < budget)) { |
1615 | netif_rx_complete(napi); | 1615 | napi_complete(napi); |
1616 | writel(adapter->sge->respQ.cidx, | 1616 | writel(adapter->sge->respQ.cidx, |
1617 | adapter->regs + A_SG_SLEEPING); | 1617 | adapter->regs + A_SG_SLEEPING); |
1618 | } | 1618 | } |
@@ -1630,7 +1630,7 @@ irqreturn_t t1_interrupt(int irq, void *data) | |||
1630 | 1630 | ||
1631 | if (napi_schedule_prep(&adapter->napi)) { | 1631 | if (napi_schedule_prep(&adapter->napi)) { |
1632 | if (process_pure_responses(adapter)) | 1632 | if (process_pure_responses(adapter)) |
1633 | __netif_rx_schedule(&adapter->napi); | 1633 | __napi_schedule(&adapter->napi); |
1634 | else { | 1634 | else { |
1635 | /* no data, no NAPI needed */ | 1635 | /* no data, no NAPI needed */ |
1636 | writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); | 1636 | writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); |
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c index f66548751c38..3f476c7c0736 100644 --- a/drivers/net/cpmac.c +++ b/drivers/net/cpmac.c | |||
@@ -428,7 +428,7 @@ static int cpmac_poll(struct napi_struct *napi, int budget) | |||
428 | printk(KERN_WARNING "%s: rx: polling, but no queue\n", | 428 | printk(KERN_WARNING "%s: rx: polling, but no queue\n", |
429 | priv->dev->name); | 429 | priv->dev->name); |
430 | spin_unlock(&priv->rx_lock); | 430 | spin_unlock(&priv->rx_lock); |
431 | netif_rx_complete(napi); | 431 | napi_complete(napi); |
432 | return 0; | 432 | return 0; |
433 | } | 433 | } |
434 | 434 | ||
@@ -514,7 +514,7 @@ static int cpmac_poll(struct napi_struct *napi, int budget) | |||
514 | if (processed == 0) { | 514 | if (processed == 0) { |
515 | /* we ran out of packets to read, | 515 | /* we ran out of packets to read, |
516 | * revert to interrupt-driven mode */ | 516 | * revert to interrupt-driven mode */ |
517 | netif_rx_complete(napi); | 517 | napi_complete(napi); |
518 | cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); | 518 | cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); |
519 | return 0; | 519 | return 0; |
520 | } | 520 | } |
@@ -536,7 +536,7 @@ fatal_error: | |||
536 | } | 536 | } |
537 | 537 | ||
538 | spin_unlock(&priv->rx_lock); | 538 | spin_unlock(&priv->rx_lock); |
539 | netif_rx_complete(napi); | 539 | napi_complete(napi); |
540 | netif_tx_stop_all_queues(priv->dev); | 540 | netif_tx_stop_all_queues(priv->dev); |
541 | napi_disable(&priv->napi); | 541 | napi_disable(&priv->napi); |
542 | 542 | ||
@@ -802,9 +802,9 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id) | |||
802 | 802 | ||
803 | if (status & MAC_INT_RX) { | 803 | if (status & MAC_INT_RX) { |
804 | queue = (status >> 8) & 7; | 804 | queue = (status >> 8) & 7; |
805 | if (netif_rx_schedule_prep(&priv->napi)) { | 805 | if (napi_schedule_prep(&priv->napi)) { |
806 | cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue); | 806 | cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue); |
807 | __netif_rx_schedule(&priv->napi); | 807 | __napi_schedule(&priv->napi); |
808 | } | 808 | } |
809 | } | 809 | } |
810 | 810 | ||
@@ -1161,7 +1161,7 @@ static int __devinit cpmac_probe(struct platform_device *pdev) | |||
1161 | priv->msg_enable = netif_msg_init(debug_level, 0xff); | 1161 | priv->msg_enable = netif_msg_init(debug_level, 0xff); |
1162 | memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr)); | 1162 | memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr)); |
1163 | 1163 | ||
1164 | priv->phy = phy_connect(dev, cpmac_mii->phy_map[phy_id]->dev.bus_id, | 1164 | priv->phy = phy_connect(dev, dev_name(&cpmac_mii->phy_map[phy_id]->dev), |
1165 | &cpmac_adjust_link, 0, PHY_INTERFACE_MODE_MII); | 1165 | &cpmac_adjust_link, 0, PHY_INTERFACE_MODE_MII); |
1166 | if (IS_ERR(priv->phy)) { | 1166 | if (IS_ERR(priv->phy)) { |
1167 | if (netif_msg_drv(priv)) | 1167 | if (netif_msg_drv(priv)) |
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h index a89d8cc51205..fbe15699584e 100644 --- a/drivers/net/cxgb3/adapter.h +++ b/drivers/net/cxgb3/adapter.h | |||
@@ -42,7 +42,6 @@ | |||
42 | #include <linux/cache.h> | 42 | #include <linux/cache.h> |
43 | #include <linux/mutex.h> | 43 | #include <linux/mutex.h> |
44 | #include <linux/bitops.h> | 44 | #include <linux/bitops.h> |
45 | #include <linux/inet_lro.h> | ||
46 | #include "t3cdev.h" | 45 | #include "t3cdev.h" |
47 | #include <asm/io.h> | 46 | #include <asm/io.h> |
48 | 47 | ||
@@ -178,15 +177,11 @@ enum { /* per port SGE statistics */ | |||
178 | SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */ | 177 | SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */ |
179 | SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */ | 178 | SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */ |
180 | SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */ | 179 | SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */ |
181 | SGE_PSTAT_LRO_AGGR, /* # of page chunks added to LRO sessions */ | ||
182 | SGE_PSTAT_LRO_FLUSHED, /* # of flushed LRO sessions */ | ||
183 | SGE_PSTAT_LRO_NO_DESC, /* # of overflown LRO sessions */ | ||
184 | 180 | ||
185 | SGE_PSTAT_MAX /* must be last */ | 181 | SGE_PSTAT_MAX /* must be last */ |
186 | }; | 182 | }; |
187 | 183 | ||
188 | #define T3_MAX_LRO_SES 8 | 184 | struct napi_gro_fraginfo; |
189 | #define T3_MAX_LRO_MAX_PKTS 64 | ||
190 | 185 | ||
191 | struct sge_qset { /* an SGE queue set */ | 186 | struct sge_qset { /* an SGE queue set */ |
192 | struct adapter *adap; | 187 | struct adapter *adap; |
@@ -194,12 +189,8 @@ struct sge_qset { /* an SGE queue set */ | |||
194 | struct sge_rspq rspq; | 189 | struct sge_rspq rspq; |
195 | struct sge_fl fl[SGE_RXQ_PER_SET]; | 190 | struct sge_fl fl[SGE_RXQ_PER_SET]; |
196 | struct sge_txq txq[SGE_TXQ_PER_SET]; | 191 | struct sge_txq txq[SGE_TXQ_PER_SET]; |
197 | struct net_lro_mgr lro_mgr; | 192 | struct napi_gro_fraginfo lro_frag_tbl; |
198 | struct net_lro_desc lro_desc[T3_MAX_LRO_SES]; | ||
199 | struct skb_frag_struct *lro_frag_tbl; | ||
200 | int lro_nfrags; | ||
201 | int lro_enabled; | 193 | int lro_enabled; |
202 | int lro_frag_len; | ||
203 | void *lro_va; | 194 | void *lro_va; |
204 | struct net_device *netdev; | 195 | struct net_device *netdev; |
205 | struct netdev_queue *tx_q; /* associated netdev TX queue */ | 196 | struct netdev_queue *tx_q; /* associated netdev TX queue */ |
@@ -230,6 +221,7 @@ struct adapter { | |||
230 | unsigned int slow_intr_mask; | 221 | unsigned int slow_intr_mask; |
231 | unsigned long irq_stats[IRQ_NUM_STATS]; | 222 | unsigned long irq_stats[IRQ_NUM_STATS]; |
232 | 223 | ||
224 | int msix_nvectors; | ||
233 | struct { | 225 | struct { |
234 | unsigned short vec; | 226 | unsigned short vec; |
235 | char desc[22]; | 227 | char desc[22]; |
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index 0089746b8d02..f2c7cc3e263a 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c | |||
@@ -338,7 +338,7 @@ static void free_irq_resources(struct adapter *adapter) | |||
338 | 338 | ||
339 | free_irq(adapter->msix_info[0].vec, adapter); | 339 | free_irq(adapter->msix_info[0].vec, adapter); |
340 | for_each_port(adapter, i) | 340 | for_each_port(adapter, i) |
341 | n += adap2pinfo(adapter, i)->nqsets; | 341 | n += adap2pinfo(adapter, i)->nqsets; |
342 | 342 | ||
343 | for (i = 0; i < n; ++i) | 343 | for (i = 0; i < n; ++i) |
344 | free_irq(adapter->msix_info[i + 1].vec, | 344 | free_irq(adapter->msix_info[i + 1].vec, |
@@ -508,19 +508,9 @@ static void set_qset_lro(struct net_device *dev, int qset_idx, int val) | |||
508 | { | 508 | { |
509 | struct port_info *pi = netdev_priv(dev); | 509 | struct port_info *pi = netdev_priv(dev); |
510 | struct adapter *adapter = pi->adapter; | 510 | struct adapter *adapter = pi->adapter; |
511 | int i, lro_on = 1; | ||
512 | 511 | ||
513 | adapter->params.sge.qset[qset_idx].lro = !!val; | 512 | adapter->params.sge.qset[qset_idx].lro = !!val; |
514 | adapter->sge.qs[qset_idx].lro_enabled = !!val; | 513 | adapter->sge.qs[qset_idx].lro_enabled = !!val; |
515 | |||
516 | /* let ethtool report LRO on only if all queues are LRO enabled */ | ||
517 | for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; ++i) | ||
518 | lro_on &= adapter->params.sge.qset[i].lro; | ||
519 | |||
520 | if (lro_on) | ||
521 | dev->features |= NETIF_F_LRO; | ||
522 | else | ||
523 | dev->features &= ~NETIF_F_LRO; | ||
524 | } | 514 | } |
525 | 515 | ||
526 | /** | 516 | /** |
@@ -1433,9 +1423,9 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats, | |||
1433 | *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS); | 1423 | *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS); |
1434 | *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM); | 1424 | *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM); |
1435 | *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD); | 1425 | *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD); |
1436 | *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_AGGR); | 1426 | *data++ = 0; |
1437 | *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_FLUSHED); | 1427 | *data++ = 0; |
1438 | *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_NO_DESC); | 1428 | *data++ = 0; |
1439 | *data++ = s->rx_cong_drops; | 1429 | *data++ = s->rx_cong_drops; |
1440 | 1430 | ||
1441 | *data++ = s->num_toggled; | 1431 | *data++ = s->num_toggled; |
@@ -1826,28 +1816,6 @@ static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |||
1826 | memset(&wol->sopass, 0, sizeof(wol->sopass)); | 1816 | memset(&wol->sopass, 0, sizeof(wol->sopass)); |
1827 | } | 1817 | } |
1828 | 1818 | ||
1829 | static int cxgb3_set_flags(struct net_device *dev, u32 data) | ||
1830 | { | ||
1831 | struct port_info *pi = netdev_priv(dev); | ||
1832 | int i; | ||
1833 | |||
1834 | if (data & ETH_FLAG_LRO) { | ||
1835 | if (!(pi->rx_offload & T3_RX_CSUM)) | ||
1836 | return -EINVAL; | ||
1837 | |||
1838 | pi->rx_offload |= T3_LRO; | ||
1839 | for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) | ||
1840 | set_qset_lro(dev, i, 1); | ||
1841 | |||
1842 | } else { | ||
1843 | pi->rx_offload &= ~T3_LRO; | ||
1844 | for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) | ||
1845 | set_qset_lro(dev, i, 0); | ||
1846 | } | ||
1847 | |||
1848 | return 0; | ||
1849 | } | ||
1850 | |||
1851 | static const struct ethtool_ops cxgb_ethtool_ops = { | 1819 | static const struct ethtool_ops cxgb_ethtool_ops = { |
1852 | .get_settings = get_settings, | 1820 | .get_settings = get_settings, |
1853 | .set_settings = set_settings, | 1821 | .set_settings = set_settings, |
@@ -1877,8 +1845,6 @@ static const struct ethtool_ops cxgb_ethtool_ops = { | |||
1877 | .get_regs = get_regs, | 1845 | .get_regs = get_regs, |
1878 | .get_wol = get_wol, | 1846 | .get_wol = get_wol, |
1879 | .set_tso = ethtool_op_set_tso, | 1847 | .set_tso = ethtool_op_set_tso, |
1880 | .get_flags = ethtool_op_get_flags, | ||
1881 | .set_flags = cxgb3_set_flags, | ||
1882 | }; | 1848 | }; |
1883 | 1849 | ||
1884 | static int in_range(int val, int lo, int hi) | 1850 | static int in_range(int val, int lo, int hi) |
@@ -2576,6 +2542,12 @@ static int t3_adapter_error(struct adapter *adapter, int reset) | |||
2576 | { | 2542 | { |
2577 | int i, ret = 0; | 2543 | int i, ret = 0; |
2578 | 2544 | ||
2545 | if (is_offload(adapter) && | ||
2546 | test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) { | ||
2547 | cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0); | ||
2548 | offload_close(&adapter->tdev); | ||
2549 | } | ||
2550 | |||
2579 | /* Stop all ports */ | 2551 | /* Stop all ports */ |
2580 | for_each_port(adapter, i) { | 2552 | for_each_port(adapter, i) { |
2581 | struct net_device *netdev = adapter->port[i]; | 2553 | struct net_device *netdev = adapter->port[i]; |
@@ -2584,10 +2556,6 @@ static int t3_adapter_error(struct adapter *adapter, int reset) | |||
2584 | cxgb_close(netdev); | 2556 | cxgb_close(netdev); |
2585 | } | 2557 | } |
2586 | 2558 | ||
2587 | if (is_offload(adapter) && | ||
2588 | test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) | ||
2589 | offload_close(&adapter->tdev); | ||
2590 | |||
2591 | /* Stop SGE timers */ | 2559 | /* Stop SGE timers */ |
2592 | t3_stop_sge_timers(adapter); | 2560 | t3_stop_sge_timers(adapter); |
2593 | 2561 | ||
@@ -2639,6 +2607,9 @@ static void t3_resume_ports(struct adapter *adapter) | |||
2639 | } | 2607 | } |
2640 | } | 2608 | } |
2641 | } | 2609 | } |
2610 | |||
2611 | if (is_offload(adapter) && !ofld_disable) | ||
2612 | cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0); | ||
2642 | } | 2613 | } |
2643 | 2614 | ||
2644 | /* | 2615 | /* |
@@ -2752,7 +2723,7 @@ static void set_nqsets(struct adapter *adap) | |||
2752 | int i, j = 0; | 2723 | int i, j = 0; |
2753 | int num_cpus = num_online_cpus(); | 2724 | int num_cpus = num_online_cpus(); |
2754 | int hwports = adap->params.nports; | 2725 | int hwports = adap->params.nports; |
2755 | int nqsets = SGE_QSETS; | 2726 | int nqsets = adap->msix_nvectors - 1; |
2756 | 2727 | ||
2757 | if (adap->params.rev > 0 && adap->flags & USING_MSIX) { | 2728 | if (adap->params.rev > 0 && adap->flags & USING_MSIX) { |
2758 | if (hwports == 2 && | 2729 | if (hwports == 2 && |
@@ -2781,18 +2752,25 @@ static void set_nqsets(struct adapter *adap) | |||
2781 | static int __devinit cxgb_enable_msix(struct adapter *adap) | 2752 | static int __devinit cxgb_enable_msix(struct adapter *adap) |
2782 | { | 2753 | { |
2783 | struct msix_entry entries[SGE_QSETS + 1]; | 2754 | struct msix_entry entries[SGE_QSETS + 1]; |
2755 | int vectors; | ||
2784 | int i, err; | 2756 | int i, err; |
2785 | 2757 | ||
2786 | for (i = 0; i < ARRAY_SIZE(entries); ++i) | 2758 | vectors = ARRAY_SIZE(entries); |
2759 | for (i = 0; i < vectors; ++i) | ||
2787 | entries[i].entry = i; | 2760 | entries[i].entry = i; |
2788 | 2761 | ||
2789 | err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries)); | 2762 | while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0) |
2763 | vectors = err; | ||
2764 | |||
2765 | if (!err && vectors < (adap->params.nports + 1)) | ||
2766 | err = -1; | ||
2767 | |||
2790 | if (!err) { | 2768 | if (!err) { |
2791 | for (i = 0; i < ARRAY_SIZE(entries); ++i) | 2769 | for (i = 0; i < vectors; ++i) |
2792 | adap->msix_info[i].vec = entries[i].vector; | 2770 | adap->msix_info[i].vec = entries[i].vector; |
2793 | } else if (err > 0) | 2771 | adap->msix_nvectors = vectors; |
2794 | dev_info(&adap->pdev->dev, | 2772 | } |
2795 | "only %d MSI-X vectors left, not using MSI-X\n", err); | 2773 | |
2796 | return err; | 2774 | return err; |
2797 | } | 2775 | } |
2798 | 2776 | ||
@@ -2960,7 +2938,7 @@ static int __devinit init_one(struct pci_dev *pdev, | |||
2960 | netdev->mem_end = mmio_start + mmio_len - 1; | 2938 | netdev->mem_end = mmio_start + mmio_len - 1; |
2961 | netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; | 2939 | netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; |
2962 | netdev->features |= NETIF_F_LLTX; | 2940 | netdev->features |= NETIF_F_LLTX; |
2963 | netdev->features |= NETIF_F_LRO; | 2941 | netdev->features |= NETIF_F_GRO; |
2964 | if (pci_using_dac) | 2942 | if (pci_using_dac) |
2965 | netdev->features |= NETIF_F_HIGHDMA; | 2943 | netdev->features |= NETIF_F_HIGHDMA; |
2966 | 2944 | ||
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c index 2d7f69aff1d9..620d80be6aac 100644 --- a/drivers/net/cxgb3/cxgb3_offload.c +++ b/drivers/net/cxgb3/cxgb3_offload.c | |||
@@ -153,6 +153,18 @@ void cxgb3_remove_clients(struct t3cdev *tdev) | |||
153 | mutex_unlock(&cxgb3_db_lock); | 153 | mutex_unlock(&cxgb3_db_lock); |
154 | } | 154 | } |
155 | 155 | ||
156 | void cxgb3_err_notify(struct t3cdev *tdev, u32 status, u32 error) | ||
157 | { | ||
158 | struct cxgb3_client *client; | ||
159 | |||
160 | mutex_lock(&cxgb3_db_lock); | ||
161 | list_for_each_entry(client, &client_list, client_list) { | ||
162 | if (client->err_handler) | ||
163 | client->err_handler(tdev, status, error); | ||
164 | } | ||
165 | mutex_unlock(&cxgb3_db_lock); | ||
166 | } | ||
167 | |||
156 | static struct net_device *get_iff_from_mac(struct adapter *adapter, | 168 | static struct net_device *get_iff_from_mac(struct adapter *adapter, |
157 | const unsigned char *mac, | 169 | const unsigned char *mac, |
158 | unsigned int vlan) | 170 | unsigned int vlan) |
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h index d514e5019dfc..a8e8e5fcdf84 100644 --- a/drivers/net/cxgb3/cxgb3_offload.h +++ b/drivers/net/cxgb3/cxgb3_offload.h | |||
@@ -64,10 +64,16 @@ void cxgb3_register_client(struct cxgb3_client *client); | |||
64 | void cxgb3_unregister_client(struct cxgb3_client *client); | 64 | void cxgb3_unregister_client(struct cxgb3_client *client); |
65 | void cxgb3_add_clients(struct t3cdev *tdev); | 65 | void cxgb3_add_clients(struct t3cdev *tdev); |
66 | void cxgb3_remove_clients(struct t3cdev *tdev); | 66 | void cxgb3_remove_clients(struct t3cdev *tdev); |
67 | void cxgb3_err_notify(struct t3cdev *tdev, u32 status, u32 error); | ||
67 | 68 | ||
68 | typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev, | 69 | typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev, |
69 | struct sk_buff *skb, void *ctx); | 70 | struct sk_buff *skb, void *ctx); |
70 | 71 | ||
72 | enum { | ||
73 | OFFLOAD_STATUS_UP, | ||
74 | OFFLOAD_STATUS_DOWN | ||
75 | }; | ||
76 | |||
71 | struct cxgb3_client { | 77 | struct cxgb3_client { |
72 | char *name; | 78 | char *name; |
73 | void (*add) (struct t3cdev *); | 79 | void (*add) (struct t3cdev *); |
@@ -76,6 +82,7 @@ struct cxgb3_client { | |||
76 | int (*redirect)(void *ctx, struct dst_entry *old, | 82 | int (*redirect)(void *ctx, struct dst_entry *old, |
77 | struct dst_entry *new, struct l2t_entry *l2t); | 83 | struct dst_entry *new, struct l2t_entry *l2t); |
78 | struct list_head client_list; | 84 | struct list_head client_list; |
85 | void (*err_handler)(struct t3cdev *tdev, u32 status, u32 error); | ||
79 | }; | 86 | }; |
80 | 87 | ||
81 | /* | 88 | /* |
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index 379a1324db4e..272a0168f3e9 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c | |||
@@ -585,8 +585,7 @@ static void t3_reset_qset(struct sge_qset *q) | |||
585 | memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET); | 585 | memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET); |
586 | q->txq_stopped = 0; | 586 | q->txq_stopped = 0; |
587 | q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */ | 587 | q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */ |
588 | kfree(q->lro_frag_tbl); | 588 | q->lro_frag_tbl.nr_frags = q->lro_frag_tbl.len = 0; |
589 | q->lro_nfrags = q->lro_frag_len = 0; | ||
590 | } | 589 | } |
591 | 590 | ||
592 | 591 | ||
@@ -1938,6 +1937,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq, | |||
1938 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 1937 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1939 | } else | 1938 | } else |
1940 | skb->ip_summed = CHECKSUM_NONE; | 1939 | skb->ip_summed = CHECKSUM_NONE; |
1940 | skb_record_rx_queue(skb, qs - &adap->sge.qs[0]); | ||
1941 | 1941 | ||
1942 | if (unlikely(p->vlan_valid)) { | 1942 | if (unlikely(p->vlan_valid)) { |
1943 | struct vlan_group *grp = pi->vlan_grp; | 1943 | struct vlan_group *grp = pi->vlan_grp; |
@@ -1945,10 +1945,8 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq, | |||
1945 | qs->port_stats[SGE_PSTAT_VLANEX]++; | 1945 | qs->port_stats[SGE_PSTAT_VLANEX]++; |
1946 | if (likely(grp)) | 1946 | if (likely(grp)) |
1947 | if (lro) | 1947 | if (lro) |
1948 | lro_vlan_hwaccel_receive_skb(&qs->lro_mgr, skb, | 1948 | vlan_gro_receive(&qs->napi, grp, |
1949 | grp, | 1949 | ntohs(p->vlan), skb); |
1950 | ntohs(p->vlan), | ||
1951 | p); | ||
1952 | else { | 1950 | else { |
1953 | if (unlikely(pi->iscsi_ipv4addr && | 1951 | if (unlikely(pi->iscsi_ipv4addr && |
1954 | is_arp(skb))) { | 1952 | is_arp(skb))) { |
@@ -1965,7 +1963,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq, | |||
1965 | dev_kfree_skb_any(skb); | 1963 | dev_kfree_skb_any(skb); |
1966 | } else if (rq->polling) { | 1964 | } else if (rq->polling) { |
1967 | if (lro) | 1965 | if (lro) |
1968 | lro_receive_skb(&qs->lro_mgr, skb, p); | 1966 | napi_gro_receive(&qs->napi, skb); |
1969 | else { | 1967 | else { |
1970 | if (unlikely(pi->iscsi_ipv4addr && is_arp(skb))) | 1968 | if (unlikely(pi->iscsi_ipv4addr && is_arp(skb))) |
1971 | cxgb3_arp_process(adap, skb); | 1969 | cxgb3_arp_process(adap, skb); |
@@ -1981,59 +1979,6 @@ static inline int is_eth_tcp(u32 rss) | |||
1981 | } | 1979 | } |
1982 | 1980 | ||
1983 | /** | 1981 | /** |
1984 | * lro_frame_ok - check if an ingress packet is eligible for LRO | ||
1985 | * @p: the CPL header of the packet | ||
1986 | * | ||
1987 | * Returns true if a received packet is eligible for LRO. | ||
1988 | * The following conditions must be true: | ||
1989 | * - packet is TCP/IP Ethernet II (checked elsewhere) | ||
1990 | * - not an IP fragment | ||
1991 | * - no IP options | ||
1992 | * - TCP/IP checksums are correct | ||
1993 | * - the packet is for this host | ||
1994 | */ | ||
1995 | static inline int lro_frame_ok(const struct cpl_rx_pkt *p) | ||
1996 | { | ||
1997 | const struct ethhdr *eh = (struct ethhdr *)(p + 1); | ||
1998 | const struct iphdr *ih = (struct iphdr *)(eh + 1); | ||
1999 | |||
2000 | return (*((u8 *)p + 1) & 0x90) == 0x10 && p->csum == htons(0xffff) && | ||
2001 | eh->h_proto == htons(ETH_P_IP) && ih->ihl == (sizeof(*ih) >> 2); | ||
2002 | } | ||
2003 | |||
2004 | static int t3_get_lro_header(void **eh, void **iph, void **tcph, | ||
2005 | u64 *hdr_flags, void *priv) | ||
2006 | { | ||
2007 | const struct cpl_rx_pkt *cpl = priv; | ||
2008 | |||
2009 | if (!lro_frame_ok(cpl)) | ||
2010 | return -1; | ||
2011 | |||
2012 | *eh = (struct ethhdr *)(cpl + 1); | ||
2013 | *iph = (struct iphdr *)((struct ethhdr *)*eh + 1); | ||
2014 | *tcph = (struct tcphdr *)((struct iphdr *)*iph + 1); | ||
2015 | |||
2016 | *hdr_flags = LRO_IPV4 | LRO_TCP; | ||
2017 | return 0; | ||
2018 | } | ||
2019 | |||
2020 | static int t3_get_skb_header(struct sk_buff *skb, | ||
2021 | void **iph, void **tcph, u64 *hdr_flags, | ||
2022 | void *priv) | ||
2023 | { | ||
2024 | void *eh; | ||
2025 | |||
2026 | return t3_get_lro_header(&eh, iph, tcph, hdr_flags, priv); | ||
2027 | } | ||
2028 | |||
2029 | static int t3_get_frag_header(struct skb_frag_struct *frag, void **eh, | ||
2030 | void **iph, void **tcph, u64 *hdr_flags, | ||
2031 | void *priv) | ||
2032 | { | ||
2033 | return t3_get_lro_header(eh, iph, tcph, hdr_flags, priv); | ||
2034 | } | ||
2035 | |||
2036 | /** | ||
2037 | * lro_add_page - add a page chunk to an LRO session | 1982 | * lro_add_page - add a page chunk to an LRO session |
2038 | * @adap: the adapter | 1983 | * @adap: the adapter |
2039 | * @qs: the associated queue set | 1984 | * @qs: the associated queue set |
@@ -2049,8 +1994,9 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, | |||
2049 | { | 1994 | { |
2050 | struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; | 1995 | struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; |
2051 | struct cpl_rx_pkt *cpl; | 1996 | struct cpl_rx_pkt *cpl; |
2052 | struct skb_frag_struct *rx_frag = qs->lro_frag_tbl; | 1997 | struct skb_frag_struct *rx_frag = qs->lro_frag_tbl.frags; |
2053 | int nr_frags = qs->lro_nfrags, frag_len = qs->lro_frag_len; | 1998 | int nr_frags = qs->lro_frag_tbl.nr_frags; |
1999 | int frag_len = qs->lro_frag_tbl.len; | ||
2054 | int offset = 0; | 2000 | int offset = 0; |
2055 | 2001 | ||
2056 | if (!nr_frags) { | 2002 | if (!nr_frags) { |
@@ -2069,13 +2015,13 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, | |||
2069 | rx_frag->page_offset = sd->pg_chunk.offset + offset; | 2015 | rx_frag->page_offset = sd->pg_chunk.offset + offset; |
2070 | rx_frag->size = len; | 2016 | rx_frag->size = len; |
2071 | frag_len += len; | 2017 | frag_len += len; |
2072 | qs->lro_nfrags++; | 2018 | qs->lro_frag_tbl.nr_frags++; |
2073 | qs->lro_frag_len = frag_len; | 2019 | qs->lro_frag_tbl.len = frag_len; |
2074 | 2020 | ||
2075 | if (!complete) | 2021 | if (!complete) |
2076 | return; | 2022 | return; |
2077 | 2023 | ||
2078 | qs->lro_nfrags = qs->lro_frag_len = 0; | 2024 | qs->lro_frag_tbl.ip_summed = CHECKSUM_UNNECESSARY; |
2079 | cpl = qs->lro_va; | 2025 | cpl = qs->lro_va; |
2080 | 2026 | ||
2081 | if (unlikely(cpl->vlan_valid)) { | 2027 | if (unlikely(cpl->vlan_valid)) { |
@@ -2084,36 +2030,15 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, | |||
2084 | struct vlan_group *grp = pi->vlan_grp; | 2030 | struct vlan_group *grp = pi->vlan_grp; |
2085 | 2031 | ||
2086 | if (likely(grp != NULL)) { | 2032 | if (likely(grp != NULL)) { |
2087 | lro_vlan_hwaccel_receive_frags(&qs->lro_mgr, | 2033 | vlan_gro_frags(&qs->napi, grp, ntohs(cpl->vlan), |
2088 | qs->lro_frag_tbl, | 2034 | &qs->lro_frag_tbl); |
2089 | frag_len, frag_len, | 2035 | goto out; |
2090 | grp, ntohs(cpl->vlan), | ||
2091 | cpl, 0); | ||
2092 | return; | ||
2093 | } | 2036 | } |
2094 | } | 2037 | } |
2095 | lro_receive_frags(&qs->lro_mgr, qs->lro_frag_tbl, | 2038 | napi_gro_frags(&qs->napi, &qs->lro_frag_tbl); |
2096 | frag_len, frag_len, cpl, 0); | ||
2097 | } | ||
2098 | 2039 | ||
2099 | /** | 2040 | out: |
2100 | * init_lro_mgr - initialize a LRO manager object | 2041 | qs->lro_frag_tbl.nr_frags = qs->lro_frag_tbl.len = 0; |
2101 | * @lro_mgr: the LRO manager object | ||
2102 | */ | ||
2103 | static void init_lro_mgr(struct sge_qset *qs, struct net_lro_mgr *lro_mgr) | ||
2104 | { | ||
2105 | lro_mgr->dev = qs->netdev; | ||
2106 | lro_mgr->features = LRO_F_NAPI; | ||
2107 | lro_mgr->frag_align_pad = NET_IP_ALIGN; | ||
2108 | lro_mgr->ip_summed = CHECKSUM_UNNECESSARY; | ||
2109 | lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; | ||
2110 | lro_mgr->max_desc = T3_MAX_LRO_SES; | ||
2111 | lro_mgr->lro_arr = qs->lro_desc; | ||
2112 | lro_mgr->get_frag_header = t3_get_frag_header; | ||
2113 | lro_mgr->get_skb_header = t3_get_skb_header; | ||
2114 | lro_mgr->max_aggr = T3_MAX_LRO_MAX_PKTS; | ||
2115 | if (lro_mgr->max_aggr > MAX_SKB_FRAGS) | ||
2116 | lro_mgr->max_aggr = MAX_SKB_FRAGS; | ||
2117 | } | 2042 | } |
2118 | 2043 | ||
2119 | /** | 2044 | /** |
@@ -2357,10 +2282,6 @@ next_fl: | |||
2357 | } | 2282 | } |
2358 | 2283 | ||
2359 | deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered); | 2284 | deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered); |
2360 | lro_flush_all(&qs->lro_mgr); | ||
2361 | qs->port_stats[SGE_PSTAT_LRO_AGGR] = qs->lro_mgr.stats.aggregated; | ||
2362 | qs->port_stats[SGE_PSTAT_LRO_FLUSHED] = qs->lro_mgr.stats.flushed; | ||
2363 | qs->port_stats[SGE_PSTAT_LRO_NO_DESC] = qs->lro_mgr.stats.no_desc; | ||
2364 | 2285 | ||
2365 | if (sleeping) | 2286 | if (sleeping) |
2366 | check_ring_db(adap, qs, sleeping); | 2287 | check_ring_db(adap, qs, sleeping); |
@@ -2907,7 +2828,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, | |||
2907 | { | 2828 | { |
2908 | int i, avail, ret = -ENOMEM; | 2829 | int i, avail, ret = -ENOMEM; |
2909 | struct sge_qset *q = &adapter->sge.qs[id]; | 2830 | struct sge_qset *q = &adapter->sge.qs[id]; |
2910 | struct net_lro_mgr *lro_mgr = &q->lro_mgr; | ||
2911 | 2831 | ||
2912 | init_qset_cntxt(q, id); | 2832 | init_qset_cntxt(q, id); |
2913 | setup_timer(&q->tx_reclaim_timer, sge_timer_cb, (unsigned long)q); | 2833 | setup_timer(&q->tx_reclaim_timer, sge_timer_cb, (unsigned long)q); |
@@ -2987,10 +2907,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, | |||
2987 | q->fl[0].order = FL0_PG_ORDER; | 2907 | q->fl[0].order = FL0_PG_ORDER; |
2988 | q->fl[1].order = FL1_PG_ORDER; | 2908 | q->fl[1].order = FL1_PG_ORDER; |
2989 | 2909 | ||
2990 | q->lro_frag_tbl = kcalloc(MAX_FRAME_SIZE / FL1_PG_CHUNK_SIZE + 1, | ||
2991 | sizeof(struct skb_frag_struct), | ||
2992 | GFP_KERNEL); | ||
2993 | q->lro_nfrags = q->lro_frag_len = 0; | ||
2994 | spin_lock_irq(&adapter->sge.reg_lock); | 2910 | spin_lock_irq(&adapter->sge.reg_lock); |
2995 | 2911 | ||
2996 | /* FL threshold comparison uses < */ | 2912 | /* FL threshold comparison uses < */ |
@@ -3042,8 +2958,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, | |||
3042 | q->tx_q = netdevq; | 2958 | q->tx_q = netdevq; |
3043 | t3_update_qset_coalesce(q, p); | 2959 | t3_update_qset_coalesce(q, p); |
3044 | 2960 | ||
3045 | init_lro_mgr(q, lro_mgr); | ||
3046 | |||
3047 | avail = refill_fl(adapter, &q->fl[0], q->fl[0].size, | 2961 | avail = refill_fl(adapter, &q->fl[0], q->fl[0].size, |
3048 | GFP_KERNEL | __GFP_COMP); | 2962 | GFP_KERNEL | __GFP_COMP); |
3049 | if (!avail) { | 2963 | if (!avail) { |
diff --git a/drivers/net/declance.c b/drivers/net/declance.c index 7ce3053530f9..861c867fca87 100644 --- a/drivers/net/declance.c +++ b/drivers/net/declance.c | |||
@@ -1027,7 +1027,7 @@ static int __init dec_lance_probe(struct device *bdev, const int type) | |||
1027 | printk(version); | 1027 | printk(version); |
1028 | 1028 | ||
1029 | if (bdev) | 1029 | if (bdev) |
1030 | snprintf(name, sizeof(name), "%s", bdev->bus_id); | 1030 | snprintf(name, sizeof(name), "%s", dev_name(bdev)); |
1031 | else { | 1031 | else { |
1032 | i = 0; | 1032 | i = 0; |
1033 | dev = root_lance_dev; | 1033 | dev = root_lance_dev; |
@@ -1105,10 +1105,10 @@ static int __init dec_lance_probe(struct device *bdev, const int type) | |||
1105 | 1105 | ||
1106 | start = to_tc_dev(bdev)->resource.start; | 1106 | start = to_tc_dev(bdev)->resource.start; |
1107 | len = to_tc_dev(bdev)->resource.end - start + 1; | 1107 | len = to_tc_dev(bdev)->resource.end - start + 1; |
1108 | if (!request_mem_region(start, len, bdev->bus_id)) { | 1108 | if (!request_mem_region(start, len, dev_name(bdev))) { |
1109 | printk(KERN_ERR | 1109 | printk(KERN_ERR |
1110 | "%s: Unable to reserve MMIO resource\n", | 1110 | "%s: Unable to reserve MMIO resource\n", |
1111 | bdev->bus_id); | 1111 | dev_name(bdev)); |
1112 | ret = -EBUSY; | 1112 | ret = -EBUSY; |
1113 | goto err_out_dev; | 1113 | goto err_out_dev; |
1114 | } | 1114 | } |
diff --git a/drivers/net/depca.c b/drivers/net/depca.c index e4cef491dc73..55625dbbae5a 100644 --- a/drivers/net/depca.c +++ b/drivers/net/depca.c | |||
@@ -606,8 +606,8 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device) | |||
606 | if (!mem_start || lp->adapter < DEPCA || lp->adapter >=unknown) | 606 | if (!mem_start || lp->adapter < DEPCA || lp->adapter >=unknown) |
607 | return -ENXIO; | 607 | return -ENXIO; |
608 | 608 | ||
609 | printk ("%s: %s at 0x%04lx", | 609 | printk("%s: %s at 0x%04lx", |
610 | device->bus_id, depca_signature[lp->adapter], ioaddr); | 610 | dev_name(device), depca_signature[lp->adapter], ioaddr); |
611 | 611 | ||
612 | switch (lp->depca_bus) { | 612 | switch (lp->depca_bus) { |
613 | #ifdef CONFIG_MCA | 613 | #ifdef CONFIG_MCA |
@@ -669,7 +669,7 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device) | |||
669 | 669 | ||
670 | spin_lock_init(&lp->lock); | 670 | spin_lock_init(&lp->lock); |
671 | sprintf(lp->adapter_name, "%s (%s)", | 671 | sprintf(lp->adapter_name, "%s (%s)", |
672 | depca_signature[lp->adapter], device->bus_id); | 672 | depca_signature[lp->adapter], dev_name(device)); |
673 | status = -EBUSY; | 673 | status = -EBUSY; |
674 | 674 | ||
675 | /* Initialisation Block */ | 675 | /* Initialisation Block */ |
diff --git a/drivers/net/e100.c b/drivers/net/e100.c index 86bb876fb123..861d2eeaa43c 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c | |||
@@ -1944,9 +1944,9 @@ static irqreturn_t e100_intr(int irq, void *dev_id) | |||
1944 | if (stat_ack & stat_ack_rnr) | 1944 | if (stat_ack & stat_ack_rnr) |
1945 | nic->ru_running = RU_SUSPENDED; | 1945 | nic->ru_running = RU_SUSPENDED; |
1946 | 1946 | ||
1947 | if (likely(netif_rx_schedule_prep(&nic->napi))) { | 1947 | if (likely(napi_schedule_prep(&nic->napi))) { |
1948 | e100_disable_irq(nic); | 1948 | e100_disable_irq(nic); |
1949 | __netif_rx_schedule(&nic->napi); | 1949 | __napi_schedule(&nic->napi); |
1950 | } | 1950 | } |
1951 | 1951 | ||
1952 | return IRQ_HANDLED; | 1952 | return IRQ_HANDLED; |
@@ -1962,7 +1962,7 @@ static int e100_poll(struct napi_struct *napi, int budget) | |||
1962 | 1962 | ||
1963 | /* If budget not fully consumed, exit the polling mode */ | 1963 | /* If budget not fully consumed, exit the polling mode */ |
1964 | if (work_done < budget) { | 1964 | if (work_done < budget) { |
1965 | netif_rx_complete(napi); | 1965 | napi_complete(napi); |
1966 | e100_enable_irq(nic); | 1966 | e100_enable_irq(nic); |
1967 | } | 1967 | } |
1968 | 1968 | ||
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h index f5581de04757..e9a416f40162 100644 --- a/drivers/net/e1000/e1000.h +++ b/drivers/net/e1000/e1000.h | |||
@@ -182,7 +182,6 @@ struct e1000_tx_ring { | |||
182 | /* array of buffer information structs */ | 182 | /* array of buffer information structs */ |
183 | struct e1000_buffer *buffer_info; | 183 | struct e1000_buffer *buffer_info; |
184 | 184 | ||
185 | spinlock_t tx_lock; | ||
186 | u16 tdh; | 185 | u16 tdh; |
187 | u16 tdt; | 186 | u16 tdt; |
188 | bool last_tx_tso; | 187 | bool last_tx_tso; |
@@ -238,7 +237,6 @@ struct e1000_adapter { | |||
238 | u16 link_speed; | 237 | u16 link_speed; |
239 | u16 link_duplex; | 238 | u16 link_duplex; |
240 | spinlock_t stats_lock; | 239 | spinlock_t stats_lock; |
241 | spinlock_t tx_queue_lock; | ||
242 | unsigned int total_tx_bytes; | 240 | unsigned int total_tx_bytes; |
243 | unsigned int total_tx_packets; | 241 | unsigned int total_tx_packets; |
244 | unsigned int total_rx_bytes; | 242 | unsigned int total_rx_bytes; |
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index c986978ce761..40db34deebde 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -1048,8 +1048,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
1048 | if (pci_using_dac) | 1048 | if (pci_using_dac) |
1049 | netdev->features |= NETIF_F_HIGHDMA; | 1049 | netdev->features |= NETIF_F_HIGHDMA; |
1050 | 1050 | ||
1051 | netdev->features |= NETIF_F_LLTX; | ||
1052 | |||
1053 | netdev->vlan_features |= NETIF_F_TSO; | 1051 | netdev->vlan_features |= NETIF_F_TSO; |
1054 | netdev->vlan_features |= NETIF_F_TSO6; | 1052 | netdev->vlan_features |= NETIF_F_TSO6; |
1055 | netdev->vlan_features |= NETIF_F_HW_CSUM; | 1053 | netdev->vlan_features |= NETIF_F_HW_CSUM; |
@@ -1368,8 +1366,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter) | |||
1368 | return -ENOMEM; | 1366 | return -ENOMEM; |
1369 | } | 1367 | } |
1370 | 1368 | ||
1371 | spin_lock_init(&adapter->tx_queue_lock); | ||
1372 | |||
1373 | /* Explicitly disable IRQ since the NIC can be in any state. */ | 1369 | /* Explicitly disable IRQ since the NIC can be in any state. */ |
1374 | e1000_irq_disable(adapter); | 1370 | e1000_irq_disable(adapter); |
1375 | 1371 | ||
@@ -1624,7 +1620,6 @@ setup_tx_desc_die: | |||
1624 | 1620 | ||
1625 | txdr->next_to_use = 0; | 1621 | txdr->next_to_use = 0; |
1626 | txdr->next_to_clean = 0; | 1622 | txdr->next_to_clean = 0; |
1627 | spin_lock_init(&txdr->tx_lock); | ||
1628 | 1623 | ||
1629 | return 0; | 1624 | return 0; |
1630 | } | 1625 | } |
@@ -2865,11 +2860,11 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, | |||
2865 | return false; | 2860 | return false; |
2866 | 2861 | ||
2867 | switch (skb->protocol) { | 2862 | switch (skb->protocol) { |
2868 | case __constant_htons(ETH_P_IP): | 2863 | case cpu_to_be16(ETH_P_IP): |
2869 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) | 2864 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) |
2870 | cmd_len |= E1000_TXD_CMD_TCP; | 2865 | cmd_len |= E1000_TXD_CMD_TCP; |
2871 | break; | 2866 | break; |
2872 | case __constant_htons(ETH_P_IPV6): | 2867 | case cpu_to_be16(ETH_P_IPV6): |
2873 | /* XXX not handling all IPV6 headers */ | 2868 | /* XXX not handling all IPV6 headers */ |
2874 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) | 2869 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) |
2875 | cmd_len |= E1000_TXD_CMD_TCP; | 2870 | cmd_len |= E1000_TXD_CMD_TCP; |
@@ -3185,7 +3180,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
3185 | unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; | 3180 | unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; |
3186 | unsigned int tx_flags = 0; | 3181 | unsigned int tx_flags = 0; |
3187 | unsigned int len = skb->len - skb->data_len; | 3182 | unsigned int len = skb->len - skb->data_len; |
3188 | unsigned long flags; | ||
3189 | unsigned int nr_frags; | 3183 | unsigned int nr_frags; |
3190 | unsigned int mss; | 3184 | unsigned int mss; |
3191 | int count = 0; | 3185 | int count = 0; |
@@ -3290,22 +3284,15 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
3290 | (hw->mac_type == e1000_82573)) | 3284 | (hw->mac_type == e1000_82573)) |
3291 | e1000_transfer_dhcp_info(adapter, skb); | 3285 | e1000_transfer_dhcp_info(adapter, skb); |
3292 | 3286 | ||
3293 | if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) | ||
3294 | /* Collision - tell upper layer to requeue */ | ||
3295 | return NETDEV_TX_LOCKED; | ||
3296 | |||
3297 | /* need: count + 2 desc gap to keep tail from touching | 3287 | /* need: count + 2 desc gap to keep tail from touching |
3298 | * head, otherwise try next time */ | 3288 | * head, otherwise try next time */ |
3299 | if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) { | 3289 | if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) |
3300 | spin_unlock_irqrestore(&tx_ring->tx_lock, flags); | ||
3301 | return NETDEV_TX_BUSY; | 3290 | return NETDEV_TX_BUSY; |
3302 | } | ||
3303 | 3291 | ||
3304 | if (unlikely(hw->mac_type == e1000_82547)) { | 3292 | if (unlikely(hw->mac_type == e1000_82547)) { |
3305 | if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) { | 3293 | if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) { |
3306 | netif_stop_queue(netdev); | 3294 | netif_stop_queue(netdev); |
3307 | mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1); | 3295 | mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1); |
3308 | spin_unlock_irqrestore(&tx_ring->tx_lock, flags); | ||
3309 | return NETDEV_TX_BUSY; | 3296 | return NETDEV_TX_BUSY; |
3310 | } | 3297 | } |
3311 | } | 3298 | } |
@@ -3320,7 +3307,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
3320 | tso = e1000_tso(adapter, tx_ring, skb); | 3307 | tso = e1000_tso(adapter, tx_ring, skb); |
3321 | if (tso < 0) { | 3308 | if (tso < 0) { |
3322 | dev_kfree_skb_any(skb); | 3309 | dev_kfree_skb_any(skb); |
3323 | spin_unlock_irqrestore(&tx_ring->tx_lock, flags); | ||
3324 | return NETDEV_TX_OK; | 3310 | return NETDEV_TX_OK; |
3325 | } | 3311 | } |
3326 | 3312 | ||
@@ -3345,7 +3331,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
3345 | /* Make sure there is space in the ring for the next send. */ | 3331 | /* Make sure there is space in the ring for the next send. */ |
3346 | e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); | 3332 | e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); |
3347 | 3333 | ||
3348 | spin_unlock_irqrestore(&tx_ring->tx_lock, flags); | ||
3349 | return NETDEV_TX_OK; | 3334 | return NETDEV_TX_OK; |
3350 | } | 3335 | } |
3351 | 3336 | ||
@@ -3687,12 +3672,12 @@ static irqreturn_t e1000_intr_msi(int irq, void *data) | |||
3687 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | 3672 | mod_timer(&adapter->watchdog_timer, jiffies + 1); |
3688 | } | 3673 | } |
3689 | 3674 | ||
3690 | if (likely(netif_rx_schedule_prep(&adapter->napi))) { | 3675 | if (likely(napi_schedule_prep(&adapter->napi))) { |
3691 | adapter->total_tx_bytes = 0; | 3676 | adapter->total_tx_bytes = 0; |
3692 | adapter->total_tx_packets = 0; | 3677 | adapter->total_tx_packets = 0; |
3693 | adapter->total_rx_bytes = 0; | 3678 | adapter->total_rx_bytes = 0; |
3694 | adapter->total_rx_packets = 0; | 3679 | adapter->total_rx_packets = 0; |
3695 | __netif_rx_schedule(&adapter->napi); | 3680 | __napi_schedule(&adapter->napi); |
3696 | } else | 3681 | } else |
3697 | e1000_irq_enable(adapter); | 3682 | e1000_irq_enable(adapter); |
3698 | 3683 | ||
@@ -3747,12 +3732,12 @@ static irqreturn_t e1000_intr(int irq, void *data) | |||
3747 | ew32(IMC, ~0); | 3732 | ew32(IMC, ~0); |
3748 | E1000_WRITE_FLUSH(); | 3733 | E1000_WRITE_FLUSH(); |
3749 | } | 3734 | } |
3750 | if (likely(netif_rx_schedule_prep(&adapter->napi))) { | 3735 | if (likely(napi_schedule_prep(&adapter->napi))) { |
3751 | adapter->total_tx_bytes = 0; | 3736 | adapter->total_tx_bytes = 0; |
3752 | adapter->total_tx_packets = 0; | 3737 | adapter->total_tx_packets = 0; |
3753 | adapter->total_rx_bytes = 0; | 3738 | adapter->total_rx_bytes = 0; |
3754 | adapter->total_rx_packets = 0; | 3739 | adapter->total_rx_packets = 0; |
3755 | __netif_rx_schedule(&adapter->napi); | 3740 | __napi_schedule(&adapter->napi); |
3756 | } else | 3741 | } else |
3757 | /* this really should not happen! if it does it is basically a | 3742 | /* this really should not happen! if it does it is basically a |
3758 | * bug, but not a hard error, so enable ints and continue */ | 3743 | * bug, but not a hard error, so enable ints and continue */ |
@@ -3773,15 +3758,7 @@ static int e1000_clean(struct napi_struct *napi, int budget) | |||
3773 | 3758 | ||
3774 | adapter = netdev_priv(poll_dev); | 3759 | adapter = netdev_priv(poll_dev); |
3775 | 3760 | ||
3776 | /* e1000_clean is called per-cpu. This lock protects | 3761 | tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); |
3777 | * tx_ring[0] from being cleaned by multiple cpus | ||
3778 | * simultaneously. A failure obtaining the lock means | ||
3779 | * tx_ring[0] is currently being cleaned anyway. */ | ||
3780 | if (spin_trylock(&adapter->tx_queue_lock)) { | ||
3781 | tx_cleaned = e1000_clean_tx_irq(adapter, | ||
3782 | &adapter->tx_ring[0]); | ||
3783 | spin_unlock(&adapter->tx_queue_lock); | ||
3784 | } | ||
3785 | 3762 | ||
3786 | adapter->clean_rx(adapter, &adapter->rx_ring[0], | 3763 | adapter->clean_rx(adapter, &adapter->rx_ring[0], |
3787 | &work_done, budget); | 3764 | &work_done, budget); |
@@ -3793,7 +3770,7 @@ static int e1000_clean(struct napi_struct *napi, int budget) | |||
3793 | if (work_done < budget) { | 3770 | if (work_done < budget) { |
3794 | if (likely(adapter->itr_setting & 3)) | 3771 | if (likely(adapter->itr_setting & 3)) |
3795 | e1000_set_itr(adapter); | 3772 | e1000_set_itr(adapter); |
3796 | netif_rx_complete(napi); | 3773 | napi_complete(napi); |
3797 | e1000_irq_enable(adapter); | 3774 | e1000_irq_enable(adapter); |
3798 | } | 3775 | } |
3799 | 3776 | ||
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index 37bcb190eef8..28bf9a51346f 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h | |||
@@ -195,8 +195,6 @@ struct e1000_adapter { | |||
195 | u16 link_duplex; | 195 | u16 link_duplex; |
196 | u16 eeprom_vers; | 196 | u16 eeprom_vers; |
197 | 197 | ||
198 | spinlock_t tx_queue_lock; /* prevent concurrent tail updates */ | ||
199 | |||
200 | /* track device up/down/testing state */ | 198 | /* track device up/down/testing state */ |
201 | unsigned long state; | 199 | unsigned long state; |
202 | 200 | ||
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 91817d0afcaf..c425b19e3362 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -47,7 +47,7 @@ | |||
47 | 47 | ||
48 | #include "e1000.h" | 48 | #include "e1000.h" |
49 | 49 | ||
50 | #define DRV_VERSION "0.3.3.3-k6" | 50 | #define DRV_VERSION "0.3.3.4-k2" |
51 | char e1000e_driver_name[] = "e1000e"; | 51 | char e1000e_driver_name[] = "e1000e"; |
52 | const char e1000e_driver_version[] = DRV_VERSION; | 52 | const char e1000e_driver_version[] = DRV_VERSION; |
53 | 53 | ||
@@ -99,8 +99,8 @@ static void e1000_receive_skb(struct e1000_adapter *adapter, | |||
99 | skb->protocol = eth_type_trans(skb, netdev); | 99 | skb->protocol = eth_type_trans(skb, netdev); |
100 | 100 | ||
101 | if (adapter->vlgrp && (status & E1000_RXD_STAT_VP)) | 101 | if (adapter->vlgrp && (status & E1000_RXD_STAT_VP)) |
102 | vlan_hwaccel_receive_skb(skb, adapter->vlgrp, | 102 | vlan_gro_receive(&adapter->napi, adapter->vlgrp, |
103 | le16_to_cpu(vlan)); | 103 | le16_to_cpu(vlan), skb); |
104 | else | 104 | else |
105 | napi_gro_receive(&adapter->napi, skb); | 105 | napi_gro_receive(&adapter->napi, skb); |
106 | } | 106 | } |
@@ -1179,12 +1179,12 @@ static irqreturn_t e1000_intr_msi(int irq, void *data) | |||
1179 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | 1179 | mod_timer(&adapter->watchdog_timer, jiffies + 1); |
1180 | } | 1180 | } |
1181 | 1181 | ||
1182 | if (netif_rx_schedule_prep(&adapter->napi)) { | 1182 | if (napi_schedule_prep(&adapter->napi)) { |
1183 | adapter->total_tx_bytes = 0; | 1183 | adapter->total_tx_bytes = 0; |
1184 | adapter->total_tx_packets = 0; | 1184 | adapter->total_tx_packets = 0; |
1185 | adapter->total_rx_bytes = 0; | 1185 | adapter->total_rx_bytes = 0; |
1186 | adapter->total_rx_packets = 0; | 1186 | adapter->total_rx_packets = 0; |
1187 | __netif_rx_schedule(&adapter->napi); | 1187 | __napi_schedule(&adapter->napi); |
1188 | } | 1188 | } |
1189 | 1189 | ||
1190 | return IRQ_HANDLED; | 1190 | return IRQ_HANDLED; |
@@ -1246,12 +1246,12 @@ static irqreturn_t e1000_intr(int irq, void *data) | |||
1246 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | 1246 | mod_timer(&adapter->watchdog_timer, jiffies + 1); |
1247 | } | 1247 | } |
1248 | 1248 | ||
1249 | if (netif_rx_schedule_prep(&adapter->napi)) { | 1249 | if (napi_schedule_prep(&adapter->napi)) { |
1250 | adapter->total_tx_bytes = 0; | 1250 | adapter->total_tx_bytes = 0; |
1251 | adapter->total_tx_packets = 0; | 1251 | adapter->total_tx_packets = 0; |
1252 | adapter->total_rx_bytes = 0; | 1252 | adapter->total_rx_bytes = 0; |
1253 | adapter->total_rx_packets = 0; | 1253 | adapter->total_rx_packets = 0; |
1254 | __netif_rx_schedule(&adapter->napi); | 1254 | __napi_schedule(&adapter->napi); |
1255 | } | 1255 | } |
1256 | 1256 | ||
1257 | return IRQ_HANDLED; | 1257 | return IRQ_HANDLED; |
@@ -1320,10 +1320,10 @@ static irqreturn_t e1000_intr_msix_rx(int irq, void *data) | |||
1320 | adapter->rx_ring->set_itr = 0; | 1320 | adapter->rx_ring->set_itr = 0; |
1321 | } | 1321 | } |
1322 | 1322 | ||
1323 | if (netif_rx_schedule_prep(&adapter->napi)) { | 1323 | if (napi_schedule_prep(&adapter->napi)) { |
1324 | adapter->total_rx_bytes = 0; | 1324 | adapter->total_rx_bytes = 0; |
1325 | adapter->total_rx_packets = 0; | 1325 | adapter->total_rx_packets = 0; |
1326 | __netif_rx_schedule(&adapter->napi); | 1326 | __napi_schedule(&adapter->napi); |
1327 | } | 1327 | } |
1328 | return IRQ_HANDLED; | 1328 | return IRQ_HANDLED; |
1329 | } | 1329 | } |
@@ -1698,7 +1698,6 @@ int e1000e_setup_tx_resources(struct e1000_adapter *adapter) | |||
1698 | 1698 | ||
1699 | tx_ring->next_to_use = 0; | 1699 | tx_ring->next_to_use = 0; |
1700 | tx_ring->next_to_clean = 0; | 1700 | tx_ring->next_to_clean = 0; |
1701 | spin_lock_init(&adapter->tx_queue_lock); | ||
1702 | 1701 | ||
1703 | return 0; | 1702 | return 0; |
1704 | err: | 1703 | err: |
@@ -2007,16 +2006,7 @@ static int e1000_clean(struct napi_struct *napi, int budget) | |||
2007 | !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) | 2006 | !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) |
2008 | goto clean_rx; | 2007 | goto clean_rx; |
2009 | 2008 | ||
2010 | /* | 2009 | tx_cleaned = e1000_clean_tx_irq(adapter); |
2011 | * e1000_clean is called per-cpu. This lock protects | ||
2012 | * tx_ring from being cleaned by multiple cpus | ||
2013 | * simultaneously. A failure obtaining the lock means | ||
2014 | * tx_ring is currently being cleaned anyway. | ||
2015 | */ | ||
2016 | if (spin_trylock(&adapter->tx_queue_lock)) { | ||
2017 | tx_cleaned = e1000_clean_tx_irq(adapter); | ||
2018 | spin_unlock(&adapter->tx_queue_lock); | ||
2019 | } | ||
2020 | 2010 | ||
2021 | clean_rx: | 2011 | clean_rx: |
2022 | adapter->clean_rx(adapter, &work_done, budget); | 2012 | adapter->clean_rx(adapter, &work_done, budget); |
@@ -2028,7 +2018,7 @@ clean_rx: | |||
2028 | if (work_done < budget) { | 2018 | if (work_done < budget) { |
2029 | if (adapter->itr_setting & 3) | 2019 | if (adapter->itr_setting & 3) |
2030 | e1000_set_itr(adapter); | 2020 | e1000_set_itr(adapter); |
2031 | netif_rx_complete(napi); | 2021 | napi_complete(napi); |
2032 | if (adapter->msix_entries) | 2022 | if (adapter->msix_entries) |
2033 | ew32(IMS, adapter->rx_ring->ims_val); | 2023 | ew32(IMS, adapter->rx_ring->ims_val); |
2034 | else | 2024 | else |
@@ -2922,8 +2912,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter) | |||
2922 | if (e1000_alloc_queues(adapter)) | 2912 | if (e1000_alloc_queues(adapter)) |
2923 | return -ENOMEM; | 2913 | return -ENOMEM; |
2924 | 2914 | ||
2925 | spin_lock_init(&adapter->tx_queue_lock); | ||
2926 | |||
2927 | /* Explicitly disable IRQ since the NIC can be in any state. */ | 2915 | /* Explicitly disable IRQ since the NIC can be in any state. */ |
2928 | e1000_irq_disable(adapter); | 2916 | e1000_irq_disable(adapter); |
2929 | 2917 | ||
@@ -3782,11 +3770,11 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb) | |||
3782 | return 0; | 3770 | return 0; |
3783 | 3771 | ||
3784 | switch (skb->protocol) { | 3772 | switch (skb->protocol) { |
3785 | case __constant_htons(ETH_P_IP): | 3773 | case cpu_to_be16(ETH_P_IP): |
3786 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) | 3774 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) |
3787 | cmd_len |= E1000_TXD_CMD_TCP; | 3775 | cmd_len |= E1000_TXD_CMD_TCP; |
3788 | break; | 3776 | break; |
3789 | case __constant_htons(ETH_P_IPV6): | 3777 | case cpu_to_be16(ETH_P_IPV6): |
3790 | /* XXX not handling all IPV6 headers */ | 3778 | /* XXX not handling all IPV6 headers */ |
3791 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) | 3779 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) |
3792 | cmd_len |= E1000_TXD_CMD_TCP; | 3780 | cmd_len |= E1000_TXD_CMD_TCP; |
@@ -4069,7 +4057,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
4069 | unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; | 4057 | unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; |
4070 | unsigned int tx_flags = 0; | 4058 | unsigned int tx_flags = 0; |
4071 | unsigned int len = skb->len - skb->data_len; | 4059 | unsigned int len = skb->len - skb->data_len; |
4072 | unsigned long irq_flags; | ||
4073 | unsigned int nr_frags; | 4060 | unsigned int nr_frags; |
4074 | unsigned int mss; | 4061 | unsigned int mss; |
4075 | int count = 0; | 4062 | int count = 0; |
@@ -4138,18 +4125,12 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
4138 | if (adapter->hw.mac.tx_pkt_filtering) | 4125 | if (adapter->hw.mac.tx_pkt_filtering) |
4139 | e1000_transfer_dhcp_info(adapter, skb); | 4126 | e1000_transfer_dhcp_info(adapter, skb); |
4140 | 4127 | ||
4141 | if (!spin_trylock_irqsave(&adapter->tx_queue_lock, irq_flags)) | ||
4142 | /* Collision - tell upper layer to requeue */ | ||
4143 | return NETDEV_TX_LOCKED; | ||
4144 | |||
4145 | /* | 4128 | /* |
4146 | * need: count + 2 desc gap to keep tail from touching | 4129 | * need: count + 2 desc gap to keep tail from touching |
4147 | * head, otherwise try next time | 4130 | * head, otherwise try next time |
4148 | */ | 4131 | */ |
4149 | if (e1000_maybe_stop_tx(netdev, count + 2)) { | 4132 | if (e1000_maybe_stop_tx(netdev, count + 2)) |
4150 | spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags); | ||
4151 | return NETDEV_TX_BUSY; | 4133 | return NETDEV_TX_BUSY; |
4152 | } | ||
4153 | 4134 | ||
4154 | if (adapter->vlgrp && vlan_tx_tag_present(skb)) { | 4135 | if (adapter->vlgrp && vlan_tx_tag_present(skb)) { |
4155 | tx_flags |= E1000_TX_FLAGS_VLAN; | 4136 | tx_flags |= E1000_TX_FLAGS_VLAN; |
@@ -4161,7 +4142,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
4161 | tso = e1000_tso(adapter, skb); | 4142 | tso = e1000_tso(adapter, skb); |
4162 | if (tso < 0) { | 4143 | if (tso < 0) { |
4163 | dev_kfree_skb_any(skb); | 4144 | dev_kfree_skb_any(skb); |
4164 | spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags); | ||
4165 | return NETDEV_TX_OK; | 4145 | return NETDEV_TX_OK; |
4166 | } | 4146 | } |
4167 | 4147 | ||
@@ -4182,7 +4162,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
4182 | if (count < 0) { | 4162 | if (count < 0) { |
4183 | /* handle pci_map_single() error in e1000_tx_map */ | 4163 | /* handle pci_map_single() error in e1000_tx_map */ |
4184 | dev_kfree_skb_any(skb); | 4164 | dev_kfree_skb_any(skb); |
4185 | spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags); | ||
4186 | return NETDEV_TX_OK; | 4165 | return NETDEV_TX_OK; |
4187 | } | 4166 | } |
4188 | 4167 | ||
@@ -4193,7 +4172,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
4193 | /* Make sure there is space in the ring for the next send. */ | 4172 | /* Make sure there is space in the ring for the next send. */ |
4194 | e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2); | 4173 | e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2); |
4195 | 4174 | ||
4196 | spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags); | ||
4197 | return NETDEV_TX_OK; | 4175 | return NETDEV_TX_OK; |
4198 | } | 4176 | } |
4199 | 4177 | ||
@@ -4922,12 +4900,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
4922 | if (pci_using_dac) | 4900 | if (pci_using_dac) |
4923 | netdev->features |= NETIF_F_HIGHDMA; | 4901 | netdev->features |= NETIF_F_HIGHDMA; |
4924 | 4902 | ||
4925 | /* | ||
4926 | * We should not be using LLTX anymore, but we are still Tx faster with | ||
4927 | * it. | ||
4928 | */ | ||
4929 | netdev->features |= NETIF_F_LLTX; | ||
4930 | |||
4931 | if (e1000e_enable_mng_pass_thru(&adapter->hw)) | 4903 | if (e1000e_enable_mng_pass_thru(&adapter->hw)) |
4932 | adapter->flags |= FLAG_MNG_PT_ENABLED; | 4904 | adapter->flags |= FLAG_MNG_PT_ENABLED; |
4933 | 4905 | ||
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index 6271b9411ccf..f7e2ccfd3e8c 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h | |||
@@ -40,7 +40,7 @@ | |||
40 | #include <asm/io.h> | 40 | #include <asm/io.h> |
41 | 41 | ||
42 | #define DRV_NAME "ehea" | 42 | #define DRV_NAME "ehea" |
43 | #define DRV_VERSION "EHEA_0096" | 43 | #define DRV_VERSION "EHEA_0097" |
44 | 44 | ||
45 | /* eHEA capability flags */ | 45 | /* eHEA capability flags */ |
46 | #define DLPAR_PORT_ADD_REM 1 | 46 | #define DLPAR_PORT_ADD_REM 1 |
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index dfe92264e825..489fdb90f764 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c | |||
@@ -308,7 +308,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev) | |||
308 | 308 | ||
309 | memset(stats, 0, sizeof(*stats)); | 309 | memset(stats, 0, sizeof(*stats)); |
310 | 310 | ||
311 | cb2 = kzalloc(PAGE_SIZE, GFP_ATOMIC); | 311 | cb2 = (void *)get_zeroed_page(GFP_ATOMIC); |
312 | if (!cb2) { | 312 | if (!cb2) { |
313 | ehea_error("no mem for cb2"); | 313 | ehea_error("no mem for cb2"); |
314 | goto out; | 314 | goto out; |
@@ -341,7 +341,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev) | |||
341 | stats->rx_packets = rx_packets; | 341 | stats->rx_packets = rx_packets; |
342 | 342 | ||
343 | out_herr: | 343 | out_herr: |
344 | kfree(cb2); | 344 | free_page((unsigned long)cb2); |
345 | out: | 345 | out: |
346 | return stats; | 346 | return stats; |
347 | } | 347 | } |
@@ -370,8 +370,6 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes) | |||
370 | EHEA_L_PKT_SIZE); | 370 | EHEA_L_PKT_SIZE); |
371 | if (!skb_arr_rq1[index]) { | 371 | if (!skb_arr_rq1[index]) { |
372 | pr->rq1_skba.os_skbs = fill_wqes - i; | 372 | pr->rq1_skba.os_skbs = fill_wqes - i; |
373 | ehea_error("%s: no mem for skb/%d wqes filled", | ||
374 | dev->name, i); | ||
375 | break; | 373 | break; |
376 | } | 374 | } |
377 | } | 375 | } |
@@ -387,26 +385,19 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes) | |||
387 | ehea_update_rq1a(pr->qp, adder); | 385 | ehea_update_rq1a(pr->qp, adder); |
388 | } | 386 | } |
389 | 387 | ||
390 | static int ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a) | 388 | static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a) |
391 | { | 389 | { |
392 | int ret = 0; | ||
393 | struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; | 390 | struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; |
394 | struct net_device *dev = pr->port->netdev; | 391 | struct net_device *dev = pr->port->netdev; |
395 | int i; | 392 | int i; |
396 | 393 | ||
397 | for (i = 0; i < pr->rq1_skba.len; i++) { | 394 | for (i = 0; i < pr->rq1_skba.len; i++) { |
398 | skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); | 395 | skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); |
399 | if (!skb_arr_rq1[i]) { | 396 | if (!skb_arr_rq1[i]) |
400 | ehea_error("%s: no mem for skb/%d wqes filled", | 397 | break; |
401 | dev->name, i); | ||
402 | ret = -ENOMEM; | ||
403 | goto out; | ||
404 | } | ||
405 | } | 398 | } |
406 | /* Ring doorbell */ | 399 | /* Ring doorbell */ |
407 | ehea_update_rq1a(pr->qp, nr_rq1a); | 400 | ehea_update_rq1a(pr->qp, nr_rq1a); |
408 | out: | ||
409 | return ret; | ||
410 | } | 401 | } |
411 | 402 | ||
412 | static int ehea_refill_rq_def(struct ehea_port_res *pr, | 403 | static int ehea_refill_rq_def(struct ehea_port_res *pr, |
@@ -435,10 +426,12 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr, | |||
435 | u64 tmp_addr; | 426 | u64 tmp_addr; |
436 | struct sk_buff *skb = netdev_alloc_skb(dev, packet_size); | 427 | struct sk_buff *skb = netdev_alloc_skb(dev, packet_size); |
437 | if (!skb) { | 428 | if (!skb) { |
438 | ehea_error("%s: no mem for skb/%d wqes filled", | ||
439 | pr->port->netdev->name, i); | ||
440 | q_skba->os_skbs = fill_wqes - i; | 429 | q_skba->os_skbs = fill_wqes - i; |
441 | ret = -ENOMEM; | 430 | if (q_skba->os_skbs == q_skba->len - 2) { |
431 | ehea_info("%s: rq%i ran dry - no mem for skb", | ||
432 | pr->port->netdev->name, rq_nr); | ||
433 | ret = -ENOMEM; | ||
434 | } | ||
442 | break; | 435 | break; |
443 | } | 436 | } |
444 | skb_reserve(skb, NET_IP_ALIGN); | 437 | skb_reserve(skb, NET_IP_ALIGN); |
@@ -830,7 +823,7 @@ static int ehea_poll(struct napi_struct *napi, int budget) | |||
830 | while ((rx != budget) || force_irq) { | 823 | while ((rx != budget) || force_irq) { |
831 | pr->poll_counter = 0; | 824 | pr->poll_counter = 0; |
832 | force_irq = 0; | 825 | force_irq = 0; |
833 | netif_rx_complete(napi); | 826 | napi_complete(napi); |
834 | ehea_reset_cq_ep(pr->recv_cq); | 827 | ehea_reset_cq_ep(pr->recv_cq); |
835 | ehea_reset_cq_ep(pr->send_cq); | 828 | ehea_reset_cq_ep(pr->send_cq); |
836 | ehea_reset_cq_n1(pr->recv_cq); | 829 | ehea_reset_cq_n1(pr->recv_cq); |
@@ -841,7 +834,7 @@ static int ehea_poll(struct napi_struct *napi, int budget) | |||
841 | if (!cqe && !cqe_skb) | 834 | if (!cqe && !cqe_skb) |
842 | return rx; | 835 | return rx; |
843 | 836 | ||
844 | if (!netif_rx_reschedule(napi)) | 837 | if (!napi_reschedule(napi)) |
845 | return rx; | 838 | return rx; |
846 | 839 | ||
847 | cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES); | 840 | cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES); |
@@ -859,7 +852,7 @@ static void ehea_netpoll(struct net_device *dev) | |||
859 | int i; | 852 | int i; |
860 | 853 | ||
861 | for (i = 0; i < port->num_def_qps; i++) | 854 | for (i = 0; i < port->num_def_qps; i++) |
862 | netif_rx_schedule(&port->port_res[i].napi); | 855 | napi_schedule(&port->port_res[i].napi); |
863 | } | 856 | } |
864 | #endif | 857 | #endif |
865 | 858 | ||
@@ -867,7 +860,7 @@ static irqreturn_t ehea_recv_irq_handler(int irq, void *param) | |||
867 | { | 860 | { |
868 | struct ehea_port_res *pr = param; | 861 | struct ehea_port_res *pr = param; |
869 | 862 | ||
870 | netif_rx_schedule(&pr->napi); | 863 | napi_schedule(&pr->napi); |
871 | 864 | ||
872 | return IRQ_HANDLED; | 865 | return IRQ_HANDLED; |
873 | } | 866 | } |
@@ -915,7 +908,7 @@ int ehea_sense_port_attr(struct ehea_port *port) | |||
915 | struct hcp_ehea_port_cb0 *cb0; | 908 | struct hcp_ehea_port_cb0 *cb0; |
916 | 909 | ||
917 | /* may be called via ehea_neq_tasklet() */ | 910 | /* may be called via ehea_neq_tasklet() */ |
918 | cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC); | 911 | cb0 = (void *)get_zeroed_page(GFP_ATOMIC); |
919 | if (!cb0) { | 912 | if (!cb0) { |
920 | ehea_error("no mem for cb0"); | 913 | ehea_error("no mem for cb0"); |
921 | ret = -ENOMEM; | 914 | ret = -ENOMEM; |
@@ -996,7 +989,7 @@ int ehea_sense_port_attr(struct ehea_port *port) | |||
996 | out_free: | 989 | out_free: |
997 | if (ret || netif_msg_probe(port)) | 990 | if (ret || netif_msg_probe(port)) |
998 | ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr"); | 991 | ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr"); |
999 | kfree(cb0); | 992 | free_page((unsigned long)cb0); |
1000 | out: | 993 | out: |
1001 | return ret; | 994 | return ret; |
1002 | } | 995 | } |
@@ -1007,7 +1000,7 @@ int ehea_set_portspeed(struct ehea_port *port, u32 port_speed) | |||
1007 | u64 hret; | 1000 | u64 hret; |
1008 | int ret = 0; | 1001 | int ret = 0; |
1009 | 1002 | ||
1010 | cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL); | 1003 | cb4 = (void *)get_zeroed_page(GFP_KERNEL); |
1011 | if (!cb4) { | 1004 | if (!cb4) { |
1012 | ehea_error("no mem for cb4"); | 1005 | ehea_error("no mem for cb4"); |
1013 | ret = -ENOMEM; | 1006 | ret = -ENOMEM; |
@@ -1075,7 +1068,7 @@ int ehea_set_portspeed(struct ehea_port *port, u32 port_speed) | |||
1075 | if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP)) | 1068 | if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP)) |
1076 | netif_carrier_on(port->netdev); | 1069 | netif_carrier_on(port->netdev); |
1077 | 1070 | ||
1078 | kfree(cb4); | 1071 | free_page((unsigned long)cb4); |
1079 | out: | 1072 | out: |
1080 | return ret; | 1073 | return ret; |
1081 | } | 1074 | } |
@@ -1201,11 +1194,11 @@ static int ehea_fill_port_res(struct ehea_port_res *pr) | |||
1201 | int ret; | 1194 | int ret; |
1202 | struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr; | 1195 | struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr; |
1203 | 1196 | ||
1204 | ret = ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1 | 1197 | ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1 |
1205 | - init_attr->act_nr_rwqes_rq2 | 1198 | - init_attr->act_nr_rwqes_rq2 |
1206 | - init_attr->act_nr_rwqes_rq3 - 1); | 1199 | - init_attr->act_nr_rwqes_rq3 - 1); |
1207 | 1200 | ||
1208 | ret |= ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1); | 1201 | ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1); |
1209 | 1202 | ||
1210 | ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1); | 1203 | ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1); |
1211 | 1204 | ||
@@ -1302,7 +1295,7 @@ static int ehea_configure_port(struct ehea_port *port) | |||
1302 | struct hcp_ehea_port_cb0 *cb0; | 1295 | struct hcp_ehea_port_cb0 *cb0; |
1303 | 1296 | ||
1304 | ret = -ENOMEM; | 1297 | ret = -ENOMEM; |
1305 | cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); | 1298 | cb0 = (void *)get_zeroed_page(GFP_KERNEL); |
1306 | if (!cb0) | 1299 | if (!cb0) |
1307 | goto out; | 1300 | goto out; |
1308 | 1301 | ||
@@ -1338,7 +1331,7 @@ static int ehea_configure_port(struct ehea_port *port) | |||
1338 | ret = 0; | 1331 | ret = 0; |
1339 | 1332 | ||
1340 | out_free: | 1333 | out_free: |
1341 | kfree(cb0); | 1334 | free_page((unsigned long)cb0); |
1342 | out: | 1335 | out: |
1343 | return ret; | 1336 | return ret; |
1344 | } | 1337 | } |
@@ -1748,7 +1741,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa) | |||
1748 | goto out; | 1741 | goto out; |
1749 | } | 1742 | } |
1750 | 1743 | ||
1751 | cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); | 1744 | cb0 = (void *)get_zeroed_page(GFP_KERNEL); |
1752 | if (!cb0) { | 1745 | if (!cb0) { |
1753 | ehea_error("no mem for cb0"); | 1746 | ehea_error("no mem for cb0"); |
1754 | ret = -ENOMEM; | 1747 | ret = -ENOMEM; |
@@ -1793,7 +1786,7 @@ out_upregs: | |||
1793 | ehea_update_bcmc_registrations(); | 1786 | ehea_update_bcmc_registrations(); |
1794 | spin_unlock(&ehea_bcmc_regs.lock); | 1787 | spin_unlock(&ehea_bcmc_regs.lock); |
1795 | out_free: | 1788 | out_free: |
1796 | kfree(cb0); | 1789 | free_page((unsigned long)cb0); |
1797 | out: | 1790 | out: |
1798 | return ret; | 1791 | return ret; |
1799 | } | 1792 | } |
@@ -1817,7 +1810,7 @@ static void ehea_promiscuous(struct net_device *dev, int enable) | |||
1817 | if ((enable && port->promisc) || (!enable && !port->promisc)) | 1810 | if ((enable && port->promisc) || (!enable && !port->promisc)) |
1818 | return; | 1811 | return; |
1819 | 1812 | ||
1820 | cb7 = kzalloc(PAGE_SIZE, GFP_ATOMIC); | 1813 | cb7 = (void *)get_zeroed_page(GFP_ATOMIC); |
1821 | if (!cb7) { | 1814 | if (!cb7) { |
1822 | ehea_error("no mem for cb7"); | 1815 | ehea_error("no mem for cb7"); |
1823 | goto out; | 1816 | goto out; |
@@ -1836,7 +1829,7 @@ static void ehea_promiscuous(struct net_device *dev, int enable) | |||
1836 | 1829 | ||
1837 | port->promisc = enable; | 1830 | port->promisc = enable; |
1838 | out: | 1831 | out: |
1839 | kfree(cb7); | 1832 | free_page((unsigned long)cb7); |
1840 | return; | 1833 | return; |
1841 | } | 1834 | } |
1842 | 1835 | ||
@@ -2217,7 +2210,7 @@ static void ehea_vlan_rx_register(struct net_device *dev, | |||
2217 | 2210 | ||
2218 | port->vgrp = grp; | 2211 | port->vgrp = grp; |
2219 | 2212 | ||
2220 | cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL); | 2213 | cb1 = (void *)get_zeroed_page(GFP_KERNEL); |
2221 | if (!cb1) { | 2214 | if (!cb1) { |
2222 | ehea_error("no mem for cb1"); | 2215 | ehea_error("no mem for cb1"); |
2223 | goto out; | 2216 | goto out; |
@@ -2228,7 +2221,7 @@ static void ehea_vlan_rx_register(struct net_device *dev, | |||
2228 | if (hret != H_SUCCESS) | 2221 | if (hret != H_SUCCESS) |
2229 | ehea_error("modify_ehea_port failed"); | 2222 | ehea_error("modify_ehea_port failed"); |
2230 | 2223 | ||
2231 | kfree(cb1); | 2224 | free_page((unsigned long)cb1); |
2232 | out: | 2225 | out: |
2233 | return; | 2226 | return; |
2234 | } | 2227 | } |
@@ -2241,7 +2234,7 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) | |||
2241 | int index; | 2234 | int index; |
2242 | u64 hret; | 2235 | u64 hret; |
2243 | 2236 | ||
2244 | cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL); | 2237 | cb1 = (void *)get_zeroed_page(GFP_KERNEL); |
2245 | if (!cb1) { | 2238 | if (!cb1) { |
2246 | ehea_error("no mem for cb1"); | 2239 | ehea_error("no mem for cb1"); |
2247 | goto out; | 2240 | goto out; |
@@ -2262,7 +2255,7 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) | |||
2262 | if (hret != H_SUCCESS) | 2255 | if (hret != H_SUCCESS) |
2263 | ehea_error("modify_ehea_port failed"); | 2256 | ehea_error("modify_ehea_port failed"); |
2264 | out: | 2257 | out: |
2265 | kfree(cb1); | 2258 | free_page((unsigned long)cb1); |
2266 | return; | 2259 | return; |
2267 | } | 2260 | } |
2268 | 2261 | ||
@@ -2276,7 +2269,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
2276 | 2269 | ||
2277 | vlan_group_set_device(port->vgrp, vid, NULL); | 2270 | vlan_group_set_device(port->vgrp, vid, NULL); |
2278 | 2271 | ||
2279 | cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL); | 2272 | cb1 = (void *)get_zeroed_page(GFP_KERNEL); |
2280 | if (!cb1) { | 2273 | if (!cb1) { |
2281 | ehea_error("no mem for cb1"); | 2274 | ehea_error("no mem for cb1"); |
2282 | goto out; | 2275 | goto out; |
@@ -2297,7 +2290,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
2297 | if (hret != H_SUCCESS) | 2290 | if (hret != H_SUCCESS) |
2298 | ehea_error("modify_ehea_port failed"); | 2291 | ehea_error("modify_ehea_port failed"); |
2299 | out: | 2292 | out: |
2300 | kfree(cb1); | 2293 | free_page((unsigned long)cb1); |
2301 | return; | 2294 | return; |
2302 | } | 2295 | } |
2303 | 2296 | ||
@@ -2309,7 +2302,7 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp) | |||
2309 | u64 dummy64 = 0; | 2302 | u64 dummy64 = 0; |
2310 | struct hcp_modify_qp_cb0 *cb0; | 2303 | struct hcp_modify_qp_cb0 *cb0; |
2311 | 2304 | ||
2312 | cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); | 2305 | cb0 = (void *)get_zeroed_page(GFP_KERNEL); |
2313 | if (!cb0) { | 2306 | if (!cb0) { |
2314 | ret = -ENOMEM; | 2307 | ret = -ENOMEM; |
2315 | goto out; | 2308 | goto out; |
@@ -2372,7 +2365,7 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp) | |||
2372 | 2365 | ||
2373 | ret = 0; | 2366 | ret = 0; |
2374 | out: | 2367 | out: |
2375 | kfree(cb0); | 2368 | free_page((unsigned long)cb0); |
2376 | return ret; | 2369 | return ret; |
2377 | } | 2370 | } |
2378 | 2371 | ||
@@ -2664,7 +2657,7 @@ int ehea_stop_qps(struct net_device *dev) | |||
2664 | u64 dummy64 = 0; | 2657 | u64 dummy64 = 0; |
2665 | u16 dummy16 = 0; | 2658 | u16 dummy16 = 0; |
2666 | 2659 | ||
2667 | cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); | 2660 | cb0 = (void *)get_zeroed_page(GFP_KERNEL); |
2668 | if (!cb0) { | 2661 | if (!cb0) { |
2669 | ret = -ENOMEM; | 2662 | ret = -ENOMEM; |
2670 | goto out; | 2663 | goto out; |
@@ -2716,7 +2709,7 @@ int ehea_stop_qps(struct net_device *dev) | |||
2716 | 2709 | ||
2717 | ret = 0; | 2710 | ret = 0; |
2718 | out: | 2711 | out: |
2719 | kfree(cb0); | 2712 | free_page((unsigned long)cb0); |
2720 | 2713 | ||
2721 | return ret; | 2714 | return ret; |
2722 | } | 2715 | } |
@@ -2766,7 +2759,7 @@ int ehea_restart_qps(struct net_device *dev) | |||
2766 | u64 dummy64 = 0; | 2759 | u64 dummy64 = 0; |
2767 | u16 dummy16 = 0; | 2760 | u16 dummy16 = 0; |
2768 | 2761 | ||
2769 | cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); | 2762 | cb0 = (void *)get_zeroed_page(GFP_KERNEL); |
2770 | if (!cb0) { | 2763 | if (!cb0) { |
2771 | ret = -ENOMEM; | 2764 | ret = -ENOMEM; |
2772 | goto out; | 2765 | goto out; |
@@ -2819,7 +2812,7 @@ int ehea_restart_qps(struct net_device *dev) | |||
2819 | ehea_refill_rq3(pr, 0); | 2812 | ehea_refill_rq3(pr, 0); |
2820 | } | 2813 | } |
2821 | out: | 2814 | out: |
2822 | kfree(cb0); | 2815 | free_page((unsigned long)cb0); |
2823 | 2816 | ||
2824 | return ret; | 2817 | return ret; |
2825 | } | 2818 | } |
@@ -2950,7 +2943,7 @@ int ehea_sense_adapter_attr(struct ehea_adapter *adapter) | |||
2950 | u64 hret; | 2943 | u64 hret; |
2951 | int ret; | 2944 | int ret; |
2952 | 2945 | ||
2953 | cb = kzalloc(PAGE_SIZE, GFP_KERNEL); | 2946 | cb = (void *)get_zeroed_page(GFP_KERNEL); |
2954 | if (!cb) { | 2947 | if (!cb) { |
2955 | ret = -ENOMEM; | 2948 | ret = -ENOMEM; |
2956 | goto out; | 2949 | goto out; |
@@ -2967,7 +2960,7 @@ int ehea_sense_adapter_attr(struct ehea_adapter *adapter) | |||
2967 | ret = 0; | 2960 | ret = 0; |
2968 | 2961 | ||
2969 | out_herr: | 2962 | out_herr: |
2970 | kfree(cb); | 2963 | free_page((unsigned long)cb); |
2971 | out: | 2964 | out: |
2972 | return ret; | 2965 | return ret; |
2973 | } | 2966 | } |
@@ -2981,7 +2974,7 @@ int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo) | |||
2981 | *jumbo = 0; | 2974 | *jumbo = 0; |
2982 | 2975 | ||
2983 | /* (Try to) enable *jumbo frames */ | 2976 | /* (Try to) enable *jumbo frames */ |
2984 | cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL); | 2977 | cb4 = (void *)get_zeroed_page(GFP_KERNEL); |
2985 | if (!cb4) { | 2978 | if (!cb4) { |
2986 | ehea_error("no mem for cb4"); | 2979 | ehea_error("no mem for cb4"); |
2987 | ret = -ENOMEM; | 2980 | ret = -ENOMEM; |
@@ -3009,7 +3002,7 @@ int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo) | |||
3009 | } else | 3002 | } else |
3010 | ret = -EINVAL; | 3003 | ret = -EINVAL; |
3011 | 3004 | ||
3012 | kfree(cb4); | 3005 | free_page((unsigned long)cb4); |
3013 | } | 3006 | } |
3014 | out: | 3007 | out: |
3015 | return ret; | 3008 | return ret; |
@@ -3040,7 +3033,7 @@ static struct device *ehea_register_port(struct ehea_port *port, | |||
3040 | port->ofdev.dev.parent = &port->adapter->ofdev->dev; | 3033 | port->ofdev.dev.parent = &port->adapter->ofdev->dev; |
3041 | port->ofdev.dev.bus = &ibmebus_bus_type; | 3034 | port->ofdev.dev.bus = &ibmebus_bus_type; |
3042 | 3035 | ||
3043 | sprintf(port->ofdev.dev.bus_id, "port%d", port_name_cnt++); | 3036 | dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++); |
3044 | port->ofdev.dev.release = logical_port_release; | 3037 | port->ofdev.dev.release = logical_port_release; |
3045 | 3038 | ||
3046 | ret = of_device_register(&port->ofdev); | 3039 | ret = of_device_register(&port->ofdev); |
@@ -3069,6 +3062,22 @@ static void ehea_unregister_port(struct ehea_port *port) | |||
3069 | of_device_unregister(&port->ofdev); | 3062 | of_device_unregister(&port->ofdev); |
3070 | } | 3063 | } |
3071 | 3064 | ||
3065 | static const struct net_device_ops ehea_netdev_ops = { | ||
3066 | .ndo_open = ehea_open, | ||
3067 | .ndo_stop = ehea_stop, | ||
3068 | .ndo_start_xmit = ehea_start_xmit, | ||
3069 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
3070 | .ndo_poll_controller = ehea_netpoll, | ||
3071 | #endif | ||
3072 | .ndo_get_stats = ehea_get_stats, | ||
3073 | .ndo_set_mac_address = ehea_set_mac_addr, | ||
3074 | .ndo_set_multicast_list = ehea_set_multicast_list, | ||
3075 | .ndo_change_mtu = ehea_change_mtu, | ||
3076 | .ndo_vlan_rx_register = ehea_vlan_rx_register, | ||
3077 | .ndo_vlan_rx_add_vid = ehea_vlan_rx_add_vid, | ||
3078 | .ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid | ||
3079 | }; | ||
3080 | |||
3072 | struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, | 3081 | struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, |
3073 | u32 logical_port_id, | 3082 | u32 logical_port_id, |
3074 | struct device_node *dn) | 3083 | struct device_node *dn) |
@@ -3121,19 +3130,9 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, | |||
3121 | /* initialize net_device structure */ | 3130 | /* initialize net_device structure */ |
3122 | memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN); | 3131 | memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN); |
3123 | 3132 | ||
3124 | dev->open = ehea_open; | 3133 | dev->netdev_ops = &ehea_netdev_ops; |
3125 | #ifdef CONFIG_NET_POLL_CONTROLLER | 3134 | ehea_set_ethtool_ops(dev); |
3126 | dev->poll_controller = ehea_netpoll; | 3135 | |
3127 | #endif | ||
3128 | dev->stop = ehea_stop; | ||
3129 | dev->hard_start_xmit = ehea_start_xmit; | ||
3130 | dev->get_stats = ehea_get_stats; | ||
3131 | dev->set_multicast_list = ehea_set_multicast_list; | ||
3132 | dev->set_mac_address = ehea_set_mac_addr; | ||
3133 | dev->change_mtu = ehea_change_mtu; | ||
3134 | dev->vlan_rx_register = ehea_vlan_rx_register; | ||
3135 | dev->vlan_rx_add_vid = ehea_vlan_rx_add_vid; | ||
3136 | dev->vlan_rx_kill_vid = ehea_vlan_rx_kill_vid; | ||
3137 | dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO | 3136 | dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO |
3138 | | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | 3137 | | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX |
3139 | | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER | 3138 | | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER |
@@ -3142,7 +3141,6 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, | |||
3142 | dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; | 3141 | dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; |
3143 | 3142 | ||
3144 | INIT_WORK(&port->reset_task, ehea_reset_port); | 3143 | INIT_WORK(&port->reset_task, ehea_reset_port); |
3145 | ehea_set_ethtool_ops(dev); | ||
3146 | 3144 | ||
3147 | ret = register_netdev(dev); | 3145 | ret = register_netdev(dev); |
3148 | if (ret) { | 3146 | if (ret) { |
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c index 49d766ebbcf4..3747457f5e69 100644 --- a/drivers/net/ehea/ehea_qmr.c +++ b/drivers/net/ehea/ehea_qmr.c | |||
@@ -1005,7 +1005,7 @@ void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle) | |||
1005 | unsigned long ret; | 1005 | unsigned long ret; |
1006 | u64 *rblock; | 1006 | u64 *rblock; |
1007 | 1007 | ||
1008 | rblock = kzalloc(PAGE_SIZE, GFP_KERNEL); | 1008 | rblock = (void *)get_zeroed_page(GFP_KERNEL); |
1009 | if (!rblock) { | 1009 | if (!rblock) { |
1010 | ehea_error("Cannot allocate rblock memory."); | 1010 | ehea_error("Cannot allocate rblock memory."); |
1011 | return; | 1011 | return; |
@@ -1022,5 +1022,5 @@ void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle) | |||
1022 | else | 1022 | else |
1023 | ehea_error("Error data could not be fetched: %llX", res_handle); | 1023 | ehea_error("Error data could not be fetched: %llX", res_handle); |
1024 | 1024 | ||
1025 | kfree(rblock); | 1025 | free_page((unsigned long)rblock); |
1026 | } | 1026 | } |
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c index 7d60551d538f..5dd11563553e 100644 --- a/drivers/net/enic/enic_main.c +++ b/drivers/net/enic/enic_main.c | |||
@@ -411,8 +411,8 @@ static irqreturn_t enic_isr_legacy(int irq, void *data) | |||
411 | } | 411 | } |
412 | 412 | ||
413 | if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) { | 413 | if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) { |
414 | if (netif_rx_schedule_prep(&enic->napi)) | 414 | if (napi_schedule_prep(&enic->napi)) |
415 | __netif_rx_schedule(&enic->napi); | 415 | __napi_schedule(&enic->napi); |
416 | } else { | 416 | } else { |
417 | vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]); | 417 | vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]); |
418 | } | 418 | } |
@@ -440,7 +440,7 @@ static irqreturn_t enic_isr_msi(int irq, void *data) | |||
440 | * writes). | 440 | * writes). |
441 | */ | 441 | */ |
442 | 442 | ||
443 | netif_rx_schedule(&enic->napi); | 443 | napi_schedule(&enic->napi); |
444 | 444 | ||
445 | return IRQ_HANDLED; | 445 | return IRQ_HANDLED; |
446 | } | 446 | } |
@@ -450,7 +450,7 @@ static irqreturn_t enic_isr_msix_rq(int irq, void *data) | |||
450 | struct enic *enic = data; | 450 | struct enic *enic = data; |
451 | 451 | ||
452 | /* schedule NAPI polling for RQ cleanup */ | 452 | /* schedule NAPI polling for RQ cleanup */ |
453 | netif_rx_schedule(&enic->napi); | 453 | napi_schedule(&enic->napi); |
454 | 454 | ||
455 | return IRQ_HANDLED; | 455 | return IRQ_HANDLED; |
456 | } | 456 | } |
@@ -570,11 +570,11 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic, | |||
570 | * to each TCP segment resulting from the TSO. | 570 | * to each TCP segment resulting from the TSO. |
571 | */ | 571 | */ |
572 | 572 | ||
573 | if (skb->protocol == __constant_htons(ETH_P_IP)) { | 573 | if (skb->protocol == cpu_to_be16(ETH_P_IP)) { |
574 | ip_hdr(skb)->check = 0; | 574 | ip_hdr(skb)->check = 0; |
575 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, | 575 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, |
576 | ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); | 576 | ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); |
577 | } else if (skb->protocol == __constant_htons(ETH_P_IPV6)) { | 577 | } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) { |
578 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | 578 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
579 | &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); | 579 | &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); |
580 | } | 580 | } |
@@ -1068,7 +1068,7 @@ static int enic_poll(struct napi_struct *napi, int budget) | |||
1068 | if (netdev->features & NETIF_F_LRO) | 1068 | if (netdev->features & NETIF_F_LRO) |
1069 | lro_flush_all(&enic->lro_mgr); | 1069 | lro_flush_all(&enic->lro_mgr); |
1070 | 1070 | ||
1071 | netif_rx_complete(napi); | 1071 | napi_complete(napi); |
1072 | vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]); | 1072 | vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]); |
1073 | } | 1073 | } |
1074 | 1074 | ||
@@ -1112,7 +1112,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget) | |||
1112 | if (netdev->features & NETIF_F_LRO) | 1112 | if (netdev->features & NETIF_F_LRO) |
1113 | lro_flush_all(&enic->lro_mgr); | 1113 | lro_flush_all(&enic->lro_mgr); |
1114 | 1114 | ||
1115 | netif_rx_complete(napi); | 1115 | napi_complete(napi); |
1116 | vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]); | 1116 | vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]); |
1117 | } | 1117 | } |
1118 | 1118 | ||
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c index a539bc3163cf..b60e27dfcfa7 100644 --- a/drivers/net/epic100.c +++ b/drivers/net/epic100.c | |||
@@ -1114,9 +1114,9 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance) | |||
1114 | 1114 | ||
1115 | if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) { | 1115 | if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) { |
1116 | spin_lock(&ep->napi_lock); | 1116 | spin_lock(&ep->napi_lock); |
1117 | if (netif_rx_schedule_prep(&ep->napi)) { | 1117 | if (napi_schedule_prep(&ep->napi)) { |
1118 | epic_napi_irq_off(dev, ep); | 1118 | epic_napi_irq_off(dev, ep); |
1119 | __netif_rx_schedule(&ep->napi); | 1119 | __napi_schedule(&ep->napi); |
1120 | } else | 1120 | } else |
1121 | ep->reschedule_in_poll++; | 1121 | ep->reschedule_in_poll++; |
1122 | spin_unlock(&ep->napi_lock); | 1122 | spin_unlock(&ep->napi_lock); |
@@ -1293,7 +1293,7 @@ rx_action: | |||
1293 | 1293 | ||
1294 | more = ep->reschedule_in_poll; | 1294 | more = ep->reschedule_in_poll; |
1295 | if (!more) { | 1295 | if (!more) { |
1296 | __netif_rx_complete(napi); | 1296 | __napi_complete(napi); |
1297 | outl(EpicNapiEvent, ioaddr + INTSTAT); | 1297 | outl(EpicNapiEvent, ioaddr + INTSTAT); |
1298 | epic_napi_irq_on(dev, ep); | 1298 | epic_napi_irq_on(dev, ep); |
1299 | } else | 1299 | } else |
diff --git a/drivers/net/fec.c b/drivers/net/fec.c index 2769083bfe83..fe2650237e34 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c | |||
@@ -36,30 +36,43 @@ | |||
36 | #include <linux/spinlock.h> | 36 | #include <linux/spinlock.h> |
37 | #include <linux/workqueue.h> | 37 | #include <linux/workqueue.h> |
38 | #include <linux/bitops.h> | 38 | #include <linux/bitops.h> |
39 | #include <linux/io.h> | ||
40 | #include <linux/irq.h> | ||
41 | #include <linux/clk.h> | ||
42 | #include <linux/platform_device.h> | ||
39 | 43 | ||
40 | #include <asm/irq.h> | ||
41 | #include <asm/uaccess.h> | ||
42 | #include <asm/io.h> | ||
43 | #include <asm/pgtable.h> | ||
44 | #include <asm/cacheflush.h> | 44 | #include <asm/cacheflush.h> |
45 | 45 | ||
46 | #ifndef CONFIG_ARCH_MXC | ||
46 | #include <asm/coldfire.h> | 47 | #include <asm/coldfire.h> |
47 | #include <asm/mcfsim.h> | 48 | #include <asm/mcfsim.h> |
49 | #endif | ||
50 | |||
48 | #include "fec.h" | 51 | #include "fec.h" |
49 | 52 | ||
50 | #if defined(CONFIG_FEC2) | 53 | #ifdef CONFIG_ARCH_MXC |
51 | #define FEC_MAX_PORTS 2 | 54 | #include <mach/hardware.h> |
55 | #define FEC_ALIGNMENT 0xf | ||
52 | #else | 56 | #else |
53 | #define FEC_MAX_PORTS 1 | 57 | #define FEC_ALIGNMENT 0x3 |
54 | #endif | 58 | #endif |
55 | 59 | ||
60 | #if defined CONFIG_M5272 || defined CONFIG_M527x || defined CONFIG_M523x \ | ||
61 | || defined CONFIG_M528x || defined CONFIG_M532x || defined CONFIG_M520x | ||
62 | #define FEC_LEGACY | ||
63 | /* | ||
64 | * Define the fixed address of the FEC hardware. | ||
65 | */ | ||
56 | #if defined(CONFIG_M5272) | 66 | #if defined(CONFIG_M5272) |
57 | #define HAVE_mii_link_interrupt | 67 | #define HAVE_mii_link_interrupt |
58 | #endif | 68 | #endif |
59 | 69 | ||
60 | /* | 70 | #if defined(CONFIG_FEC2) |
61 | * Define the fixed address of the FEC hardware. | 71 | #define FEC_MAX_PORTS 2 |
62 | */ | 72 | #else |
73 | #define FEC_MAX_PORTS 1 | ||
74 | #endif | ||
75 | |||
63 | static unsigned int fec_hw[] = { | 76 | static unsigned int fec_hw[] = { |
64 | #if defined(CONFIG_M5272) | 77 | #if defined(CONFIG_M5272) |
65 | (MCF_MBAR + 0x840), | 78 | (MCF_MBAR + 0x840), |
@@ -72,8 +85,6 @@ static unsigned int fec_hw[] = { | |||
72 | (MCF_MBAR+0x30000), | 85 | (MCF_MBAR+0x30000), |
73 | #elif defined(CONFIG_M532x) | 86 | #elif defined(CONFIG_M532x) |
74 | (MCF_MBAR+0xfc030000), | 87 | (MCF_MBAR+0xfc030000), |
75 | #else | ||
76 | &(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec), | ||
77 | #endif | 88 | #endif |
78 | }; | 89 | }; |
79 | 90 | ||
@@ -99,6 +110,8 @@ static unsigned char fec_mac_default[] = { | |||
99 | #define FEC_FLASHMAC 0 | 110 | #define FEC_FLASHMAC 0 |
100 | #endif | 111 | #endif |
101 | 112 | ||
113 | #endif /* FEC_LEGACY */ | ||
114 | |||
102 | /* Forward declarations of some structures to support different PHYs | 115 | /* Forward declarations of some structures to support different PHYs |
103 | */ | 116 | */ |
104 | 117 | ||
@@ -162,7 +175,7 @@ typedef struct { | |||
162 | * account when setting it. | 175 | * account when setting it. |
163 | */ | 176 | */ |
164 | #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ | 177 | #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ |
165 | defined(CONFIG_M520x) || defined(CONFIG_M532x) | 178 | defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC) |
166 | #define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) | 179 | #define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) |
167 | #else | 180 | #else |
168 | #define OPT_FRAME_SIZE 0 | 181 | #define OPT_FRAME_SIZE 0 |
@@ -182,6 +195,8 @@ struct fec_enet_private { | |||
182 | 195 | ||
183 | struct net_device *netdev; | 196 | struct net_device *netdev; |
184 | 197 | ||
198 | struct clk *clk; | ||
199 | |||
185 | /* The saved address of a sent-in-place packet/buffer, for skfree(). */ | 200 | /* The saved address of a sent-in-place packet/buffer, for skfree(). */ |
186 | unsigned char *tx_bounce[TX_RING_SIZE]; | 201 | unsigned char *tx_bounce[TX_RING_SIZE]; |
187 | struct sk_buff* tx_skbuff[TX_RING_SIZE]; | 202 | struct sk_buff* tx_skbuff[TX_RING_SIZE]; |
@@ -190,6 +205,7 @@ struct fec_enet_private { | |||
190 | 205 | ||
191 | /* CPM dual port RAM relative addresses. | 206 | /* CPM dual port RAM relative addresses. |
192 | */ | 207 | */ |
208 | dma_addr_t bd_dma; | ||
193 | cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */ | 209 | cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */ |
194 | cbd_t *tx_bd_base; | 210 | cbd_t *tx_bd_base; |
195 | cbd_t *cur_rx, *cur_tx; /* The next free ring entry */ | 211 | cbd_t *cur_rx, *cur_tx; /* The next free ring entry */ |
@@ -342,10 +358,10 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
342 | * 4-byte boundaries. Use bounce buffers to copy data | 358 | * 4-byte boundaries. Use bounce buffers to copy data |
343 | * and get it aligned. Ugh. | 359 | * and get it aligned. Ugh. |
344 | */ | 360 | */ |
345 | if (bdp->cbd_bufaddr & 0x3) { | 361 | if (bdp->cbd_bufaddr & FEC_ALIGNMENT) { |
346 | unsigned int index; | 362 | unsigned int index; |
347 | index = bdp - fep->tx_bd_base; | 363 | index = bdp - fep->tx_bd_base; |
348 | memcpy(fep->tx_bounce[index], (void *) bdp->cbd_bufaddr, bdp->cbd_datlen); | 364 | memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len); |
349 | bdp->cbd_bufaddr = __pa(fep->tx_bounce[index]); | 365 | bdp->cbd_bufaddr = __pa(fep->tx_bounce[index]); |
350 | } | 366 | } |
351 | 367 | ||
@@ -359,8 +375,8 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
359 | /* Push the data cache so the CPM does not get stale memory | 375 | /* Push the data cache so the CPM does not get stale memory |
360 | * data. | 376 | * data. |
361 | */ | 377 | */ |
362 | flush_dcache_range((unsigned long)skb->data, | 378 | dma_sync_single(NULL, bdp->cbd_bufaddr, |
363 | (unsigned long)skb->data + skb->len); | 379 | bdp->cbd_datlen, DMA_TO_DEVICE); |
364 | 380 | ||
365 | /* Send it on its way. Tell FEC it's ready, interrupt when done, | 381 | /* Send it on its way. Tell FEC it's ready, interrupt when done, |
366 | * it's the last BD of the frame, and to put the CRC on the end. | 382 | * it's the last BD of the frame, and to put the CRC on the end. |
@@ -633,6 +649,9 @@ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { | |||
633 | dev->stats.rx_bytes += pkt_len; | 649 | dev->stats.rx_bytes += pkt_len; |
634 | data = (__u8*)__va(bdp->cbd_bufaddr); | 650 | data = (__u8*)__va(bdp->cbd_bufaddr); |
635 | 651 | ||
652 | dma_sync_single(NULL, (unsigned long)__pa(data), | ||
653 | pkt_len - 4, DMA_FROM_DEVICE); | ||
654 | |||
636 | /* This does 16 byte alignment, exactly what we need. | 655 | /* This does 16 byte alignment, exactly what we need. |
637 | * The packet length includes FCS, but we don't want to | 656 | * The packet length includes FCS, but we don't want to |
638 | * include that when passing upstream as it messes up | 657 | * include that when passing upstream as it messes up |
@@ -1114,7 +1133,7 @@ static phy_info_t const phy_info_am79c874 = { | |||
1114 | /* register definitions for the 8721 */ | 1133 | /* register definitions for the 8721 */ |
1115 | 1134 | ||
1116 | #define MII_KS8721BL_RXERCR 21 | 1135 | #define MII_KS8721BL_RXERCR 21 |
1117 | #define MII_KS8721BL_ICSR 22 | 1136 | #define MII_KS8721BL_ICSR 27 |
1118 | #define MII_KS8721BL_PHYCR 31 | 1137 | #define MII_KS8721BL_PHYCR 31 |
1119 | 1138 | ||
1120 | static phy_cmd_t const phy_cmd_ks8721bl_config[] = { | 1139 | static phy_cmd_t const phy_cmd_ks8721bl_config[] = { |
@@ -1308,10 +1327,6 @@ static void __inline__ fec_get_mac(struct net_device *dev) | |||
1308 | dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; | 1327 | dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; |
1309 | } | 1328 | } |
1310 | 1329 | ||
1311 | static void __inline__ fec_enable_phy_intr(void) | ||
1312 | { | ||
1313 | } | ||
1314 | |||
1315 | static void __inline__ fec_disable_phy_intr(void) | 1330 | static void __inline__ fec_disable_phy_intr(void) |
1316 | { | 1331 | { |
1317 | volatile unsigned long *icrp; | 1332 | volatile unsigned long *icrp; |
@@ -1327,17 +1342,6 @@ static void __inline__ fec_phy_ack_intr(void) | |||
1327 | *icrp = 0x0d000000; | 1342 | *icrp = 0x0d000000; |
1328 | } | 1343 | } |
1329 | 1344 | ||
1330 | static void __inline__ fec_localhw_setup(void) | ||
1331 | { | ||
1332 | } | ||
1333 | |||
1334 | /* | ||
1335 | * Do not need to make region uncached on 5272. | ||
1336 | */ | ||
1337 | static void __inline__ fec_uncache(unsigned long addr) | ||
1338 | { | ||
1339 | } | ||
1340 | |||
1341 | /* ------------------------------------------------------------------------- */ | 1345 | /* ------------------------------------------------------------------------- */ |
1342 | 1346 | ||
1343 | #elif defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) | 1347 | #elif defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) |
@@ -1477,10 +1481,6 @@ static void __inline__ fec_get_mac(struct net_device *dev) | |||
1477 | dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; | 1481 | dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; |
1478 | } | 1482 | } |
1479 | 1483 | ||
1480 | static void __inline__ fec_enable_phy_intr(void) | ||
1481 | { | ||
1482 | } | ||
1483 | |||
1484 | static void __inline__ fec_disable_phy_intr(void) | 1484 | static void __inline__ fec_disable_phy_intr(void) |
1485 | { | 1485 | { |
1486 | } | 1486 | } |
@@ -1489,17 +1489,6 @@ static void __inline__ fec_phy_ack_intr(void) | |||
1489 | { | 1489 | { |
1490 | } | 1490 | } |
1491 | 1491 | ||
1492 | static void __inline__ fec_localhw_setup(void) | ||
1493 | { | ||
1494 | } | ||
1495 | |||
1496 | /* | ||
1497 | * Do not need to make region uncached on 5272. | ||
1498 | */ | ||
1499 | static void __inline__ fec_uncache(unsigned long addr) | ||
1500 | { | ||
1501 | } | ||
1502 | |||
1503 | /* ------------------------------------------------------------------------- */ | 1492 | /* ------------------------------------------------------------------------- */ |
1504 | 1493 | ||
1505 | #elif defined(CONFIG_M520x) | 1494 | #elif defined(CONFIG_M520x) |
@@ -1598,10 +1587,6 @@ static void __inline__ fec_get_mac(struct net_device *dev) | |||
1598 | dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; | 1587 | dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; |
1599 | } | 1588 | } |
1600 | 1589 | ||
1601 | static void __inline__ fec_enable_phy_intr(void) | ||
1602 | { | ||
1603 | } | ||
1604 | |||
1605 | static void __inline__ fec_disable_phy_intr(void) | 1590 | static void __inline__ fec_disable_phy_intr(void) |
1606 | { | 1591 | { |
1607 | } | 1592 | } |
@@ -1610,14 +1595,6 @@ static void __inline__ fec_phy_ack_intr(void) | |||
1610 | { | 1595 | { |
1611 | } | 1596 | } |
1612 | 1597 | ||
1613 | static void __inline__ fec_localhw_setup(void) | ||
1614 | { | ||
1615 | } | ||
1616 | |||
1617 | static void __inline__ fec_uncache(unsigned long addr) | ||
1618 | { | ||
1619 | } | ||
1620 | |||
1621 | /* ------------------------------------------------------------------------- */ | 1598 | /* ------------------------------------------------------------------------- */ |
1622 | 1599 | ||
1623 | #elif defined(CONFIG_M532x) | 1600 | #elif defined(CONFIG_M532x) |
@@ -1737,10 +1714,6 @@ static void __inline__ fec_get_mac(struct net_device *dev) | |||
1737 | dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; | 1714 | dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; |
1738 | } | 1715 | } |
1739 | 1716 | ||
1740 | static void __inline__ fec_enable_phy_intr(void) | ||
1741 | { | ||
1742 | } | ||
1743 | |||
1744 | static void __inline__ fec_disable_phy_intr(void) | 1717 | static void __inline__ fec_disable_phy_intr(void) |
1745 | { | 1718 | { |
1746 | } | 1719 | } |
@@ -1749,107 +1722,6 @@ static void __inline__ fec_phy_ack_intr(void) | |||
1749 | { | 1722 | { |
1750 | } | 1723 | } |
1751 | 1724 | ||
1752 | static void __inline__ fec_localhw_setup(void) | ||
1753 | { | ||
1754 | } | ||
1755 | |||
1756 | /* | ||
1757 | * Do not need to make region uncached on 532x. | ||
1758 | */ | ||
1759 | static void __inline__ fec_uncache(unsigned long addr) | ||
1760 | { | ||
1761 | } | ||
1762 | |||
1763 | /* ------------------------------------------------------------------------- */ | ||
1764 | |||
1765 | |||
1766 | #else | ||
1767 | |||
1768 | /* | ||
1769 | * Code specific to the MPC860T setup. | ||
1770 | */ | ||
1771 | static void __inline__ fec_request_intrs(struct net_device *dev) | ||
1772 | { | ||
1773 | volatile immap_t *immap; | ||
1774 | |||
1775 | immap = (immap_t *)IMAP_ADDR; /* pointer to internal registers */ | ||
1776 | |||
1777 | if (request_8xxirq(FEC_INTERRUPT, fec_enet_interrupt, 0, "fec", dev) != 0) | ||
1778 | panic("Could not allocate FEC IRQ!"); | ||
1779 | } | ||
1780 | |||
1781 | static void __inline__ fec_get_mac(struct net_device *dev) | ||
1782 | { | ||
1783 | bd_t *bd; | ||
1784 | |||
1785 | bd = (bd_t *)__res; | ||
1786 | memcpy(dev->dev_addr, bd->bi_enetaddr, ETH_ALEN); | ||
1787 | } | ||
1788 | |||
1789 | static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep) | ||
1790 | { | ||
1791 | extern uint _get_IMMR(void); | ||
1792 | volatile immap_t *immap; | ||
1793 | volatile fec_t *fecp; | ||
1794 | |||
1795 | fecp = fep->hwp; | ||
1796 | immap = (immap_t *)IMAP_ADDR; /* pointer to internal registers */ | ||
1797 | |||
1798 | /* Configure all of port D for MII. | ||
1799 | */ | ||
1800 | immap->im_ioport.iop_pdpar = 0x1fff; | ||
1801 | |||
1802 | /* Bits moved from Rev. D onward. | ||
1803 | */ | ||
1804 | if ((_get_IMMR() & 0xffff) < 0x0501) | ||
1805 | immap->im_ioport.iop_pddir = 0x1c58; /* Pre rev. D */ | ||
1806 | else | ||
1807 | immap->im_ioport.iop_pddir = 0x1fff; /* Rev. D and later */ | ||
1808 | |||
1809 | /* Set MII speed to 2.5 MHz | ||
1810 | */ | ||
1811 | fecp->fec_mii_speed = fep->phy_speed = | ||
1812 | ((bd->bi_busfreq * 1000000) / 2500000) & 0x7e; | ||
1813 | } | ||
1814 | |||
1815 | static void __inline__ fec_enable_phy_intr(void) | ||
1816 | { | ||
1817 | volatile fec_t *fecp; | ||
1818 | |||
1819 | fecp = fep->hwp; | ||
1820 | |||
1821 | /* Enable MII command finished interrupt | ||
1822 | */ | ||
1823 | fecp->fec_ivec = (FEC_INTERRUPT/2) << 29; | ||
1824 | } | ||
1825 | |||
1826 | static void __inline__ fec_disable_phy_intr(void) | ||
1827 | { | ||
1828 | } | ||
1829 | |||
1830 | static void __inline__ fec_phy_ack_intr(void) | ||
1831 | { | ||
1832 | } | ||
1833 | |||
1834 | static void __inline__ fec_localhw_setup(void) | ||
1835 | { | ||
1836 | volatile fec_t *fecp; | ||
1837 | |||
1838 | fecp = fep->hwp; | ||
1839 | fecp->fec_r_hash = PKT_MAXBUF_SIZE; | ||
1840 | /* Enable big endian and don't care about SDMA FC. | ||
1841 | */ | ||
1842 | fecp->fec_fun_code = 0x78000000; | ||
1843 | } | ||
1844 | |||
1845 | static void __inline__ fec_uncache(unsigned long addr) | ||
1846 | { | ||
1847 | pte_t *pte; | ||
1848 | pte = va_to_pte(mem_addr); | ||
1849 | pte_val(*pte) |= _PAGE_NO_CACHE; | ||
1850 | flush_tlb_page(init_mm.mmap, mem_addr); | ||
1851 | } | ||
1852 | |||
1853 | #endif | 1725 | #endif |
1854 | 1726 | ||
1855 | /* ------------------------------------------------------------------------- */ | 1727 | /* ------------------------------------------------------------------------- */ |
@@ -2055,7 +1927,9 @@ mii_discover_phy(uint mii_reg, struct net_device *dev) | |||
2055 | printk("FEC: No PHY device found.\n"); | 1927 | printk("FEC: No PHY device found.\n"); |
2056 | /* Disable external MII interface */ | 1928 | /* Disable external MII interface */ |
2057 | fecp->fec_mii_speed = fep->phy_speed = 0; | 1929 | fecp->fec_mii_speed = fep->phy_speed = 0; |
1930 | #ifdef FEC_LEGACY | ||
2058 | fec_disable_phy_intr(); | 1931 | fec_disable_phy_intr(); |
1932 | #endif | ||
2059 | } | 1933 | } |
2060 | } | 1934 | } |
2061 | 1935 | ||
@@ -2237,12 +2111,12 @@ fec_set_mac_address(struct net_device *dev) | |||
2237 | 2111 | ||
2238 | } | 2112 | } |
2239 | 2113 | ||
2240 | /* Initialize the FEC Ethernet on 860T (or ColdFire 5272). | ||
2241 | */ | ||
2242 | /* | 2114 | /* |
2243 | * XXX: We need to clean up on failure exits here. | 2115 | * XXX: We need to clean up on failure exits here. |
2116 | * | ||
2117 | * index is only used in legacy code | ||
2244 | */ | 2118 | */ |
2245 | int __init fec_enet_init(struct net_device *dev) | 2119 | int __init fec_enet_init(struct net_device *dev, int index) |
2246 | { | 2120 | { |
2247 | struct fec_enet_private *fep = netdev_priv(dev); | 2121 | struct fec_enet_private *fep = netdev_priv(dev); |
2248 | unsigned long mem_addr; | 2122 | unsigned long mem_addr; |
@@ -2250,15 +2124,11 @@ int __init fec_enet_init(struct net_device *dev) | |||
2250 | cbd_t *cbd_base; | 2124 | cbd_t *cbd_base; |
2251 | volatile fec_t *fecp; | 2125 | volatile fec_t *fecp; |
2252 | int i, j; | 2126 | int i, j; |
2253 | static int index = 0; | ||
2254 | |||
2255 | /* Only allow us to be probed once. */ | ||
2256 | if (index >= FEC_MAX_PORTS) | ||
2257 | return -ENXIO; | ||
2258 | 2127 | ||
2259 | /* Allocate memory for buffer descriptors. | 2128 | /* Allocate memory for buffer descriptors. |
2260 | */ | 2129 | */ |
2261 | mem_addr = __get_free_page(GFP_KERNEL); | 2130 | mem_addr = (unsigned long)dma_alloc_coherent(NULL, PAGE_SIZE, |
2131 | &fep->bd_dma, GFP_KERNEL); | ||
2262 | if (mem_addr == 0) { | 2132 | if (mem_addr == 0) { |
2263 | printk("FEC: allocate descriptor memory failed?\n"); | 2133 | printk("FEC: allocate descriptor memory failed?\n"); |
2264 | return -ENOMEM; | 2134 | return -ENOMEM; |
@@ -2269,7 +2139,7 @@ int __init fec_enet_init(struct net_device *dev) | |||
2269 | 2139 | ||
2270 | /* Create an Ethernet device instance. | 2140 | /* Create an Ethernet device instance. |
2271 | */ | 2141 | */ |
2272 | fecp = (volatile fec_t *) fec_hw[index]; | 2142 | fecp = (volatile fec_t *)dev->base_addr; |
2273 | 2143 | ||
2274 | fep->index = index; | 2144 | fep->index = index; |
2275 | fep->hwp = fecp; | 2145 | fep->hwp = fecp; |
@@ -2280,18 +2150,24 @@ int __init fec_enet_init(struct net_device *dev) | |||
2280 | fecp->fec_ecntrl = 1; | 2150 | fecp->fec_ecntrl = 1; |
2281 | udelay(10); | 2151 | udelay(10); |
2282 | 2152 | ||
2283 | /* Set the Ethernet address. If using multiple Enets on the 8xx, | 2153 | /* Set the Ethernet address */ |
2284 | * this needs some work to get unique addresses. | 2154 | #ifdef FEC_LEGACY |
2285 | * | ||
2286 | * This is our default MAC address unless the user changes | ||
2287 | * it via eth_mac_addr (our dev->set_mac_addr handler). | ||
2288 | */ | ||
2289 | fec_get_mac(dev); | 2155 | fec_get_mac(dev); |
2156 | #else | ||
2157 | { | ||
2158 | unsigned long l; | ||
2159 | l = fecp->fec_addr_low; | ||
2160 | dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24); | ||
2161 | dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16); | ||
2162 | dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8); | ||
2163 | dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0); | ||
2164 | l = fecp->fec_addr_high; | ||
2165 | dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24); | ||
2166 | dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16); | ||
2167 | } | ||
2168 | #endif | ||
2290 | 2169 | ||
2291 | cbd_base = (cbd_t *)mem_addr; | 2170 | cbd_base = (cbd_t *)mem_addr; |
2292 | /* XXX: missing check for allocation failure */ | ||
2293 | |||
2294 | fec_uncache(mem_addr); | ||
2295 | 2171 | ||
2296 | /* Set receive and transmit descriptor base. | 2172 | /* Set receive and transmit descriptor base. |
2297 | */ | 2173 | */ |
@@ -2313,8 +2189,6 @@ int __init fec_enet_init(struct net_device *dev) | |||
2313 | mem_addr = __get_free_page(GFP_KERNEL); | 2189 | mem_addr = __get_free_page(GFP_KERNEL); |
2314 | /* XXX: missing check for allocation failure */ | 2190 | /* XXX: missing check for allocation failure */ |
2315 | 2191 | ||
2316 | fec_uncache(mem_addr); | ||
2317 | |||
2318 | /* Initialize the BD for every fragment in the page. | 2192 | /* Initialize the BD for every fragment in the page. |
2319 | */ | 2193 | */ |
2320 | for (j=0; j<FEC_ENET_RX_FRPPG; j++) { | 2194 | for (j=0; j<FEC_ENET_RX_FRPPG; j++) { |
@@ -2357,13 +2231,16 @@ int __init fec_enet_init(struct net_device *dev) | |||
2357 | 2231 | ||
2358 | /* Set receive and transmit descriptor base. | 2232 | /* Set receive and transmit descriptor base. |
2359 | */ | 2233 | */ |
2360 | fecp->fec_r_des_start = __pa((uint)(fep->rx_bd_base)); | 2234 | fecp->fec_r_des_start = fep->bd_dma; |
2361 | fecp->fec_x_des_start = __pa((uint)(fep->tx_bd_base)); | 2235 | fecp->fec_x_des_start = (unsigned long)fep->bd_dma + sizeof(cbd_t) |
2236 | * RX_RING_SIZE; | ||
2362 | 2237 | ||
2238 | #ifdef FEC_LEGACY | ||
2363 | /* Install our interrupt handlers. This varies depending on | 2239 | /* Install our interrupt handlers. This varies depending on |
2364 | * the architecture. | 2240 | * the architecture. |
2365 | */ | 2241 | */ |
2366 | fec_request_intrs(dev); | 2242 | fec_request_intrs(dev); |
2243 | #endif | ||
2367 | 2244 | ||
2368 | fecp->fec_grp_hash_table_high = 0; | 2245 | fecp->fec_grp_hash_table_high = 0; |
2369 | fecp->fec_grp_hash_table_low = 0; | 2246 | fecp->fec_grp_hash_table_low = 0; |
@@ -2375,8 +2252,6 @@ int __init fec_enet_init(struct net_device *dev) | |||
2375 | fecp->fec_hash_table_low = 0; | 2252 | fecp->fec_hash_table_low = 0; |
2376 | #endif | 2253 | #endif |
2377 | 2254 | ||
2378 | dev->base_addr = (unsigned long)fecp; | ||
2379 | |||
2380 | /* The FEC Ethernet specific entries in the device structure. */ | 2255 | /* The FEC Ethernet specific entries in the device structure. */ |
2381 | dev->open = fec_enet_open; | 2256 | dev->open = fec_enet_open; |
2382 | dev->hard_start_xmit = fec_enet_start_xmit; | 2257 | dev->hard_start_xmit = fec_enet_start_xmit; |
@@ -2390,7 +2265,20 @@ int __init fec_enet_init(struct net_device *dev) | |||
2390 | mii_free = mii_cmds; | 2265 | mii_free = mii_cmds; |
2391 | 2266 | ||
2392 | /* setup MII interface */ | 2267 | /* setup MII interface */ |
2268 | #ifdef FEC_LEGACY | ||
2393 | fec_set_mii(dev, fep); | 2269 | fec_set_mii(dev, fep); |
2270 | #else | ||
2271 | fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04; | ||
2272 | fecp->fec_x_cntrl = 0x00; | ||
2273 | |||
2274 | /* | ||
2275 | * Set MII speed to 2.5 MHz | ||
2276 | */ | ||
2277 | fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999) | ||
2278 | / 2500000) / 2) & 0x3F) << 1; | ||
2279 | fecp->fec_mii_speed = fep->phy_speed; | ||
2280 | fec_restart(dev, 0); | ||
2281 | #endif | ||
2394 | 2282 | ||
2395 | /* Clear and enable interrupts */ | 2283 | /* Clear and enable interrupts */ |
2396 | fecp->fec_ievent = 0xffc00000; | 2284 | fecp->fec_ievent = 0xffc00000; |
@@ -2403,7 +2291,6 @@ int __init fec_enet_init(struct net_device *dev) | |||
2403 | fep->phy_addr = 0; | 2291 | fep->phy_addr = 0; |
2404 | mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy); | 2292 | mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy); |
2405 | 2293 | ||
2406 | index++; | ||
2407 | return 0; | 2294 | return 0; |
2408 | } | 2295 | } |
2409 | 2296 | ||
@@ -2430,7 +2317,6 @@ fec_restart(struct net_device *dev, int duplex) | |||
2430 | /* Clear any outstanding interrupt. | 2317 | /* Clear any outstanding interrupt. |
2431 | */ | 2318 | */ |
2432 | fecp->fec_ievent = 0xffc00000; | 2319 | fecp->fec_ievent = 0xffc00000; |
2433 | fec_enable_phy_intr(); | ||
2434 | 2320 | ||
2435 | /* Set station address. | 2321 | /* Set station address. |
2436 | */ | 2322 | */ |
@@ -2445,12 +2331,11 @@ fec_restart(struct net_device *dev, int duplex) | |||
2445 | */ | 2331 | */ |
2446 | fecp->fec_r_buff_size = PKT_MAXBLR_SIZE; | 2332 | fecp->fec_r_buff_size = PKT_MAXBLR_SIZE; |
2447 | 2333 | ||
2448 | fec_localhw_setup(); | ||
2449 | |||
2450 | /* Set receive and transmit descriptor base. | 2334 | /* Set receive and transmit descriptor base. |
2451 | */ | 2335 | */ |
2452 | fecp->fec_r_des_start = __pa((uint)(fep->rx_bd_base)); | 2336 | fecp->fec_r_des_start = fep->bd_dma; |
2453 | fecp->fec_x_des_start = __pa((uint)(fep->tx_bd_base)); | 2337 | fecp->fec_x_des_start = (unsigned long)fep->bd_dma + sizeof(cbd_t) |
2338 | * RX_RING_SIZE; | ||
2454 | 2339 | ||
2455 | fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; | 2340 | fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; |
2456 | fep->cur_rx = fep->rx_bd_base; | 2341 | fep->cur_rx = fep->rx_bd_base; |
@@ -2552,12 +2437,12 @@ fec_stop(struct net_device *dev) | |||
2552 | /* Clear outstanding MII command interrupts. | 2437 | /* Clear outstanding MII command interrupts. |
2553 | */ | 2438 | */ |
2554 | fecp->fec_ievent = FEC_ENET_MII; | 2439 | fecp->fec_ievent = FEC_ENET_MII; |
2555 | fec_enable_phy_intr(); | ||
2556 | 2440 | ||
2557 | fecp->fec_imask = FEC_ENET_MII; | 2441 | fecp->fec_imask = FEC_ENET_MII; |
2558 | fecp->fec_mii_speed = fep->phy_speed; | 2442 | fecp->fec_mii_speed = fep->phy_speed; |
2559 | } | 2443 | } |
2560 | 2444 | ||
2445 | #ifdef FEC_LEGACY | ||
2561 | static int __init fec_enet_module_init(void) | 2446 | static int __init fec_enet_module_init(void) |
2562 | { | 2447 | { |
2563 | struct net_device *dev; | 2448 | struct net_device *dev; |
@@ -2569,7 +2454,8 @@ static int __init fec_enet_module_init(void) | |||
2569 | dev = alloc_etherdev(sizeof(struct fec_enet_private)); | 2454 | dev = alloc_etherdev(sizeof(struct fec_enet_private)); |
2570 | if (!dev) | 2455 | if (!dev) |
2571 | return -ENOMEM; | 2456 | return -ENOMEM; |
2572 | err = fec_enet_init(dev); | 2457 | dev->base_addr = (unsigned long)fec_hw[i]; |
2458 | err = fec_enet_init(dev, i); | ||
2573 | if (err) { | 2459 | if (err) { |
2574 | free_netdev(dev); | 2460 | free_netdev(dev); |
2575 | continue; | 2461 | continue; |
@@ -2584,6 +2470,170 @@ static int __init fec_enet_module_init(void) | |||
2584 | } | 2470 | } |
2585 | return 0; | 2471 | return 0; |
2586 | } | 2472 | } |
2473 | #else | ||
2474 | |||
2475 | static int __devinit | ||
2476 | fec_probe(struct platform_device *pdev) | ||
2477 | { | ||
2478 | struct fec_enet_private *fep; | ||
2479 | struct net_device *ndev; | ||
2480 | int i, irq, ret = 0; | ||
2481 | struct resource *r; | ||
2482 | |||
2483 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
2484 | if (!r) | ||
2485 | return -ENXIO; | ||
2486 | |||
2487 | r = request_mem_region(r->start, resource_size(r), pdev->name); | ||
2488 | if (!r) | ||
2489 | return -EBUSY; | ||
2490 | |||
2491 | /* Init network device */ | ||
2492 | ndev = alloc_etherdev(sizeof(struct fec_enet_private)); | ||
2493 | if (!ndev) | ||
2494 | return -ENOMEM; | ||
2495 | |||
2496 | SET_NETDEV_DEV(ndev, &pdev->dev); | ||
2497 | |||
2498 | /* setup board info structure */ | ||
2499 | fep = netdev_priv(ndev); | ||
2500 | memset(fep, 0, sizeof(*fep)); | ||
2501 | |||
2502 | ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r)); | ||
2503 | |||
2504 | if (!ndev->base_addr) { | ||
2505 | ret = -ENOMEM; | ||
2506 | goto failed_ioremap; | ||
2507 | } | ||
2508 | |||
2509 | platform_set_drvdata(pdev, ndev); | ||
2510 | |||
2511 | /* This device has up to three irqs on some platforms */ | ||
2512 | for (i = 0; i < 3; i++) { | ||
2513 | irq = platform_get_irq(pdev, i); | ||
2514 | if (i && irq < 0) | ||
2515 | break; | ||
2516 | ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev); | ||
2517 | if (ret) { | ||
2518 | while (i >= 0) { | ||
2519 | irq = platform_get_irq(pdev, i); | ||
2520 | free_irq(irq, ndev); | ||
2521 | i--; | ||
2522 | } | ||
2523 | goto failed_irq; | ||
2524 | } | ||
2525 | } | ||
2526 | |||
2527 | fep->clk = clk_get(&pdev->dev, "fec_clk"); | ||
2528 | if (IS_ERR(fep->clk)) { | ||
2529 | ret = PTR_ERR(fep->clk); | ||
2530 | goto failed_clk; | ||
2531 | } | ||
2532 | clk_enable(fep->clk); | ||
2533 | |||
2534 | ret = fec_enet_init(ndev, 0); | ||
2535 | if (ret) | ||
2536 | goto failed_init; | ||
2537 | |||
2538 | ret = register_netdev(ndev); | ||
2539 | if (ret) | ||
2540 | goto failed_register; | ||
2541 | |||
2542 | return 0; | ||
2543 | |||
2544 | failed_register: | ||
2545 | failed_init: | ||
2546 | clk_disable(fep->clk); | ||
2547 | clk_put(fep->clk); | ||
2548 | failed_clk: | ||
2549 | for (i = 0; i < 3; i++) { | ||
2550 | irq = platform_get_irq(pdev, i); | ||
2551 | if (irq > 0) | ||
2552 | free_irq(irq, ndev); | ||
2553 | } | ||
2554 | failed_irq: | ||
2555 | iounmap((void __iomem *)ndev->base_addr); | ||
2556 | failed_ioremap: | ||
2557 | free_netdev(ndev); | ||
2558 | |||
2559 | return ret; | ||
2560 | } | ||
2561 | |||
2562 | static int __devexit | ||
2563 | fec_drv_remove(struct platform_device *pdev) | ||
2564 | { | ||
2565 | struct net_device *ndev = platform_get_drvdata(pdev); | ||
2566 | struct fec_enet_private *fep = netdev_priv(ndev); | ||
2567 | |||
2568 | platform_set_drvdata(pdev, NULL); | ||
2569 | |||
2570 | fec_stop(ndev); | ||
2571 | clk_disable(fep->clk); | ||
2572 | clk_put(fep->clk); | ||
2573 | iounmap((void __iomem *)ndev->base_addr); | ||
2574 | unregister_netdev(ndev); | ||
2575 | free_netdev(ndev); | ||
2576 | return 0; | ||
2577 | } | ||
2578 | |||
2579 | static int | ||
2580 | fec_suspend(struct platform_device *dev, pm_message_t state) | ||
2581 | { | ||
2582 | struct net_device *ndev = platform_get_drvdata(dev); | ||
2583 | struct fec_enet_private *fep; | ||
2584 | |||
2585 | if (ndev) { | ||
2586 | fep = netdev_priv(ndev); | ||
2587 | if (netif_running(ndev)) { | ||
2588 | netif_device_detach(ndev); | ||
2589 | fec_stop(ndev); | ||
2590 | } | ||
2591 | } | ||
2592 | return 0; | ||
2593 | } | ||
2594 | |||
2595 | static int | ||
2596 | fec_resume(struct platform_device *dev) | ||
2597 | { | ||
2598 | struct net_device *ndev = platform_get_drvdata(dev); | ||
2599 | |||
2600 | if (ndev) { | ||
2601 | if (netif_running(ndev)) { | ||
2602 | fec_enet_init(ndev, 0); | ||
2603 | netif_device_attach(ndev); | ||
2604 | } | ||
2605 | } | ||
2606 | return 0; | ||
2607 | } | ||
2608 | |||
2609 | static struct platform_driver fec_driver = { | ||
2610 | .driver = { | ||
2611 | .name = "fec", | ||
2612 | .owner = THIS_MODULE, | ||
2613 | }, | ||
2614 | .probe = fec_probe, | ||
2615 | .remove = __devexit_p(fec_drv_remove), | ||
2616 | .suspend = fec_suspend, | ||
2617 | .resume = fec_resume, | ||
2618 | }; | ||
2619 | |||
2620 | static int __init | ||
2621 | fec_enet_module_init(void) | ||
2622 | { | ||
2623 | printk(KERN_INFO "FEC Ethernet Driver\n"); | ||
2624 | |||
2625 | return platform_driver_register(&fec_driver); | ||
2626 | } | ||
2627 | |||
2628 | static void __exit | ||
2629 | fec_enet_cleanup(void) | ||
2630 | { | ||
2631 | platform_driver_unregister(&fec_driver); | ||
2632 | } | ||
2633 | |||
2634 | module_exit(fec_enet_cleanup); | ||
2635 | |||
2636 | #endif /* FEC_LEGACY */ | ||
2587 | 2637 | ||
2588 | module_init(fec_enet_module_init); | 2638 | module_init(fec_enet_module_init); |
2589 | 2639 | ||
diff --git a/drivers/net/fec.h b/drivers/net/fec.h index 292719daceff..76c64c92e190 100644 --- a/drivers/net/fec.h +++ b/drivers/net/fec.h | |||
@@ -14,7 +14,7 @@ | |||
14 | /****************************************************************************/ | 14 | /****************************************************************************/ |
15 | 15 | ||
16 | #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ | 16 | #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ |
17 | defined(CONFIG_M520x) || defined(CONFIG_M532x) | 17 | defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC) |
18 | /* | 18 | /* |
19 | * Just figures, Motorola would have to change the offsets for | 19 | * Just figures, Motorola would have to change the offsets for |
20 | * registers in the same peripheral device on different models | 20 | * registers in the same peripheral device on different models |
@@ -103,12 +103,19 @@ typedef struct fec { | |||
103 | /* | 103 | /* |
104 | * Define the buffer descriptor structure. | 104 | * Define the buffer descriptor structure. |
105 | */ | 105 | */ |
106 | #ifdef CONFIG_ARCH_MXC | ||
107 | typedef struct bufdesc { | ||
108 | unsigned short cbd_datlen; /* Data length */ | ||
109 | unsigned short cbd_sc; /* Control and status info */ | ||
110 | unsigned long cbd_bufaddr; /* Buffer address */ | ||
111 | } cbd_t; | ||
112 | #else | ||
106 | typedef struct bufdesc { | 113 | typedef struct bufdesc { |
107 | unsigned short cbd_sc; /* Control and status info */ | 114 | unsigned short cbd_sc; /* Control and status info */ |
108 | unsigned short cbd_datlen; /* Data length */ | 115 | unsigned short cbd_datlen; /* Data length */ |
109 | unsigned long cbd_bufaddr; /* Buffer address */ | 116 | unsigned long cbd_bufaddr; /* Buffer address */ |
110 | } cbd_t; | 117 | } cbd_t; |
111 | 118 | #endif | |
112 | 119 | ||
113 | /* | 120 | /* |
114 | * The following definitions courtesy of commproc.h, which where | 121 | * The following definitions courtesy of commproc.h, which where |
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 5b910cf63740..875509d7d86b 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -1760,7 +1760,7 @@ static void nv_do_rx_refill(unsigned long data) | |||
1760 | struct fe_priv *np = netdev_priv(dev); | 1760 | struct fe_priv *np = netdev_priv(dev); |
1761 | 1761 | ||
1762 | /* Just reschedule NAPI rx processing */ | 1762 | /* Just reschedule NAPI rx processing */ |
1763 | netif_rx_schedule(&np->napi); | 1763 | napi_schedule(&np->napi); |
1764 | } | 1764 | } |
1765 | #else | 1765 | #else |
1766 | static void nv_do_rx_refill(unsigned long data) | 1766 | static void nv_do_rx_refill(unsigned long data) |
@@ -3406,7 +3406,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data) | |||
3406 | #ifdef CONFIG_FORCEDETH_NAPI | 3406 | #ifdef CONFIG_FORCEDETH_NAPI |
3407 | if (events & NVREG_IRQ_RX_ALL) { | 3407 | if (events & NVREG_IRQ_RX_ALL) { |
3408 | spin_lock(&np->lock); | 3408 | spin_lock(&np->lock); |
3409 | netif_rx_schedule(&np->napi); | 3409 | napi_schedule(&np->napi); |
3410 | 3410 | ||
3411 | /* Disable furthur receive irq's */ | 3411 | /* Disable furthur receive irq's */ |
3412 | np->irqmask &= ~NVREG_IRQ_RX_ALL; | 3412 | np->irqmask &= ~NVREG_IRQ_RX_ALL; |
@@ -3523,7 +3523,7 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data) | |||
3523 | #ifdef CONFIG_FORCEDETH_NAPI | 3523 | #ifdef CONFIG_FORCEDETH_NAPI |
3524 | if (events & NVREG_IRQ_RX_ALL) { | 3524 | if (events & NVREG_IRQ_RX_ALL) { |
3525 | spin_lock(&np->lock); | 3525 | spin_lock(&np->lock); |
3526 | netif_rx_schedule(&np->napi); | 3526 | napi_schedule(&np->napi); |
3527 | 3527 | ||
3528 | /* Disable furthur receive irq's */ | 3528 | /* Disable furthur receive irq's */ |
3529 | np->irqmask &= ~NVREG_IRQ_RX_ALL; | 3529 | np->irqmask &= ~NVREG_IRQ_RX_ALL; |
@@ -3680,7 +3680,7 @@ static int nv_napi_poll(struct napi_struct *napi, int budget) | |||
3680 | /* re-enable receive interrupts */ | 3680 | /* re-enable receive interrupts */ |
3681 | spin_lock_irqsave(&np->lock, flags); | 3681 | spin_lock_irqsave(&np->lock, flags); |
3682 | 3682 | ||
3683 | __netif_rx_complete(napi); | 3683 | __napi_complete(napi); |
3684 | 3684 | ||
3685 | np->irqmask |= NVREG_IRQ_RX_ALL; | 3685 | np->irqmask |= NVREG_IRQ_RX_ALL; |
3686 | if (np->msi_flags & NV_MSI_X_ENABLED) | 3686 | if (np->msi_flags & NV_MSI_X_ENABLED) |
@@ -3706,7 +3706,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data) | |||
3706 | writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); | 3706 | writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); |
3707 | 3707 | ||
3708 | if (events) { | 3708 | if (events) { |
3709 | netif_rx_schedule(&np->napi); | 3709 | napi_schedule(&np->napi); |
3710 | /* disable receive interrupts on the nic */ | 3710 | /* disable receive interrupts on the nic */ |
3711 | writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); | 3711 | writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); |
3712 | pci_push(base); | 3712 | pci_push(base); |
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index ce900e54d8d1..b037ce9857bf 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c | |||
@@ -209,7 +209,7 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget) | |||
209 | 209 | ||
210 | if (received < budget) { | 210 | if (received < budget) { |
211 | /* done */ | 211 | /* done */ |
212 | netif_rx_complete(napi); | 212 | napi_complete(napi); |
213 | (*fep->ops->napi_enable_rx)(dev); | 213 | (*fep->ops->napi_enable_rx)(dev); |
214 | } | 214 | } |
215 | return received; | 215 | return received; |
@@ -478,7 +478,7 @@ fs_enet_interrupt(int irq, void *dev_id) | |||
478 | /* NOTE: it is possible for FCCs in NAPI mode */ | 478 | /* NOTE: it is possible for FCCs in NAPI mode */ |
479 | /* to submit a spurious interrupt while in poll */ | 479 | /* to submit a spurious interrupt while in poll */ |
480 | if (napi_ok) | 480 | if (napi_ok) |
481 | __netif_rx_schedule(&fep->napi); | 481 | __napi_schedule(&fep->napi); |
482 | } | 482 | } |
483 | } | 483 | } |
484 | 484 | ||
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 3f7eab42aef1..eb8302c5ba8c 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -141,8 +141,6 @@ void gfar_start(struct net_device *dev); | |||
141 | static void gfar_clear_exact_match(struct net_device *dev); | 141 | static void gfar_clear_exact_match(struct net_device *dev); |
142 | static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr); | 142 | static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr); |
143 | 143 | ||
144 | extern const struct ethtool_ops gfar_ethtool_ops; | ||
145 | |||
146 | MODULE_AUTHOR("Freescale Semiconductor, Inc"); | 144 | MODULE_AUTHOR("Freescale Semiconductor, Inc"); |
147 | MODULE_DESCRIPTION("Gianfar Ethernet Driver"); | 145 | MODULE_DESCRIPTION("Gianfar Ethernet Driver"); |
148 | MODULE_LICENSE("GPL"); | 146 | MODULE_LICENSE("GPL"); |
@@ -463,6 +461,9 @@ static int gfar_probe(struct of_device *ofdev, | |||
463 | goto register_fail; | 461 | goto register_fail; |
464 | } | 462 | } |
465 | 463 | ||
464 | device_init_wakeup(&dev->dev, | ||
465 | priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); | ||
466 | |||
466 | /* fill out IRQ number and name fields */ | 467 | /* fill out IRQ number and name fields */ |
467 | len_devname = strlen(dev->name); | 468 | len_devname = strlen(dev->name); |
468 | strncpy(&priv->int_name_tx[0], dev->name, len_devname); | 469 | strncpy(&priv->int_name_tx[0], dev->name, len_devname); |
@@ -1200,6 +1201,8 @@ static int gfar_enet_open(struct net_device *dev) | |||
1200 | 1201 | ||
1201 | netif_start_queue(dev); | 1202 | netif_start_queue(dev); |
1202 | 1203 | ||
1204 | device_set_wakeup_enable(&dev->dev, priv->wol_en); | ||
1205 | |||
1203 | return err; | 1206 | return err; |
1204 | } | 1207 | } |
1205 | 1208 | ||
@@ -1623,9 +1626,9 @@ static void gfar_schedule_cleanup(struct net_device *dev) | |||
1623 | spin_lock_irqsave(&priv->txlock, flags); | 1626 | spin_lock_irqsave(&priv->txlock, flags); |
1624 | spin_lock(&priv->rxlock); | 1627 | spin_lock(&priv->rxlock); |
1625 | 1628 | ||
1626 | if (netif_rx_schedule_prep(&priv->napi)) { | 1629 | if (napi_schedule_prep(&priv->napi)) { |
1627 | gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED); | 1630 | gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED); |
1628 | __netif_rx_schedule(&priv->napi); | 1631 | __napi_schedule(&priv->napi); |
1629 | } | 1632 | } |
1630 | 1633 | ||
1631 | spin_unlock(&priv->rxlock); | 1634 | spin_unlock(&priv->rxlock); |
@@ -1882,7 +1885,7 @@ static int gfar_poll(struct napi_struct *napi, int budget) | |||
1882 | return budget; | 1885 | return budget; |
1883 | 1886 | ||
1884 | if (rx_cleaned < budget) { | 1887 | if (rx_cleaned < budget) { |
1885 | netif_rx_complete(napi); | 1888 | napi_complete(napi); |
1886 | 1889 | ||
1887 | /* Clear the halt bit in RSTAT */ | 1890 | /* Clear the halt bit in RSTAT */ |
1888 | gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT); | 1891 | gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT); |
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h index b1a83344acc7..7820720ceeed 100644 --- a/drivers/net/gianfar.h +++ b/drivers/net/gianfar.h | |||
@@ -830,4 +830,6 @@ int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id, | |||
830 | int regnum, u16 value); | 830 | int regnum, u16 value); |
831 | int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum); | 831 | int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum); |
832 | 832 | ||
833 | extern const struct ethtool_ops gfar_ethtool_ops; | ||
834 | |||
833 | #endif /* __GIANFAR_H */ | 835 | #endif /* __GIANFAR_H */ |
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c index 59b3b5d98efe..dbf06e9313cc 100644 --- a/drivers/net/gianfar_ethtool.c +++ b/drivers/net/gianfar_ethtool.c | |||
@@ -600,6 +600,7 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |||
600 | 600 | ||
601 | spin_lock_irqsave(&priv->bflock, flags); | 601 | spin_lock_irqsave(&priv->bflock, flags); |
602 | priv->wol_en = wol->wolopts & WAKE_MAGIC ? 1 : 0; | 602 | priv->wol_en = wol->wolopts & WAKE_MAGIC ? 1 : 0; |
603 | device_set_wakeup_enable(&dev->dev, priv->wol_en); | ||
603 | spin_unlock_irqrestore(&priv->bflock, flags); | 604 | spin_unlock_irqrestore(&priv->bflock, flags); |
604 | 605 | ||
605 | return 0; | 606 | return 0; |
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c index f49a426ad681..64e4679b3279 100644 --- a/drivers/net/gianfar_mii.c +++ b/drivers/net/gianfar_mii.c | |||
@@ -105,7 +105,7 @@ int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum) | |||
105 | * All PHY configuration is done through the TSEC1 MIIM regs */ | 105 | * All PHY configuration is done through the TSEC1 MIIM regs */ |
106 | int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) | 106 | int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) |
107 | { | 107 | { |
108 | struct gfar_mii __iomem *regs = (void __iomem *)bus->priv; | 108 | struct gfar_mii __iomem *regs = (void __force __iomem *)bus->priv; |
109 | 109 | ||
110 | /* Write to the local MII regs */ | 110 | /* Write to the local MII regs */ |
111 | return(gfar_local_mdio_write(regs, mii_id, regnum, value)); | 111 | return(gfar_local_mdio_write(regs, mii_id, regnum, value)); |
@@ -116,7 +116,7 @@ int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) | |||
116 | * configuration has to be done through the TSEC1 MIIM regs */ | 116 | * configuration has to be done through the TSEC1 MIIM regs */ |
117 | int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum) | 117 | int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum) |
118 | { | 118 | { |
119 | struct gfar_mii __iomem *regs = (void __iomem *)bus->priv; | 119 | struct gfar_mii __iomem *regs = (void __force __iomem *)bus->priv; |
120 | 120 | ||
121 | /* Read the local MII regs */ | 121 | /* Read the local MII regs */ |
122 | return(gfar_local_mdio_read(regs, mii_id, regnum)); | 122 | return(gfar_local_mdio_read(regs, mii_id, regnum)); |
@@ -125,7 +125,7 @@ int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum) | |||
125 | /* Reset the MIIM registers, and wait for the bus to free */ | 125 | /* Reset the MIIM registers, and wait for the bus to free */ |
126 | static int gfar_mdio_reset(struct mii_bus *bus) | 126 | static int gfar_mdio_reset(struct mii_bus *bus) |
127 | { | 127 | { |
128 | struct gfar_mii __iomem *regs = (void __iomem *)bus->priv; | 128 | struct gfar_mii __iomem *regs = (void __force __iomem *)bus->priv; |
129 | unsigned int timeout = PHY_INIT_TIMEOUT; | 129 | unsigned int timeout = PHY_INIT_TIMEOUT; |
130 | 130 | ||
131 | mutex_lock(&bus->mdio_lock); | 131 | mutex_lock(&bus->mdio_lock); |
@@ -268,8 +268,8 @@ static int gfar_mdio_probe(struct of_device *ofdev, | |||
268 | * Also, we have to cast back to struct gfar_mii because of | 268 | * Also, we have to cast back to struct gfar_mii because of |
269 | * definition weirdness done in gianfar.h. | 269 | * definition weirdness done in gianfar.h. |
270 | */ | 270 | */ |
271 | enet_regs = (struct gfar __iomem *) | 271 | enet_regs = (struct gfar __force __iomem *) |
272 | ((char *)regs - offsetof(struct gfar, gfar_mii_regs)); | 272 | ((char __force *)regs - offsetof(struct gfar, gfar_mii_regs)); |
273 | 273 | ||
274 | for_each_child_of_node(np, tbi) { | 274 | for_each_child_of_node(np, tbi) { |
275 | if (!strncmp(tbi->type, "tbi-phy", 8)) | 275 | if (!strncmp(tbi->type, "tbi-phy", 8)) |
@@ -337,7 +337,7 @@ static int gfar_mdio_remove(struct of_device *ofdev) | |||
337 | 337 | ||
338 | dev_set_drvdata(&ofdev->dev, NULL); | 338 | dev_set_drvdata(&ofdev->dev, NULL); |
339 | 339 | ||
340 | iounmap((void __iomem *)bus->priv); | 340 | iounmap((void __force __iomem *)bus->priv); |
341 | bus->priv = NULL; | 341 | bus->priv = NULL; |
342 | kfree(bus->irq); | 342 | kfree(bus->irq); |
343 | mdiobus_free(bus); | 343 | mdiobus_free(bus); |
diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c index 782c20170082..74e0b4d42587 100644 --- a/drivers/net/gianfar_sysfs.c +++ b/drivers/net/gianfar_sysfs.c | |||
@@ -81,7 +81,7 @@ static ssize_t gfar_set_bd_stash(struct device *dev, | |||
81 | return count; | 81 | return count; |
82 | } | 82 | } |
83 | 83 | ||
84 | DEVICE_ATTR(bd_stash, 0644, gfar_show_bd_stash, gfar_set_bd_stash); | 84 | static DEVICE_ATTR(bd_stash, 0644, gfar_show_bd_stash, gfar_set_bd_stash); |
85 | 85 | ||
86 | static ssize_t gfar_show_rx_stash_size(struct device *dev, | 86 | static ssize_t gfar_show_rx_stash_size(struct device *dev, |
87 | struct device_attribute *attr, char *buf) | 87 | struct device_attribute *attr, char *buf) |
@@ -130,8 +130,8 @@ out: | |||
130 | return count; | 130 | return count; |
131 | } | 131 | } |
132 | 132 | ||
133 | DEVICE_ATTR(rx_stash_size, 0644, gfar_show_rx_stash_size, | 133 | static DEVICE_ATTR(rx_stash_size, 0644, gfar_show_rx_stash_size, |
134 | gfar_set_rx_stash_size); | 134 | gfar_set_rx_stash_size); |
135 | 135 | ||
136 | /* Stashing will only be enabled when rx_stash_size != 0 */ | 136 | /* Stashing will only be enabled when rx_stash_size != 0 */ |
137 | static ssize_t gfar_show_rx_stash_index(struct device *dev, | 137 | static ssize_t gfar_show_rx_stash_index(struct device *dev, |
@@ -172,8 +172,8 @@ out: | |||
172 | return count; | 172 | return count; |
173 | } | 173 | } |
174 | 174 | ||
175 | DEVICE_ATTR(rx_stash_index, 0644, gfar_show_rx_stash_index, | 175 | static DEVICE_ATTR(rx_stash_index, 0644, gfar_show_rx_stash_index, |
176 | gfar_set_rx_stash_index); | 176 | gfar_set_rx_stash_index); |
177 | 177 | ||
178 | static ssize_t gfar_show_fifo_threshold(struct device *dev, | 178 | static ssize_t gfar_show_fifo_threshold(struct device *dev, |
179 | struct device_attribute *attr, | 179 | struct device_attribute *attr, |
@@ -210,8 +210,8 @@ static ssize_t gfar_set_fifo_threshold(struct device *dev, | |||
210 | return count; | 210 | return count; |
211 | } | 211 | } |
212 | 212 | ||
213 | DEVICE_ATTR(fifo_threshold, 0644, gfar_show_fifo_threshold, | 213 | static DEVICE_ATTR(fifo_threshold, 0644, gfar_show_fifo_threshold, |
214 | gfar_set_fifo_threshold); | 214 | gfar_set_fifo_threshold); |
215 | 215 | ||
216 | static ssize_t gfar_show_fifo_starve(struct device *dev, | 216 | static ssize_t gfar_show_fifo_starve(struct device *dev, |
217 | struct device_attribute *attr, char *buf) | 217 | struct device_attribute *attr, char *buf) |
@@ -247,7 +247,8 @@ static ssize_t gfar_set_fifo_starve(struct device *dev, | |||
247 | return count; | 247 | return count; |
248 | } | 248 | } |
249 | 249 | ||
250 | DEVICE_ATTR(fifo_starve, 0644, gfar_show_fifo_starve, gfar_set_fifo_starve); | 250 | static DEVICE_ATTR(fifo_starve, 0644, gfar_show_fifo_starve, |
251 | gfar_set_fifo_starve); | ||
251 | 252 | ||
252 | static ssize_t gfar_show_fifo_starve_off(struct device *dev, | 253 | static ssize_t gfar_show_fifo_starve_off(struct device *dev, |
253 | struct device_attribute *attr, | 254 | struct device_attribute *attr, |
@@ -284,8 +285,8 @@ static ssize_t gfar_set_fifo_starve_off(struct device *dev, | |||
284 | return count; | 285 | return count; |
285 | } | 286 | } |
286 | 287 | ||
287 | DEVICE_ATTR(fifo_starve_off, 0644, gfar_show_fifo_starve_off, | 288 | static DEVICE_ATTR(fifo_starve_off, 0644, gfar_show_fifo_starve_off, |
288 | gfar_set_fifo_starve_off); | 289 | gfar_set_fifo_starve_off); |
289 | 290 | ||
290 | void gfar_init_sysfs(struct net_device *dev) | 291 | void gfar_init_sysfs(struct net_device *dev) |
291 | { | 292 | { |
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c index 7e8b3c59a7d6..455641f8677e 100644 --- a/drivers/net/hamachi.c +++ b/drivers/net/hamachi.c | |||
@@ -1244,7 +1244,7 @@ do { \ | |||
1244 | csum_add(sum, (ih)->saddr & 0xffff); \ | 1244 | csum_add(sum, (ih)->saddr & 0xffff); \ |
1245 | csum_add(sum, (ih)->daddr >> 16); \ | 1245 | csum_add(sum, (ih)->daddr >> 16); \ |
1246 | csum_add(sum, (ih)->daddr & 0xffff); \ | 1246 | csum_add(sum, (ih)->daddr & 0xffff); \ |
1247 | csum_add(sum, __constant_htons(IPPROTO_UDP)); \ | 1247 | csum_add(sum, cpu_to_be16(IPPROTO_UDP)); \ |
1248 | csum_add(sum, (uh)->len); \ | 1248 | csum_add(sum, (uh)->len); \ |
1249 | } while (0) | 1249 | } while (0) |
1250 | 1250 | ||
@@ -1255,7 +1255,7 @@ do { \ | |||
1255 | csum_add(sum, (ih)->saddr & 0xffff); \ | 1255 | csum_add(sum, (ih)->saddr & 0xffff); \ |
1256 | csum_add(sum, (ih)->daddr >> 16); \ | 1256 | csum_add(sum, (ih)->daddr >> 16); \ |
1257 | csum_add(sum, (ih)->daddr & 0xffff); \ | 1257 | csum_add(sum, (ih)->daddr & 0xffff); \ |
1258 | csum_add(sum, __constant_htons(IPPROTO_TCP)); \ | 1258 | csum_add(sum, cpu_to_be16(IPPROTO_TCP)); \ |
1259 | csum_add(sum, htons(len)); \ | 1259 | csum_add(sum, htons(len)); \ |
1260 | } while (0) | 1260 | } while (0) |
1261 | #endif | 1261 | #endif |
@@ -1296,7 +1296,7 @@ static int hamachi_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1296 | /* tack on checksum tag */ | 1296 | /* tack on checksum tag */ |
1297 | u32 tagval = 0; | 1297 | u32 tagval = 0; |
1298 | struct ethhdr *eh = (struct ethhdr *)skb->data; | 1298 | struct ethhdr *eh = (struct ethhdr *)skb->data; |
1299 | if (eh->h_proto == __constant_htons(ETH_P_IP)) { | 1299 | if (eh->h_proto == cpu_to_be16(ETH_P_IP)) { |
1300 | struct iphdr *ih = (struct iphdr *)((char *)eh + ETH_HLEN); | 1300 | struct iphdr *ih = (struct iphdr *)((char *)eh + ETH_HLEN); |
1301 | if (ih->protocol == IPPROTO_UDP) { | 1301 | if (ih->protocol == IPPROTO_UDP) { |
1302 | struct udphdr *uh | 1302 | struct udphdr *uh |
@@ -1605,7 +1605,7 @@ static int hamachi_rx(struct net_device *dev) | |||
1605 | */ | 1605 | */ |
1606 | if (ntohs(ih->tot_len) >= 46){ | 1606 | if (ntohs(ih->tot_len) >= 46){ |
1607 | /* don't worry about frags */ | 1607 | /* don't worry about frags */ |
1608 | if (!(ih->frag_off & __constant_htons(IP_MF|IP_OFFSET))) { | 1608 | if (!(ih->frag_off & cpu_to_be16(IP_MF|IP_OFFSET))) { |
1609 | u32 inv = *(u32 *) &buf_addr[data_size - 16]; | 1609 | u32 inv = *(u32 *) &buf_addr[data_size - 16]; |
1610 | u32 *p = (u32 *) &buf_addr[data_size - 20]; | 1610 | u32 *p = (u32 *) &buf_addr[data_size - 20]; |
1611 | register u32 crc, p_r, p_r1; | 1611 | register u32 crc, p_r, p_r1; |
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 2d4089894ec7..3da9f394b4c6 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c | |||
@@ -322,23 +322,25 @@ static const struct header_ops sp_header_ops = { | |||
322 | .rebuild = sp_rebuild_header, | 322 | .rebuild = sp_rebuild_header, |
323 | }; | 323 | }; |
324 | 324 | ||
325 | static const struct net_device_ops sp_netdev_ops = { | ||
326 | .ndo_open = sp_open_dev, | ||
327 | .ndo_stop = sp_close, | ||
328 | .ndo_start_xmit = sp_xmit, | ||
329 | .ndo_set_mac_address = sp_set_mac_address, | ||
330 | }; | ||
331 | |||
325 | static void sp_setup(struct net_device *dev) | 332 | static void sp_setup(struct net_device *dev) |
326 | { | 333 | { |
327 | /* Finish setting up the DEVICE info. */ | 334 | /* Finish setting up the DEVICE info. */ |
328 | dev->mtu = SIXP_MTU; | 335 | dev->netdev_ops = &sp_netdev_ops; |
329 | dev->hard_start_xmit = sp_xmit; | ||
330 | dev->open = sp_open_dev; | ||
331 | dev->destructor = free_netdev; | 336 | dev->destructor = free_netdev; |
332 | dev->stop = sp_close; | 337 | dev->mtu = SIXP_MTU; |
333 | |||
334 | dev->set_mac_address = sp_set_mac_address; | ||
335 | dev->hard_header_len = AX25_MAX_HEADER_LEN; | 338 | dev->hard_header_len = AX25_MAX_HEADER_LEN; |
336 | dev->header_ops = &sp_header_ops; | 339 | dev->header_ops = &sp_header_ops; |
337 | 340 | ||
338 | dev->addr_len = AX25_ADDR_LEN; | 341 | dev->addr_len = AX25_ADDR_LEN; |
339 | dev->type = ARPHRD_AX25; | 342 | dev->type = ARPHRD_AX25; |
340 | dev->tx_queue_len = 10; | 343 | dev->tx_queue_len = 10; |
341 | dev->tx_timeout = NULL; | ||
342 | 344 | ||
343 | /* Only activated in AX.25 mode */ | 345 | /* Only activated in AX.25 mode */ |
344 | memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); | 346 | memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); |
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index 81a65e3a1c05..bb78c11559cd 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c | |||
@@ -203,7 +203,6 @@ struct baycom_state { | |||
203 | unsigned char buf[TXBUFFER_SIZE]; | 203 | unsigned char buf[TXBUFFER_SIZE]; |
204 | } hdlctx; | 204 | } hdlctx; |
205 | 205 | ||
206 | struct net_device_stats stats; | ||
207 | unsigned int ptt_keyed; | 206 | unsigned int ptt_keyed; |
208 | struct sk_buff *skb; /* next transmit packet */ | 207 | struct sk_buff *skb; /* next transmit packet */ |
209 | 208 | ||
@@ -423,7 +422,7 @@ static void encode_hdlc(struct baycom_state *bc) | |||
423 | bc->hdlctx.bufptr = bc->hdlctx.buf; | 422 | bc->hdlctx.bufptr = bc->hdlctx.buf; |
424 | bc->hdlctx.bufcnt = wp - bc->hdlctx.buf; | 423 | bc->hdlctx.bufcnt = wp - bc->hdlctx.buf; |
425 | dev_kfree_skb(skb); | 424 | dev_kfree_skb(skb); |
426 | bc->stats.tx_packets++; | 425 | bc->dev->stats.tx_packets++; |
427 | } | 426 | } |
428 | 427 | ||
429 | /* ---------------------------------------------------------------------- */ | 428 | /* ---------------------------------------------------------------------- */ |
@@ -547,7 +546,7 @@ static void do_rxpacket(struct net_device *dev) | |||
547 | pktlen = bc->hdlcrx.bufcnt-2+1; /* KISS kludge */ | 546 | pktlen = bc->hdlcrx.bufcnt-2+1; /* KISS kludge */ |
548 | if (!(skb = dev_alloc_skb(pktlen))) { | 547 | if (!(skb = dev_alloc_skb(pktlen))) { |
549 | printk("%s: memory squeeze, dropping packet\n", dev->name); | 548 | printk("%s: memory squeeze, dropping packet\n", dev->name); |
550 | bc->stats.rx_dropped++; | 549 | dev->stats.rx_dropped++; |
551 | return; | 550 | return; |
552 | } | 551 | } |
553 | cp = skb_put(skb, pktlen); | 552 | cp = skb_put(skb, pktlen); |
@@ -555,7 +554,7 @@ static void do_rxpacket(struct net_device *dev) | |||
555 | memcpy(cp, bc->hdlcrx.buf, pktlen - 1); | 554 | memcpy(cp, bc->hdlcrx.buf, pktlen - 1); |
556 | skb->protocol = ax25_type_trans(skb, dev); | 555 | skb->protocol = ax25_type_trans(skb, dev); |
557 | netif_rx(skb); | 556 | netif_rx(skb); |
558 | bc->stats.rx_packets++; | 557 | dev->stats.rx_packets++; |
559 | } | 558 | } |
560 | 559 | ||
561 | static int receive(struct net_device *dev, int cnt) | 560 | static int receive(struct net_device *dev, int cnt) |
@@ -802,19 +801,6 @@ static int baycom_set_mac_address(struct net_device *dev, void *addr) | |||
802 | 801 | ||
803 | /* --------------------------------------------------------------------- */ | 802 | /* --------------------------------------------------------------------- */ |
804 | 803 | ||
805 | static struct net_device_stats *baycom_get_stats(struct net_device *dev) | ||
806 | { | ||
807 | struct baycom_state *bc = netdev_priv(dev); | ||
808 | |||
809 | /* | ||
810 | * Get the current statistics. This may be called with the | ||
811 | * card open or closed. | ||
812 | */ | ||
813 | return &bc->stats; | ||
814 | } | ||
815 | |||
816 | /* --------------------------------------------------------------------- */ | ||
817 | |||
818 | static void epp_wakeup(void *handle) | 804 | static void epp_wakeup(void *handle) |
819 | { | 805 | { |
820 | struct net_device *dev = (struct net_device *)handle; | 806 | struct net_device *dev = (struct net_device *)handle; |
@@ -1065,10 +1051,10 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1065 | hi.data.cs.ptt = !!(bc->stat & EPP_PTTBIT); | 1051 | hi.data.cs.ptt = !!(bc->stat & EPP_PTTBIT); |
1066 | hi.data.cs.dcd = !(bc->stat & EPP_DCDBIT); | 1052 | hi.data.cs.dcd = !(bc->stat & EPP_DCDBIT); |
1067 | hi.data.cs.ptt_keyed = bc->ptt_keyed; | 1053 | hi.data.cs.ptt_keyed = bc->ptt_keyed; |
1068 | hi.data.cs.tx_packets = bc->stats.tx_packets; | 1054 | hi.data.cs.tx_packets = dev->stats.tx_packets; |
1069 | hi.data.cs.tx_errors = bc->stats.tx_errors; | 1055 | hi.data.cs.tx_errors = dev->stats.tx_errors; |
1070 | hi.data.cs.rx_packets = bc->stats.rx_packets; | 1056 | hi.data.cs.rx_packets = dev->stats.rx_packets; |
1071 | hi.data.cs.rx_errors = bc->stats.rx_errors; | 1057 | hi.data.cs.rx_errors = dev->stats.rx_errors; |
1072 | break; | 1058 | break; |
1073 | 1059 | ||
1074 | case HDLCDRVCTL_OLDGETSTAT: | 1060 | case HDLCDRVCTL_OLDGETSTAT: |
@@ -1116,6 +1102,14 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1116 | 1102 | ||
1117 | /* --------------------------------------------------------------------- */ | 1103 | /* --------------------------------------------------------------------- */ |
1118 | 1104 | ||
1105 | static const struct net_device_ops baycom_netdev_ops = { | ||
1106 | .ndo_open = epp_open, | ||
1107 | .ndo_stop = epp_close, | ||
1108 | .ndo_do_ioctl = baycom_ioctl, | ||
1109 | .ndo_start_xmit = baycom_send_packet, | ||
1110 | .ndo_set_mac_address = baycom_set_mac_address, | ||
1111 | }; | ||
1112 | |||
1119 | /* | 1113 | /* |
1120 | * Check for a network adaptor of this type, and return '0' if one exists. | 1114 | * Check for a network adaptor of this type, and return '0' if one exists. |
1121 | * If dev->base_addr == 0, probe all likely locations. | 1115 | * If dev->base_addr == 0, probe all likely locations. |
@@ -1143,17 +1137,12 @@ static void baycom_probe(struct net_device *dev) | |||
1143 | /* | 1137 | /* |
1144 | * initialize the device struct | 1138 | * initialize the device struct |
1145 | */ | 1139 | */ |
1146 | dev->open = epp_open; | ||
1147 | dev->stop = epp_close; | ||
1148 | dev->do_ioctl = baycom_ioctl; | ||
1149 | dev->hard_start_xmit = baycom_send_packet; | ||
1150 | dev->get_stats = baycom_get_stats; | ||
1151 | 1140 | ||
1152 | /* Fill in the fields of the device structure */ | 1141 | /* Fill in the fields of the device structure */ |
1153 | bc->skb = NULL; | 1142 | bc->skb = NULL; |
1154 | 1143 | ||
1144 | dev->netdev_ops = &baycom_netdev_ops; | ||
1155 | dev->header_ops = &ax25_header_ops; | 1145 | dev->header_ops = &ax25_header_ops; |
1156 | dev->set_mac_address = baycom_set_mac_address; | ||
1157 | 1146 | ||
1158 | dev->type = ARPHRD_AX25; /* AF_AX25 device */ | 1147 | dev->type = ARPHRD_AX25; /* AF_AX25 device */ |
1159 | dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN; | 1148 | dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN; |
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index 46f8f3390e7d..2c619bc99ae7 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c | |||
@@ -97,7 +97,7 @@ static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *, | |||
97 | static int bpq_device_event(struct notifier_block *, unsigned long, void *); | 97 | static int bpq_device_event(struct notifier_block *, unsigned long, void *); |
98 | 98 | ||
99 | static struct packet_type bpq_packet_type = { | 99 | static struct packet_type bpq_packet_type = { |
100 | .type = __constant_htons(ETH_P_BPQ), | 100 | .type = cpu_to_be16(ETH_P_BPQ), |
101 | .func = bpq_rcv, | 101 | .func = bpq_rcv, |
102 | }; | 102 | }; |
103 | 103 | ||
@@ -110,7 +110,6 @@ struct bpqdev { | |||
110 | struct list_head bpq_list; /* list of bpq devices chain */ | 110 | struct list_head bpq_list; /* list of bpq devices chain */ |
111 | struct net_device *ethdev; /* link to ethernet device */ | 111 | struct net_device *ethdev; /* link to ethernet device */ |
112 | struct net_device *axdev; /* bpq device (bpq#) */ | 112 | struct net_device *axdev; /* bpq device (bpq#) */ |
113 | struct net_device_stats stats; /* some statistics */ | ||
114 | char dest_addr[6]; /* ether destination address */ | 113 | char dest_addr[6]; /* ether destination address */ |
115 | char acpt_addr[6]; /* accept ether frames from this address only */ | 114 | char acpt_addr[6]; /* accept ether frames from this address only */ |
116 | }; | 115 | }; |
@@ -222,8 +221,8 @@ static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty | |||
222 | skb_pull(skb, 2); /* Remove the length bytes */ | 221 | skb_pull(skb, 2); /* Remove the length bytes */ |
223 | skb_trim(skb, len); /* Set the length of the data */ | 222 | skb_trim(skb, len); /* Set the length of the data */ |
224 | 223 | ||
225 | bpq->stats.rx_packets++; | 224 | dev->stats.rx_packets++; |
226 | bpq->stats.rx_bytes += len; | 225 | dev->stats.rx_bytes += len; |
227 | 226 | ||
228 | ptr = skb_push(skb, 1); | 227 | ptr = skb_push(skb, 1); |
229 | *ptr = 0; | 228 | *ptr = 0; |
@@ -292,7 +291,7 @@ static int bpq_xmit(struct sk_buff *skb, struct net_device *dev) | |||
292 | bpq = netdev_priv(dev); | 291 | bpq = netdev_priv(dev); |
293 | 292 | ||
294 | if ((dev = bpq_get_ether_dev(dev)) == NULL) { | 293 | if ((dev = bpq_get_ether_dev(dev)) == NULL) { |
295 | bpq->stats.tx_dropped++; | 294 | dev->stats.tx_dropped++; |
296 | kfree_skb(skb); | 295 | kfree_skb(skb); |
297 | return -ENODEV; | 296 | return -ENODEV; |
298 | } | 297 | } |
@@ -300,8 +299,8 @@ static int bpq_xmit(struct sk_buff *skb, struct net_device *dev) | |||
300 | skb->protocol = ax25_type_trans(skb, dev); | 299 | skb->protocol = ax25_type_trans(skb, dev); |
301 | skb_reset_network_header(skb); | 300 | skb_reset_network_header(skb); |
302 | dev_hard_header(skb, dev, ETH_P_BPQ, bpq->dest_addr, NULL, 0); | 301 | dev_hard_header(skb, dev, ETH_P_BPQ, bpq->dest_addr, NULL, 0); |
303 | bpq->stats.tx_packets++; | 302 | dev->stats.tx_packets++; |
304 | bpq->stats.tx_bytes+=skb->len; | 303 | dev->stats.tx_bytes+=skb->len; |
305 | 304 | ||
306 | dev_queue_xmit(skb); | 305 | dev_queue_xmit(skb); |
307 | netif_wake_queue(dev); | 306 | netif_wake_queue(dev); |
@@ -309,16 +308,6 @@ static int bpq_xmit(struct sk_buff *skb, struct net_device *dev) | |||
309 | } | 308 | } |
310 | 309 | ||
311 | /* | 310 | /* |
312 | * Statistics | ||
313 | */ | ||
314 | static struct net_device_stats *bpq_get_stats(struct net_device *dev) | ||
315 | { | ||
316 | struct bpqdev *bpq = netdev_priv(dev); | ||
317 | |||
318 | return &bpq->stats; | ||
319 | } | ||
320 | |||
321 | /* | ||
322 | * Set AX.25 callsign | 311 | * Set AX.25 callsign |
323 | */ | 312 | */ |
324 | static int bpq_set_mac_address(struct net_device *dev, void *addr) | 313 | static int bpq_set_mac_address(struct net_device *dev, void *addr) |
@@ -454,7 +443,7 @@ static int bpq_seq_show(struct seq_file *seq, void *v) | |||
454 | return 0; | 443 | return 0; |
455 | } | 444 | } |
456 | 445 | ||
457 | static struct seq_operations bpq_seqops = { | 446 | static const struct seq_operations bpq_seqops = { |
458 | .start = bpq_seq_start, | 447 | .start = bpq_seq_start, |
459 | .next = bpq_seq_next, | 448 | .next = bpq_seq_next, |
460 | .stop = bpq_seq_stop, | 449 | .stop = bpq_seq_stop, |
@@ -477,16 +466,17 @@ static const struct file_operations bpq_info_fops = { | |||
477 | 466 | ||
478 | /* ------------------------------------------------------------------------ */ | 467 | /* ------------------------------------------------------------------------ */ |
479 | 468 | ||
469 | static const struct net_device_ops bpq_netdev_ops = { | ||
470 | .ndo_open = bpq_open, | ||
471 | .ndo_stop = bpq_close, | ||
472 | .ndo_start_xmit = bpq_xmit, | ||
473 | .ndo_set_mac_address = bpq_set_mac_address, | ||
474 | .ndo_do_ioctl = bpq_ioctl, | ||
475 | }; | ||
480 | 476 | ||
481 | static void bpq_setup(struct net_device *dev) | 477 | static void bpq_setup(struct net_device *dev) |
482 | { | 478 | { |
483 | 479 | dev->netdev_ops = &bpq_netdev_ops; | |
484 | dev->hard_start_xmit = bpq_xmit; | ||
485 | dev->open = bpq_open; | ||
486 | dev->stop = bpq_close; | ||
487 | dev->set_mac_address = bpq_set_mac_address; | ||
488 | dev->get_stats = bpq_get_stats; | ||
489 | dev->do_ioctl = bpq_ioctl; | ||
490 | dev->destructor = free_netdev; | 480 | dev->destructor = free_netdev; |
491 | 481 | ||
492 | memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); | 482 | memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); |
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c index e67103396ed7..881bf818bb48 100644 --- a/drivers/net/hamradio/dmascc.c +++ b/drivers/net/hamradio/dmascc.c | |||
@@ -195,7 +195,7 @@ struct scc_priv { | |||
195 | int chip; | 195 | int chip; |
196 | struct net_device *dev; | 196 | struct net_device *dev; |
197 | struct scc_info *info; | 197 | struct scc_info *info; |
198 | struct net_device_stats stats; | 198 | |
199 | int channel; | 199 | int channel; |
200 | int card_base, scc_cmd, scc_data; | 200 | int card_base, scc_cmd, scc_data; |
201 | int tmr_cnt, tmr_ctrl, tmr_mode; | 201 | int tmr_cnt, tmr_ctrl, tmr_mode; |
@@ -239,7 +239,6 @@ static int scc_open(struct net_device *dev); | |||
239 | static int scc_close(struct net_device *dev); | 239 | static int scc_close(struct net_device *dev); |
240 | static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); | 240 | static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); |
241 | static int scc_send_packet(struct sk_buff *skb, struct net_device *dev); | 241 | static int scc_send_packet(struct sk_buff *skb, struct net_device *dev); |
242 | static struct net_device_stats *scc_get_stats(struct net_device *dev); | ||
243 | static int scc_set_mac_address(struct net_device *dev, void *sa); | 242 | static int scc_set_mac_address(struct net_device *dev, void *sa); |
244 | 243 | ||
245 | static inline void tx_on(struct scc_priv *priv); | 244 | static inline void tx_on(struct scc_priv *priv); |
@@ -441,6 +440,13 @@ static void __init dev_setup(struct net_device *dev) | |||
441 | memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); | 440 | memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); |
442 | } | 441 | } |
443 | 442 | ||
443 | static const struct net_device_ops scc_netdev_ops = { | ||
444 | .ndo_open = scc_open, | ||
445 | .ndo_stop = scc_close, | ||
446 | .ndo_start_xmit = scc_send_packet, | ||
447 | .ndo_do_ioctl = scc_ioctl, | ||
448 | }; | ||
449 | |||
444 | static int __init setup_adapter(int card_base, int type, int n) | 450 | static int __init setup_adapter(int card_base, int type, int n) |
445 | { | 451 | { |
446 | int i, irq, chip; | 452 | int i, irq, chip; |
@@ -576,11 +582,7 @@ static int __init setup_adapter(int card_base, int type, int n) | |||
576 | sprintf(dev->name, "dmascc%i", 2 * n + i); | 582 | sprintf(dev->name, "dmascc%i", 2 * n + i); |
577 | dev->base_addr = card_base; | 583 | dev->base_addr = card_base; |
578 | dev->irq = irq; | 584 | dev->irq = irq; |
579 | dev->open = scc_open; | 585 | dev->netdev_ops = &scc_netdev_ops; |
580 | dev->stop = scc_close; | ||
581 | dev->do_ioctl = scc_ioctl; | ||
582 | dev->hard_start_xmit = scc_send_packet; | ||
583 | dev->get_stats = scc_get_stats; | ||
584 | dev->header_ops = &ax25_header_ops; | 586 | dev->header_ops = &ax25_header_ops; |
585 | dev->set_mac_address = scc_set_mac_address; | 587 | dev->set_mac_address = scc_set_mac_address; |
586 | } | 588 | } |
@@ -961,14 +963,6 @@ static int scc_send_packet(struct sk_buff *skb, struct net_device *dev) | |||
961 | } | 963 | } |
962 | 964 | ||
963 | 965 | ||
964 | static struct net_device_stats *scc_get_stats(struct net_device *dev) | ||
965 | { | ||
966 | struct scc_priv *priv = dev->ml_priv; | ||
967 | |||
968 | return &priv->stats; | ||
969 | } | ||
970 | |||
971 | |||
972 | static int scc_set_mac_address(struct net_device *dev, void *sa) | 966 | static int scc_set_mac_address(struct net_device *dev, void *sa) |
973 | { | 967 | { |
974 | memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data, | 968 | memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data, |
@@ -1216,17 +1210,17 @@ static void special_condition(struct scc_priv *priv, int rc) | |||
1216 | } | 1210 | } |
1217 | if (priv->rx_over) { | 1211 | if (priv->rx_over) { |
1218 | /* We had an overrun */ | 1212 | /* We had an overrun */ |
1219 | priv->stats.rx_errors++; | 1213 | priv->dev->stats.rx_errors++; |
1220 | if (priv->rx_over == 2) | 1214 | if (priv->rx_over == 2) |
1221 | priv->stats.rx_length_errors++; | 1215 | priv->dev->stats.rx_length_errors++; |
1222 | else | 1216 | else |
1223 | priv->stats.rx_fifo_errors++; | 1217 | priv->dev->stats.rx_fifo_errors++; |
1224 | priv->rx_over = 0; | 1218 | priv->rx_over = 0; |
1225 | } else if (rc & CRC_ERR) { | 1219 | } else if (rc & CRC_ERR) { |
1226 | /* Count invalid CRC only if packet length >= minimum */ | 1220 | /* Count invalid CRC only if packet length >= minimum */ |
1227 | if (cb >= 15) { | 1221 | if (cb >= 15) { |
1228 | priv->stats.rx_errors++; | 1222 | priv->dev->stats.rx_errors++; |
1229 | priv->stats.rx_crc_errors++; | 1223 | priv->dev->stats.rx_crc_errors++; |
1230 | } | 1224 | } |
1231 | } else { | 1225 | } else { |
1232 | if (cb >= 15) { | 1226 | if (cb >= 15) { |
@@ -1239,8 +1233,8 @@ static void special_condition(struct scc_priv *priv, int rc) | |||
1239 | priv->rx_count++; | 1233 | priv->rx_count++; |
1240 | schedule_work(&priv->rx_work); | 1234 | schedule_work(&priv->rx_work); |
1241 | } else { | 1235 | } else { |
1242 | priv->stats.rx_errors++; | 1236 | priv->dev->stats.rx_errors++; |
1243 | priv->stats.rx_over_errors++; | 1237 | priv->dev->stats.rx_over_errors++; |
1244 | } | 1238 | } |
1245 | } | 1239 | } |
1246 | } | 1240 | } |
@@ -1275,7 +1269,7 @@ static void rx_bh(struct work_struct *ugli_api) | |||
1275 | skb = dev_alloc_skb(cb + 1); | 1269 | skb = dev_alloc_skb(cb + 1); |
1276 | if (skb == NULL) { | 1270 | if (skb == NULL) { |
1277 | /* Drop packet */ | 1271 | /* Drop packet */ |
1278 | priv->stats.rx_dropped++; | 1272 | priv->dev->stats.rx_dropped++; |
1279 | } else { | 1273 | } else { |
1280 | /* Fill buffer */ | 1274 | /* Fill buffer */ |
1281 | data = skb_put(skb, cb + 1); | 1275 | data = skb_put(skb, cb + 1); |
@@ -1283,8 +1277,8 @@ static void rx_bh(struct work_struct *ugli_api) | |||
1283 | memcpy(&data[1], priv->rx_buf[i], cb); | 1277 | memcpy(&data[1], priv->rx_buf[i], cb); |
1284 | skb->protocol = ax25_type_trans(skb, priv->dev); | 1278 | skb->protocol = ax25_type_trans(skb, priv->dev); |
1285 | netif_rx(skb); | 1279 | netif_rx(skb); |
1286 | priv->stats.rx_packets++; | 1280 | priv->dev->stats.rx_packets++; |
1287 | priv->stats.rx_bytes += cb; | 1281 | priv->dev->stats.rx_bytes += cb; |
1288 | } | 1282 | } |
1289 | spin_lock_irqsave(&priv->ring_lock, flags); | 1283 | spin_lock_irqsave(&priv->ring_lock, flags); |
1290 | /* Move tail */ | 1284 | /* Move tail */ |
@@ -1351,15 +1345,15 @@ static void es_isr(struct scc_priv *priv) | |||
1351 | write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN); | 1345 | write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN); |
1352 | if (res) { | 1346 | if (res) { |
1353 | /* Update packet statistics */ | 1347 | /* Update packet statistics */ |
1354 | priv->stats.tx_errors++; | 1348 | priv->dev->stats.tx_errors++; |
1355 | priv->stats.tx_fifo_errors++; | 1349 | priv->dev->stats.tx_fifo_errors++; |
1356 | /* Other underrun interrupts may already be waiting */ | 1350 | /* Other underrun interrupts may already be waiting */ |
1357 | write_scc(priv, R0, RES_EXT_INT); | 1351 | write_scc(priv, R0, RES_EXT_INT); |
1358 | write_scc(priv, R0, RES_EXT_INT); | 1352 | write_scc(priv, R0, RES_EXT_INT); |
1359 | } else { | 1353 | } else { |
1360 | /* Update packet statistics */ | 1354 | /* Update packet statistics */ |
1361 | priv->stats.tx_packets++; | 1355 | priv->dev->stats.tx_packets++; |
1362 | priv->stats.tx_bytes += priv->tx_len[i]; | 1356 | priv->dev->stats.tx_bytes += priv->tx_len[i]; |
1363 | /* Remove frame from FIFO */ | 1357 | /* Remove frame from FIFO */ |
1364 | priv->tx_tail = (i + 1) % NUM_TX_BUF; | 1358 | priv->tx_tail = (i + 1) % NUM_TX_BUF; |
1365 | priv->tx_count--; | 1359 | priv->tx_count--; |
@@ -1425,7 +1419,7 @@ static void tm_isr(struct scc_priv *priv) | |||
1425 | write_scc(priv, R15, DCDIE); | 1419 | write_scc(priv, R15, DCDIE); |
1426 | priv->rr0 = read_scc(priv, R0); | 1420 | priv->rr0 = read_scc(priv, R0); |
1427 | if (priv->rr0 & DCD) { | 1421 | if (priv->rr0 & DCD) { |
1428 | priv->stats.collisions++; | 1422 | priv->dev->stats.collisions++; |
1429 | rx_on(priv); | 1423 | rx_on(priv); |
1430 | priv->state = RX_ON; | 1424 | priv->state = RX_ON; |
1431 | } else { | 1425 | } else { |
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c index 8eba61a1d4ab..61de56e45eed 100644 --- a/drivers/net/hamradio/hdlcdrv.c +++ b/drivers/net/hamradio/hdlcdrv.c | |||
@@ -154,7 +154,7 @@ static void hdlc_rx_flag(struct net_device *dev, struct hdlcdrv_state *s) | |||
154 | pkt_len = s->hdlcrx.len - 2 + 1; /* KISS kludge */ | 154 | pkt_len = s->hdlcrx.len - 2 + 1; /* KISS kludge */ |
155 | if (!(skb = dev_alloc_skb(pkt_len))) { | 155 | if (!(skb = dev_alloc_skb(pkt_len))) { |
156 | printk("%s: memory squeeze, dropping packet\n", dev->name); | 156 | printk("%s: memory squeeze, dropping packet\n", dev->name); |
157 | s->stats.rx_dropped++; | 157 | dev->stats.rx_dropped++; |
158 | return; | 158 | return; |
159 | } | 159 | } |
160 | cp = skb_put(skb, pkt_len); | 160 | cp = skb_put(skb, pkt_len); |
@@ -162,7 +162,7 @@ static void hdlc_rx_flag(struct net_device *dev, struct hdlcdrv_state *s) | |||
162 | memcpy(cp, s->hdlcrx.buffer, pkt_len - 1); | 162 | memcpy(cp, s->hdlcrx.buffer, pkt_len - 1); |
163 | skb->protocol = ax25_type_trans(skb, dev); | 163 | skb->protocol = ax25_type_trans(skb, dev); |
164 | netif_rx(skb); | 164 | netif_rx(skb); |
165 | s->stats.rx_packets++; | 165 | dev->stats.rx_packets++; |
166 | } | 166 | } |
167 | 167 | ||
168 | void hdlcdrv_receiver(struct net_device *dev, struct hdlcdrv_state *s) | 168 | void hdlcdrv_receiver(struct net_device *dev, struct hdlcdrv_state *s) |
@@ -326,7 +326,7 @@ void hdlcdrv_transmitter(struct net_device *dev, struct hdlcdrv_state *s) | |||
326 | s->hdlctx.len = pkt_len+2; /* the appended CRC */ | 326 | s->hdlctx.len = pkt_len+2; /* the appended CRC */ |
327 | s->hdlctx.tx_state = 2; | 327 | s->hdlctx.tx_state = 2; |
328 | s->hdlctx.bitstream = 0; | 328 | s->hdlctx.bitstream = 0; |
329 | s->stats.tx_packets++; | 329 | dev->stats.tx_packets++; |
330 | break; | 330 | break; |
331 | case 2: | 331 | case 2: |
332 | if (!s->hdlctx.len) { | 332 | if (!s->hdlctx.len) { |
@@ -427,19 +427,6 @@ static int hdlcdrv_set_mac_address(struct net_device *dev, void *addr) | |||
427 | } | 427 | } |
428 | 428 | ||
429 | /* --------------------------------------------------------------------- */ | 429 | /* --------------------------------------------------------------------- */ |
430 | |||
431 | static struct net_device_stats *hdlcdrv_get_stats(struct net_device *dev) | ||
432 | { | ||
433 | struct hdlcdrv_state *sm = netdev_priv(dev); | ||
434 | |||
435 | /* | ||
436 | * Get the current statistics. This may be called with the | ||
437 | * card open or closed. | ||
438 | */ | ||
439 | return &sm->stats; | ||
440 | } | ||
441 | |||
442 | /* --------------------------------------------------------------------- */ | ||
443 | /* | 430 | /* |
444 | * Open/initialize the board. This is called (in the current kernel) | 431 | * Open/initialize the board. This is called (in the current kernel) |
445 | * sometime after booting when the 'ifconfig' program is run. | 432 | * sometime after booting when the 'ifconfig' program is run. |
@@ -568,10 +555,10 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
568 | bi.data.cs.ptt = hdlcdrv_ptt(s); | 555 | bi.data.cs.ptt = hdlcdrv_ptt(s); |
569 | bi.data.cs.dcd = s->hdlcrx.dcd; | 556 | bi.data.cs.dcd = s->hdlcrx.dcd; |
570 | bi.data.cs.ptt_keyed = s->ptt_keyed; | 557 | bi.data.cs.ptt_keyed = s->ptt_keyed; |
571 | bi.data.cs.tx_packets = s->stats.tx_packets; | 558 | bi.data.cs.tx_packets = dev->stats.tx_packets; |
572 | bi.data.cs.tx_errors = s->stats.tx_errors; | 559 | bi.data.cs.tx_errors = dev->stats.tx_errors; |
573 | bi.data.cs.rx_packets = s->stats.rx_packets; | 560 | bi.data.cs.rx_packets = dev->stats.rx_packets; |
574 | bi.data.cs.rx_errors = s->stats.rx_errors; | 561 | bi.data.cs.rx_errors = dev->stats.rx_errors; |
575 | break; | 562 | break; |
576 | 563 | ||
577 | case HDLCDRVCTL_OLDGETSTAT: | 564 | case HDLCDRVCTL_OLDGETSTAT: |
@@ -630,6 +617,14 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
630 | 617 | ||
631 | /* --------------------------------------------------------------------- */ | 618 | /* --------------------------------------------------------------------- */ |
632 | 619 | ||
620 | static const struct net_device_ops hdlcdrv_netdev = { | ||
621 | .ndo_open = hdlcdrv_open, | ||
622 | .ndo_stop = hdlcdrv_close, | ||
623 | .ndo_start_xmit = hdlcdrv_send_packet, | ||
624 | .ndo_do_ioctl = hdlcdrv_ioctl, | ||
625 | .ndo_set_mac_address = hdlcdrv_set_mac_address, | ||
626 | }; | ||
627 | |||
633 | /* | 628 | /* |
634 | * Initialize fields in hdlcdrv | 629 | * Initialize fields in hdlcdrv |
635 | */ | 630 | */ |
@@ -669,21 +664,13 @@ static void hdlcdrv_setup(struct net_device *dev) | |||
669 | s->bitbuf_hdlc.shreg = 0x80; | 664 | s->bitbuf_hdlc.shreg = 0x80; |
670 | #endif /* HDLCDRV_DEBUG */ | 665 | #endif /* HDLCDRV_DEBUG */ |
671 | 666 | ||
672 | /* | ||
673 | * initialize the device struct | ||
674 | */ | ||
675 | dev->open = hdlcdrv_open; | ||
676 | dev->stop = hdlcdrv_close; | ||
677 | dev->do_ioctl = hdlcdrv_ioctl; | ||
678 | dev->hard_start_xmit = hdlcdrv_send_packet; | ||
679 | dev->get_stats = hdlcdrv_get_stats; | ||
680 | 667 | ||
681 | /* Fill in the fields of the device structure */ | 668 | /* Fill in the fields of the device structure */ |
682 | 669 | ||
683 | s->skb = NULL; | 670 | s->skb = NULL; |
684 | 671 | ||
672 | dev->netdev_ops = &hdlcdrv_netdev; | ||
685 | dev->header_ops = &ax25_header_ops; | 673 | dev->header_ops = &ax25_header_ops; |
686 | dev->set_mac_address = hdlcdrv_set_mac_address; | ||
687 | 674 | ||
688 | dev->type = ARPHRD_AX25; /* AF_AX25 device */ | 675 | dev->type = ARPHRD_AX25; /* AF_AX25 device */ |
689 | dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN; | 676 | dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN; |
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index bbdb311b8420..ed5b37d43334 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c | |||
@@ -59,8 +59,6 @@ struct mkiss { | |||
59 | unsigned char *xhead; /* pointer to next byte to XMIT */ | 59 | unsigned char *xhead; /* pointer to next byte to XMIT */ |
60 | int xleft; /* bytes left in XMIT queue */ | 60 | int xleft; /* bytes left in XMIT queue */ |
61 | 61 | ||
62 | struct net_device_stats stats; | ||
63 | |||
64 | /* Detailed SLIP statistics. */ | 62 | /* Detailed SLIP statistics. */ |
65 | int mtu; /* Our mtu (to spot changes!) */ | 63 | int mtu; /* Our mtu (to spot changes!) */ |
66 | int buffsize; /* Max buffers sizes */ | 64 | int buffsize; /* Max buffers sizes */ |
@@ -253,7 +251,7 @@ static void ax_bump(struct mkiss *ax) | |||
253 | if (ax->rbuff[0] > 0x0f) { | 251 | if (ax->rbuff[0] > 0x0f) { |
254 | if (ax->rbuff[0] & 0x80) { | 252 | if (ax->rbuff[0] & 0x80) { |
255 | if (check_crc_16(ax->rbuff, ax->rcount) < 0) { | 253 | if (check_crc_16(ax->rbuff, ax->rcount) < 0) { |
256 | ax->stats.rx_errors++; | 254 | ax->dev->stats.rx_errors++; |
257 | spin_unlock_bh(&ax->buflock); | 255 | spin_unlock_bh(&ax->buflock); |
258 | 256 | ||
259 | return; | 257 | return; |
@@ -268,7 +266,7 @@ static void ax_bump(struct mkiss *ax) | |||
268 | *ax->rbuff &= ~0x80; | 266 | *ax->rbuff &= ~0x80; |
269 | } else if (ax->rbuff[0] & 0x20) { | 267 | } else if (ax->rbuff[0] & 0x20) { |
270 | if (check_crc_flex(ax->rbuff, ax->rcount) < 0) { | 268 | if (check_crc_flex(ax->rbuff, ax->rcount) < 0) { |
271 | ax->stats.rx_errors++; | 269 | ax->dev->stats.rx_errors++; |
272 | spin_unlock_bh(&ax->buflock); | 270 | spin_unlock_bh(&ax->buflock); |
273 | return; | 271 | return; |
274 | } | 272 | } |
@@ -295,7 +293,7 @@ static void ax_bump(struct mkiss *ax) | |||
295 | if ((skb = dev_alloc_skb(count)) == NULL) { | 293 | if ((skb = dev_alloc_skb(count)) == NULL) { |
296 | printk(KERN_ERR "mkiss: %s: memory squeeze, dropping packet.\n", | 294 | printk(KERN_ERR "mkiss: %s: memory squeeze, dropping packet.\n", |
297 | ax->dev->name); | 295 | ax->dev->name); |
298 | ax->stats.rx_dropped++; | 296 | ax->dev->stats.rx_dropped++; |
299 | spin_unlock_bh(&ax->buflock); | 297 | spin_unlock_bh(&ax->buflock); |
300 | return; | 298 | return; |
301 | } | 299 | } |
@@ -303,8 +301,8 @@ static void ax_bump(struct mkiss *ax) | |||
303 | memcpy(skb_put(skb,count), ax->rbuff, count); | 301 | memcpy(skb_put(skb,count), ax->rbuff, count); |
304 | skb->protocol = ax25_type_trans(skb, ax->dev); | 302 | skb->protocol = ax25_type_trans(skb, ax->dev); |
305 | netif_rx(skb); | 303 | netif_rx(skb); |
306 | ax->stats.rx_packets++; | 304 | ax->dev->stats.rx_packets++; |
307 | ax->stats.rx_bytes += count; | 305 | ax->dev->stats.rx_bytes += count; |
308 | spin_unlock_bh(&ax->buflock); | 306 | spin_unlock_bh(&ax->buflock); |
309 | } | 307 | } |
310 | 308 | ||
@@ -344,7 +342,7 @@ static void kiss_unesc(struct mkiss *ax, unsigned char s) | |||
344 | return; | 342 | return; |
345 | } | 343 | } |
346 | 344 | ||
347 | ax->stats.rx_over_errors++; | 345 | ax->dev->stats.rx_over_errors++; |
348 | set_bit(AXF_ERROR, &ax->flags); | 346 | set_bit(AXF_ERROR, &ax->flags); |
349 | } | 347 | } |
350 | spin_unlock_bh(&ax->buflock); | 348 | spin_unlock_bh(&ax->buflock); |
@@ -406,7 +404,7 @@ static void ax_changedmtu(struct mkiss *ax) | |||
406 | memcpy(ax->xbuff, ax->xhead, ax->xleft); | 404 | memcpy(ax->xbuff, ax->xhead, ax->xleft); |
407 | } else { | 405 | } else { |
408 | ax->xleft = 0; | 406 | ax->xleft = 0; |
409 | ax->stats.tx_dropped++; | 407 | dev->stats.tx_dropped++; |
410 | } | 408 | } |
411 | } | 409 | } |
412 | 410 | ||
@@ -417,7 +415,7 @@ static void ax_changedmtu(struct mkiss *ax) | |||
417 | memcpy(ax->rbuff, orbuff, ax->rcount); | 415 | memcpy(ax->rbuff, orbuff, ax->rcount); |
418 | } else { | 416 | } else { |
419 | ax->rcount = 0; | 417 | ax->rcount = 0; |
420 | ax->stats.rx_over_errors++; | 418 | dev->stats.rx_over_errors++; |
421 | set_bit(AXF_ERROR, &ax->flags); | 419 | set_bit(AXF_ERROR, &ax->flags); |
422 | } | 420 | } |
423 | } | 421 | } |
@@ -444,7 +442,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len) | |||
444 | if (len > ax->mtu) { /* Sigh, shouldn't occur BUT ... */ | 442 | if (len > ax->mtu) { /* Sigh, shouldn't occur BUT ... */ |
445 | len = ax->mtu; | 443 | len = ax->mtu; |
446 | printk(KERN_ERR "mkiss: %s: truncating oversized transmit packet!\n", ax->dev->name); | 444 | printk(KERN_ERR "mkiss: %s: truncating oversized transmit packet!\n", ax->dev->name); |
447 | ax->stats.tx_dropped++; | 445 | dev->stats.tx_dropped++; |
448 | netif_start_queue(dev); | 446 | netif_start_queue(dev); |
449 | return; | 447 | return; |
450 | } | 448 | } |
@@ -518,8 +516,8 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len) | |||
518 | 516 | ||
519 | set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags); | 517 | set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags); |
520 | actual = ax->tty->ops->write(ax->tty, ax->xbuff, count); | 518 | actual = ax->tty->ops->write(ax->tty, ax->xbuff, count); |
521 | ax->stats.tx_packets++; | 519 | dev->stats.tx_packets++; |
522 | ax->stats.tx_bytes += actual; | 520 | dev->stats.tx_bytes += actual; |
523 | 521 | ||
524 | ax->dev->trans_start = jiffies; | 522 | ax->dev->trans_start = jiffies; |
525 | ax->xleft = count - actual; | 523 | ax->xleft = count - actual; |
@@ -664,32 +662,28 @@ static int ax_close(struct net_device *dev) | |||
664 | return 0; | 662 | return 0; |
665 | } | 663 | } |
666 | 664 | ||
667 | static struct net_device_stats *ax_get_stats(struct net_device *dev) | ||
668 | { | ||
669 | struct mkiss *ax = netdev_priv(dev); | ||
670 | |||
671 | return &ax->stats; | ||
672 | } | ||
673 | |||
674 | static const struct header_ops ax_header_ops = { | 665 | static const struct header_ops ax_header_ops = { |
675 | .create = ax_header, | 666 | .create = ax_header, |
676 | .rebuild = ax_rebuild_header, | 667 | .rebuild = ax_rebuild_header, |
677 | }; | 668 | }; |
678 | 669 | ||
670 | static const struct net_device_ops ax_netdev_ops = { | ||
671 | .ndo_open = ax_open_dev, | ||
672 | .ndo_stop = ax_close, | ||
673 | .ndo_start_xmit = ax_xmit, | ||
674 | .ndo_set_mac_address = ax_set_mac_address, | ||
675 | }; | ||
676 | |||
679 | static void ax_setup(struct net_device *dev) | 677 | static void ax_setup(struct net_device *dev) |
680 | { | 678 | { |
681 | /* Finish setting up the DEVICE info. */ | 679 | /* Finish setting up the DEVICE info. */ |
682 | dev->mtu = AX_MTU; | 680 | dev->mtu = AX_MTU; |
683 | dev->hard_start_xmit = ax_xmit; | ||
684 | dev->open = ax_open_dev; | ||
685 | dev->stop = ax_close; | ||
686 | dev->get_stats = ax_get_stats; | ||
687 | dev->set_mac_address = ax_set_mac_address; | ||
688 | dev->hard_header_len = 0; | 681 | dev->hard_header_len = 0; |
689 | dev->addr_len = 0; | 682 | dev->addr_len = 0; |
690 | dev->type = ARPHRD_AX25; | 683 | dev->type = ARPHRD_AX25; |
691 | dev->tx_queue_len = 10; | 684 | dev->tx_queue_len = 10; |
692 | dev->header_ops = &ax_header_ops; | 685 | dev->header_ops = &ax_header_ops; |
686 | dev->netdev_ops = &ax_netdev_ops; | ||
693 | 687 | ||
694 | 688 | ||
695 | memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); | 689 | memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); |
@@ -929,7 +923,7 @@ static void mkiss_receive_buf(struct tty_struct *tty, const unsigned char *cp, | |||
929 | while (count--) { | 923 | while (count--) { |
930 | if (fp != NULL && *fp++) { | 924 | if (fp != NULL && *fp++) { |
931 | if (!test_and_set_bit(AXF_ERROR, &ax->flags)) | 925 | if (!test_and_set_bit(AXF_ERROR, &ax->flags)) |
932 | ax->stats.rx_errors++; | 926 | ax->dev->stats.rx_errors++; |
933 | cp++; | 927 | cp++; |
934 | continue; | 928 | continue; |
935 | } | 929 | } |
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c index c011af7088ea..2acb18f06972 100644 --- a/drivers/net/hamradio/scc.c +++ b/drivers/net/hamradio/scc.c | |||
@@ -1542,23 +1542,24 @@ static int scc_net_alloc(const char *name, struct scc_channel *scc) | |||
1542 | /* * Network driver methods * */ | 1542 | /* * Network driver methods * */ |
1543 | /* ******************************************************************** */ | 1543 | /* ******************************************************************** */ |
1544 | 1544 | ||
1545 | static const struct net_device_ops scc_netdev_ops = { | ||
1546 | .ndo_open = scc_net_open, | ||
1547 | .ndo_stop = scc_net_close, | ||
1548 | .ndo_start_xmit = scc_net_tx, | ||
1549 | .ndo_set_mac_address = scc_net_set_mac_address, | ||
1550 | .ndo_get_stats = scc_net_get_stats, | ||
1551 | .ndo_do_ioctl = scc_net_ioctl, | ||
1552 | }; | ||
1553 | |||
1545 | /* ----> Initialize device <----- */ | 1554 | /* ----> Initialize device <----- */ |
1546 | 1555 | ||
1547 | static void scc_net_setup(struct net_device *dev) | 1556 | static void scc_net_setup(struct net_device *dev) |
1548 | { | 1557 | { |
1549 | dev->tx_queue_len = 16; /* should be enough... */ | 1558 | dev->tx_queue_len = 16; /* should be enough... */ |
1550 | 1559 | ||
1551 | dev->open = scc_net_open; | 1560 | dev->netdev_ops = &scc_netdev_ops; |
1552 | dev->stop = scc_net_close; | ||
1553 | |||
1554 | dev->hard_start_xmit = scc_net_tx; | ||
1555 | dev->header_ops = &ax25_header_ops; | 1561 | dev->header_ops = &ax25_header_ops; |
1556 | 1562 | ||
1557 | dev->set_mac_address = scc_net_set_mac_address; | ||
1558 | dev->get_stats = scc_net_get_stats; | ||
1559 | dev->do_ioctl = scc_net_ioctl; | ||
1560 | dev->tx_timeout = NULL; | ||
1561 | |||
1562 | memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); | 1563 | memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); |
1563 | memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); | 1564 | memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); |
1564 | 1565 | ||
@@ -2073,7 +2074,7 @@ static int scc_net_seq_show(struct seq_file *seq, void *v) | |||
2073 | return 0; | 2074 | return 0; |
2074 | } | 2075 | } |
2075 | 2076 | ||
2076 | static struct seq_operations scc_net_seq_ops = { | 2077 | static const struct seq_operations scc_net_seq_ops = { |
2077 | .start = scc_net_seq_start, | 2078 | .start = scc_net_seq_start, |
2078 | .next = scc_net_seq_next, | 2079 | .next = scc_net_seq_next, |
2079 | .stop = scc_net_seq_stop, | 2080 | .stop = scc_net_seq_stop, |
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index 5407f7486c9c..82a8be7613d6 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c | |||
@@ -115,10 +115,6 @@ struct yam_port { | |||
115 | 115 | ||
116 | struct net_device *dev; | 116 | struct net_device *dev; |
117 | 117 | ||
118 | /* Stats section */ | ||
119 | |||
120 | struct net_device_stats stats; | ||
121 | |||
122 | int nb_rxint; | 118 | int nb_rxint; |
123 | int nb_mdint; | 119 | int nb_mdint; |
124 | 120 | ||
@@ -507,7 +503,7 @@ static inline void yam_rx_flag(struct net_device *dev, struct yam_port *yp) | |||
507 | } else { | 503 | } else { |
508 | if (!(skb = dev_alloc_skb(pkt_len))) { | 504 | if (!(skb = dev_alloc_skb(pkt_len))) { |
509 | printk(KERN_WARNING "%s: memory squeeze, dropping packet\n", dev->name); | 505 | printk(KERN_WARNING "%s: memory squeeze, dropping packet\n", dev->name); |
510 | ++yp->stats.rx_dropped; | 506 | ++dev->stats.rx_dropped; |
511 | } else { | 507 | } else { |
512 | unsigned char *cp; | 508 | unsigned char *cp; |
513 | cp = skb_put(skb, pkt_len); | 509 | cp = skb_put(skb, pkt_len); |
@@ -515,7 +511,7 @@ static inline void yam_rx_flag(struct net_device *dev, struct yam_port *yp) | |||
515 | memcpy(cp, yp->rx_buf, pkt_len - 1); | 511 | memcpy(cp, yp->rx_buf, pkt_len - 1); |
516 | skb->protocol = ax25_type_trans(skb, dev); | 512 | skb->protocol = ax25_type_trans(skb, dev); |
517 | netif_rx(skb); | 513 | netif_rx(skb); |
518 | ++yp->stats.rx_packets; | 514 | ++dev->stats.rx_packets; |
519 | } | 515 | } |
520 | } | 516 | } |
521 | } | 517 | } |
@@ -677,7 +673,7 @@ static void yam_tx_byte(struct net_device *dev, struct yam_port *yp) | |||
677 | yp->tx_count = 1; | 673 | yp->tx_count = 1; |
678 | yp->tx_state = TX_HEAD; | 674 | yp->tx_state = TX_HEAD; |
679 | } | 675 | } |
680 | ++yp->stats.tx_packets; | 676 | ++dev->stats.tx_packets; |
681 | break; | 677 | break; |
682 | case TX_TAIL: | 678 | case TX_TAIL: |
683 | if (--yp->tx_count <= 0) { | 679 | if (--yp->tx_count <= 0) { |
@@ -716,7 +712,7 @@ static irqreturn_t yam_interrupt(int irq, void *dev_id) | |||
716 | handled = 1; | 712 | handled = 1; |
717 | 713 | ||
718 | if (lsr & LSR_OE) | 714 | if (lsr & LSR_OE) |
719 | ++yp->stats.rx_fifo_errors; | 715 | ++dev->stats.rx_fifo_errors; |
720 | 716 | ||
721 | yp->dcd = (msr & RX_DCD) ? 1 : 0; | 717 | yp->dcd = (msr & RX_DCD) ? 1 : 0; |
722 | 718 | ||
@@ -778,16 +774,16 @@ static int yam_seq_show(struct seq_file *seq, void *v) | |||
778 | seq_printf(seq, " TxTail %u\n", yp->txtail); | 774 | seq_printf(seq, " TxTail %u\n", yp->txtail); |
779 | seq_printf(seq, " SlotTime %u\n", yp->slot); | 775 | seq_printf(seq, " SlotTime %u\n", yp->slot); |
780 | seq_printf(seq, " Persist %u\n", yp->pers); | 776 | seq_printf(seq, " Persist %u\n", yp->pers); |
781 | seq_printf(seq, " TxFrames %lu\n", yp->stats.tx_packets); | 777 | seq_printf(seq, " TxFrames %lu\n", dev->stats.tx_packets); |
782 | seq_printf(seq, " RxFrames %lu\n", yp->stats.rx_packets); | 778 | seq_printf(seq, " RxFrames %lu\n", dev->stats.rx_packets); |
783 | seq_printf(seq, " TxInt %u\n", yp->nb_mdint); | 779 | seq_printf(seq, " TxInt %u\n", yp->nb_mdint); |
784 | seq_printf(seq, " RxInt %u\n", yp->nb_rxint); | 780 | seq_printf(seq, " RxInt %u\n", yp->nb_rxint); |
785 | seq_printf(seq, " RxOver %lu\n", yp->stats.rx_fifo_errors); | 781 | seq_printf(seq, " RxOver %lu\n", dev->stats.rx_fifo_errors); |
786 | seq_printf(seq, "\n"); | 782 | seq_printf(seq, "\n"); |
787 | return 0; | 783 | return 0; |
788 | } | 784 | } |
789 | 785 | ||
790 | static struct seq_operations yam_seqops = { | 786 | static const struct seq_operations yam_seqops = { |
791 | .start = yam_seq_start, | 787 | .start = yam_seq_start, |
792 | .next = yam_seq_next, | 788 | .next = yam_seq_next, |
793 | .stop = yam_seq_stop, | 789 | .stop = yam_seq_stop, |
@@ -812,26 +808,6 @@ static const struct file_operations yam_info_fops = { | |||
812 | 808 | ||
813 | /* --------------------------------------------------------------------- */ | 809 | /* --------------------------------------------------------------------- */ |
814 | 810 | ||
815 | static struct net_device_stats *yam_get_stats(struct net_device *dev) | ||
816 | { | ||
817 | struct yam_port *yp; | ||
818 | |||
819 | if (!dev) | ||
820 | return NULL; | ||
821 | |||
822 | yp = netdev_priv(dev); | ||
823 | if (yp->magic != YAM_MAGIC) | ||
824 | return NULL; | ||
825 | |||
826 | /* | ||
827 | * Get the current statistics. This may be called with the | ||
828 | * card open or closed. | ||
829 | */ | ||
830 | return &yp->stats; | ||
831 | } | ||
832 | |||
833 | /* --------------------------------------------------------------------- */ | ||
834 | |||
835 | static int yam_open(struct net_device *dev) | 811 | static int yam_open(struct net_device *dev) |
836 | { | 812 | { |
837 | struct yam_port *yp = netdev_priv(dev); | 813 | struct yam_port *yp = netdev_priv(dev); |
@@ -878,9 +854,9 @@ static int yam_open(struct net_device *dev) | |||
878 | /* Reset overruns for all ports - FPGA programming makes overruns */ | 854 | /* Reset overruns for all ports - FPGA programming makes overruns */ |
879 | for (i = 0; i < NR_PORTS; i++) { | 855 | for (i = 0; i < NR_PORTS; i++) { |
880 | struct net_device *dev = yam_devs[i]; | 856 | struct net_device *dev = yam_devs[i]; |
881 | struct yam_port *yp = netdev_priv(dev); | 857 | |
882 | inb(LSR(dev->base_addr)); | 858 | inb(LSR(dev->base_addr)); |
883 | yp->stats.rx_fifo_errors = 0; | 859 | dev->stats.rx_fifo_errors = 0; |
884 | } | 860 | } |
885 | 861 | ||
886 | printk(KERN_INFO "%s at iobase 0x%lx irq %u uart %s\n", dev->name, dev->base_addr, dev->irq, | 862 | printk(KERN_INFO "%s at iobase 0x%lx irq %u uart %s\n", dev->name, dev->base_addr, dev->irq, |
@@ -1068,6 +1044,14 @@ static int yam_set_mac_address(struct net_device *dev, void *addr) | |||
1068 | 1044 | ||
1069 | /* --------------------------------------------------------------------- */ | 1045 | /* --------------------------------------------------------------------- */ |
1070 | 1046 | ||
1047 | static const struct net_device_ops yam_netdev_ops = { | ||
1048 | .ndo_open = yam_open, | ||
1049 | .ndo_stop = yam_close, | ||
1050 | .ndo_start_xmit = yam_send_packet, | ||
1051 | .ndo_do_ioctl = yam_ioctl, | ||
1052 | .ndo_set_mac_address = yam_set_mac_address, | ||
1053 | }; | ||
1054 | |||
1071 | static void yam_setup(struct net_device *dev) | 1055 | static void yam_setup(struct net_device *dev) |
1072 | { | 1056 | { |
1073 | struct yam_port *yp = netdev_priv(dev); | 1057 | struct yam_port *yp = netdev_priv(dev); |
@@ -1088,18 +1072,11 @@ static void yam_setup(struct net_device *dev) | |||
1088 | dev->base_addr = yp->iobase; | 1072 | dev->base_addr = yp->iobase; |
1089 | dev->irq = yp->irq; | 1073 | dev->irq = yp->irq; |
1090 | 1074 | ||
1091 | dev->open = yam_open; | ||
1092 | dev->stop = yam_close; | ||
1093 | dev->do_ioctl = yam_ioctl; | ||
1094 | dev->hard_start_xmit = yam_send_packet; | ||
1095 | dev->get_stats = yam_get_stats; | ||
1096 | |||
1097 | skb_queue_head_init(&yp->send_queue); | 1075 | skb_queue_head_init(&yp->send_queue); |
1098 | 1076 | ||
1077 | dev->netdev_ops = &yam_netdev_ops; | ||
1099 | dev->header_ops = &ax25_header_ops; | 1078 | dev->header_ops = &ax25_header_ops; |
1100 | 1079 | ||
1101 | dev->set_mac_address = yam_set_mac_address; | ||
1102 | |||
1103 | dev->type = ARPHRD_AX25; | 1080 | dev->type = ARPHRD_AX25; |
1104 | dev->hard_header_len = AX25_MAX_HEADER_LEN; | 1081 | dev->hard_header_len = AX25_MAX_HEADER_LEN; |
1105 | dev->mtu = AX25_MTU; | 1082 | dev->mtu = AX25_MTU; |
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index dfa6348ac1dc..5c6315df86b9 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c | |||
@@ -1028,10 +1028,10 @@ static int ibmveth_poll(struct napi_struct *napi, int budget) | |||
1028 | 1028 | ||
1029 | ibmveth_assert(lpar_rc == H_SUCCESS); | 1029 | ibmveth_assert(lpar_rc == H_SUCCESS); |
1030 | 1030 | ||
1031 | netif_rx_complete(napi); | 1031 | napi_complete(napi); |
1032 | 1032 | ||
1033 | if (ibmveth_rxq_pending_buffer(adapter) && | 1033 | if (ibmveth_rxq_pending_buffer(adapter) && |
1034 | netif_rx_reschedule(napi)) { | 1034 | napi_reschedule(napi)) { |
1035 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, | 1035 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, |
1036 | VIO_IRQ_DISABLE); | 1036 | VIO_IRQ_DISABLE); |
1037 | goto restart_poll; | 1037 | goto restart_poll; |
@@ -1047,11 +1047,11 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance) | |||
1047 | struct ibmveth_adapter *adapter = netdev_priv(netdev); | 1047 | struct ibmveth_adapter *adapter = netdev_priv(netdev); |
1048 | unsigned long lpar_rc; | 1048 | unsigned long lpar_rc; |
1049 | 1049 | ||
1050 | if (netif_rx_schedule_prep(&adapter->napi)) { | 1050 | if (napi_schedule_prep(&adapter->napi)) { |
1051 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, | 1051 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, |
1052 | VIO_IRQ_DISABLE); | 1052 | VIO_IRQ_DISABLE); |
1053 | ibmveth_assert(lpar_rc == H_SUCCESS); | 1053 | ibmveth_assert(lpar_rc == H_SUCCESS); |
1054 | __netif_rx_schedule(&adapter->napi); | 1054 | __napi_schedule(&adapter->napi); |
1055 | } | 1055 | } |
1056 | return IRQ_HANDLED; | 1056 | return IRQ_HANDLED; |
1057 | } | 1057 | } |
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c index 13ca73f96ec6..f5e4cad7971a 100644 --- a/drivers/net/igb/e1000_82575.c +++ b/drivers/net/igb/e1000_82575.c | |||
@@ -1110,6 +1110,13 @@ static s32 igb_setup_fiber_serdes_link_82575(struct e1000_hw *hw) | |||
1110 | E1000_CTRL_SWDPIN1; | 1110 | E1000_CTRL_SWDPIN1; |
1111 | wr32(E1000_CTRL, reg); | 1111 | wr32(E1000_CTRL, reg); |
1112 | 1112 | ||
1113 | /* Power on phy for 82576 fiber adapters */ | ||
1114 | if (hw->mac.type == e1000_82576) { | ||
1115 | reg = rd32(E1000_CTRL_EXT); | ||
1116 | reg &= ~E1000_CTRL_EXT_SDP7_DATA; | ||
1117 | wr32(E1000_CTRL_EXT, reg); | ||
1118 | } | ||
1119 | |||
1113 | /* Set switch control to serdes energy detect */ | 1120 | /* Set switch control to serdes energy detect */ |
1114 | reg = rd32(E1000_CONNSW); | 1121 | reg = rd32(E1000_CONNSW); |
1115 | reg |= E1000_CONNSW_ENRGSRC; | 1122 | reg |= E1000_CONNSW_ENRGSRC; |
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h index aebef8e48e76..30657ddf4842 100644 --- a/drivers/net/igb/igb.h +++ b/drivers/net/igb/igb.h | |||
@@ -36,12 +36,6 @@ | |||
36 | 36 | ||
37 | struct igb_adapter; | 37 | struct igb_adapter; |
38 | 38 | ||
39 | #ifdef CONFIG_IGB_LRO | ||
40 | #include <linux/inet_lro.h> | ||
41 | #define MAX_LRO_AGGR 32 | ||
42 | #define MAX_LRO_DESCRIPTORS 8 | ||
43 | #endif | ||
44 | |||
45 | /* Interrupt defines */ | 39 | /* Interrupt defines */ |
46 | #define IGB_MIN_DYN_ITR 3000 | 40 | #define IGB_MIN_DYN_ITR 3000 |
47 | #define IGB_MAX_DYN_ITR 96000 | 41 | #define IGB_MAX_DYN_ITR 96000 |
@@ -176,10 +170,6 @@ struct igb_ring { | |||
176 | struct napi_struct napi; | 170 | struct napi_struct napi; |
177 | int set_itr; | 171 | int set_itr; |
178 | struct igb_ring *buddy; | 172 | struct igb_ring *buddy; |
179 | #ifdef CONFIG_IGB_LRO | ||
180 | struct net_lro_mgr lro_mgr; | ||
181 | bool lro_used; | ||
182 | #endif | ||
183 | }; | 173 | }; |
184 | }; | 174 | }; |
185 | 175 | ||
@@ -288,12 +278,6 @@ struct igb_adapter { | |||
288 | int need_ioport; | 278 | int need_ioport; |
289 | 279 | ||
290 | struct igb_ring *multi_tx_table[IGB_MAX_TX_QUEUES]; | 280 | struct igb_ring *multi_tx_table[IGB_MAX_TX_QUEUES]; |
291 | #ifdef CONFIG_IGB_LRO | ||
292 | unsigned int lro_max_aggr; | ||
293 | unsigned int lro_aggregated; | ||
294 | unsigned int lro_flushed; | ||
295 | unsigned int lro_no_desc; | ||
296 | #endif | ||
297 | unsigned int tx_ring_count; | 281 | unsigned int tx_ring_count; |
298 | unsigned int rx_ring_count; | 282 | unsigned int rx_ring_count; |
299 | }; | 283 | }; |
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c index 3c831f1472ad..4606e63fc6f5 100644 --- a/drivers/net/igb/igb_ethtool.c +++ b/drivers/net/igb/igb_ethtool.c | |||
@@ -93,11 +93,6 @@ static const struct igb_stats igb_gstrings_stats[] = { | |||
93 | { "tx_smbus", IGB_STAT(stats.mgptc) }, | 93 | { "tx_smbus", IGB_STAT(stats.mgptc) }, |
94 | { "rx_smbus", IGB_STAT(stats.mgprc) }, | 94 | { "rx_smbus", IGB_STAT(stats.mgprc) }, |
95 | { "dropped_smbus", IGB_STAT(stats.mgpdc) }, | 95 | { "dropped_smbus", IGB_STAT(stats.mgpdc) }, |
96 | #ifdef CONFIG_IGB_LRO | ||
97 | { "lro_aggregated", IGB_STAT(lro_aggregated) }, | ||
98 | { "lro_flushed", IGB_STAT(lro_flushed) }, | ||
99 | { "lro_no_desc", IGB_STAT(lro_no_desc) }, | ||
100 | #endif | ||
101 | }; | 96 | }; |
102 | 97 | ||
103 | #define IGB_QUEUE_STATS_LEN \ | 98 | #define IGB_QUEUE_STATS_LEN \ |
@@ -1921,18 +1916,6 @@ static void igb_get_ethtool_stats(struct net_device *netdev, | |||
1921 | int stat_count = sizeof(struct igb_queue_stats) / sizeof(u64); | 1916 | int stat_count = sizeof(struct igb_queue_stats) / sizeof(u64); |
1922 | int j; | 1917 | int j; |
1923 | int i; | 1918 | int i; |
1924 | #ifdef CONFIG_IGB_LRO | ||
1925 | int aggregated = 0, flushed = 0, no_desc = 0; | ||
1926 | |||
1927 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
1928 | aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated; | ||
1929 | flushed += adapter->rx_ring[i].lro_mgr.stats.flushed; | ||
1930 | no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc; | ||
1931 | } | ||
1932 | adapter->lro_aggregated = aggregated; | ||
1933 | adapter->lro_flushed = flushed; | ||
1934 | adapter->lro_no_desc = no_desc; | ||
1935 | #endif | ||
1936 | 1919 | ||
1937 | igb_update_stats(adapter); | 1920 | igb_update_stats(adapter); |
1938 | for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { | 1921 | for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { |
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index a50db5398fa5..8b80fe343435 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c | |||
@@ -115,9 +115,6 @@ static bool igb_clean_tx_irq(struct igb_ring *); | |||
115 | static int igb_poll(struct napi_struct *, int); | 115 | static int igb_poll(struct napi_struct *, int); |
116 | static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int); | 116 | static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int); |
117 | static void igb_alloc_rx_buffers_adv(struct igb_ring *, int); | 117 | static void igb_alloc_rx_buffers_adv(struct igb_ring *, int); |
118 | #ifdef CONFIG_IGB_LRO | ||
119 | static int igb_get_skb_hdr(struct sk_buff *skb, void **, void **, u64 *, void *); | ||
120 | #endif | ||
121 | static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); | 118 | static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); |
122 | static void igb_tx_timeout(struct net_device *); | 119 | static void igb_tx_timeout(struct net_device *); |
123 | static void igb_reset_task(struct work_struct *); | 120 | static void igb_reset_task(struct work_struct *); |
@@ -1189,7 +1186,7 @@ static int __devinit igb_probe(struct pci_dev *pdev, | |||
1189 | netdev->features |= NETIF_F_TSO6; | 1186 | netdev->features |= NETIF_F_TSO6; |
1190 | 1187 | ||
1191 | #ifdef CONFIG_IGB_LRO | 1188 | #ifdef CONFIG_IGB_LRO |
1192 | netdev->features |= NETIF_F_LRO; | 1189 | netdev->features |= NETIF_F_GRO; |
1193 | #endif | 1190 | #endif |
1194 | 1191 | ||
1195 | netdev->vlan_features |= NETIF_F_TSO; | 1192 | netdev->vlan_features |= NETIF_F_TSO; |
@@ -1200,7 +1197,6 @@ static int __devinit igb_probe(struct pci_dev *pdev, | |||
1200 | if (pci_using_dac) | 1197 | if (pci_using_dac) |
1201 | netdev->features |= NETIF_F_HIGHDMA; | 1198 | netdev->features |= NETIF_F_HIGHDMA; |
1202 | 1199 | ||
1203 | netdev->features |= NETIF_F_LLTX; | ||
1204 | adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw); | 1200 | adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw); |
1205 | 1201 | ||
1206 | /* before reading the NVM, reset the controller to put the device in a | 1202 | /* before reading the NVM, reset the controller to put the device in a |
@@ -1738,14 +1734,6 @@ int igb_setup_rx_resources(struct igb_adapter *adapter, | |||
1738 | struct pci_dev *pdev = adapter->pdev; | 1734 | struct pci_dev *pdev = adapter->pdev; |
1739 | int size, desc_len; | 1735 | int size, desc_len; |
1740 | 1736 | ||
1741 | #ifdef CONFIG_IGB_LRO | ||
1742 | size = sizeof(struct net_lro_desc) * MAX_LRO_DESCRIPTORS; | ||
1743 | rx_ring->lro_mgr.lro_arr = vmalloc(size); | ||
1744 | if (!rx_ring->lro_mgr.lro_arr) | ||
1745 | goto err; | ||
1746 | memset(rx_ring->lro_mgr.lro_arr, 0, size); | ||
1747 | #endif | ||
1748 | |||
1749 | size = sizeof(struct igb_buffer) * rx_ring->count; | 1737 | size = sizeof(struct igb_buffer) * rx_ring->count; |
1750 | rx_ring->buffer_info = vmalloc(size); | 1738 | rx_ring->buffer_info = vmalloc(size); |
1751 | if (!rx_ring->buffer_info) | 1739 | if (!rx_ring->buffer_info) |
@@ -1772,10 +1760,6 @@ int igb_setup_rx_resources(struct igb_adapter *adapter, | |||
1772 | return 0; | 1760 | return 0; |
1773 | 1761 | ||
1774 | err: | 1762 | err: |
1775 | #ifdef CONFIG_IGB_LRO | ||
1776 | vfree(rx_ring->lro_mgr.lro_arr); | ||
1777 | rx_ring->lro_mgr.lro_arr = NULL; | ||
1778 | #endif | ||
1779 | vfree(rx_ring->buffer_info); | 1763 | vfree(rx_ring->buffer_info); |
1780 | dev_err(&adapter->pdev->dev, "Unable to allocate memory for " | 1764 | dev_err(&adapter->pdev->dev, "Unable to allocate memory for " |
1781 | "the receive descriptor ring\n"); | 1765 | "the receive descriptor ring\n"); |
@@ -1929,16 +1913,6 @@ static void igb_configure_rx(struct igb_adapter *adapter) | |||
1929 | rxdctl |= IGB_RX_HTHRESH << 8; | 1913 | rxdctl |= IGB_RX_HTHRESH << 8; |
1930 | rxdctl |= IGB_RX_WTHRESH << 16; | 1914 | rxdctl |= IGB_RX_WTHRESH << 16; |
1931 | wr32(E1000_RXDCTL(j), rxdctl); | 1915 | wr32(E1000_RXDCTL(j), rxdctl); |
1932 | #ifdef CONFIG_IGB_LRO | ||
1933 | /* Intitial LRO Settings */ | ||
1934 | ring->lro_mgr.max_aggr = MAX_LRO_AGGR; | ||
1935 | ring->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS; | ||
1936 | ring->lro_mgr.get_skb_header = igb_get_skb_hdr; | ||
1937 | ring->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID; | ||
1938 | ring->lro_mgr.dev = adapter->netdev; | ||
1939 | ring->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; | ||
1940 | ring->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; | ||
1941 | #endif | ||
1942 | } | 1916 | } |
1943 | 1917 | ||
1944 | if (adapter->num_rx_queues > 1) { | 1918 | if (adapter->num_rx_queues > 1) { |
@@ -2127,11 +2101,6 @@ void igb_free_rx_resources(struct igb_ring *rx_ring) | |||
2127 | vfree(rx_ring->buffer_info); | 2101 | vfree(rx_ring->buffer_info); |
2128 | rx_ring->buffer_info = NULL; | 2102 | rx_ring->buffer_info = NULL; |
2129 | 2103 | ||
2130 | #ifdef CONFIG_IGB_LRO | ||
2131 | vfree(rx_ring->lro_mgr.lro_arr); | ||
2132 | rx_ring->lro_mgr.lro_arr = NULL; | ||
2133 | #endif | ||
2134 | |||
2135 | pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); | 2104 | pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); |
2136 | 2105 | ||
2137 | rx_ring->desc = NULL; | 2106 | rx_ring->desc = NULL; |
@@ -2779,12 +2748,12 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter, | |||
2779 | 2748 | ||
2780 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 2749 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
2781 | switch (skb->protocol) { | 2750 | switch (skb->protocol) { |
2782 | case __constant_htons(ETH_P_IP): | 2751 | case cpu_to_be16(ETH_P_IP): |
2783 | tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; | 2752 | tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; |
2784 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) | 2753 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) |
2785 | tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; | 2754 | tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; |
2786 | break; | 2755 | break; |
2787 | case __constant_htons(ETH_P_IPV6): | 2756 | case cpu_to_be16(ETH_P_IPV6): |
2788 | /* XXX what about other V6 headers?? */ | 2757 | /* XXX what about other V6 headers?? */ |
2789 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) | 2758 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) |
2790 | tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; | 2759 | tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; |
@@ -3385,8 +3354,8 @@ static irqreturn_t igb_msix_rx(int irq, void *data) | |||
3385 | 3354 | ||
3386 | igb_write_itr(rx_ring); | 3355 | igb_write_itr(rx_ring); |
3387 | 3356 | ||
3388 | if (netif_rx_schedule_prep(&rx_ring->napi)) | 3357 | if (napi_schedule_prep(&rx_ring->napi)) |
3389 | __netif_rx_schedule(&rx_ring->napi); | 3358 | __napi_schedule(&rx_ring->napi); |
3390 | 3359 | ||
3391 | #ifdef CONFIG_IGB_DCA | 3360 | #ifdef CONFIG_IGB_DCA |
3392 | if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED) | 3361 | if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED) |
@@ -3535,7 +3504,7 @@ static irqreturn_t igb_intr_msi(int irq, void *data) | |||
3535 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | 3504 | mod_timer(&adapter->watchdog_timer, jiffies + 1); |
3536 | } | 3505 | } |
3537 | 3506 | ||
3538 | netif_rx_schedule(&adapter->rx_ring[0].napi); | 3507 | napi_schedule(&adapter->rx_ring[0].napi); |
3539 | 3508 | ||
3540 | return IRQ_HANDLED; | 3509 | return IRQ_HANDLED; |
3541 | } | 3510 | } |
@@ -3573,7 +3542,7 @@ static irqreturn_t igb_intr(int irq, void *data) | |||
3573 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | 3542 | mod_timer(&adapter->watchdog_timer, jiffies + 1); |
3574 | } | 3543 | } |
3575 | 3544 | ||
3576 | netif_rx_schedule(&adapter->rx_ring[0].napi); | 3545 | napi_schedule(&adapter->rx_ring[0].napi); |
3577 | 3546 | ||
3578 | return IRQ_HANDLED; | 3547 | return IRQ_HANDLED; |
3579 | } | 3548 | } |
@@ -3608,7 +3577,7 @@ static int igb_poll(struct napi_struct *napi, int budget) | |||
3608 | !netif_running(netdev)) { | 3577 | !netif_running(netdev)) { |
3609 | if (adapter->itr_setting & 3) | 3578 | if (adapter->itr_setting & 3) |
3610 | igb_set_itr(adapter); | 3579 | igb_set_itr(adapter); |
3611 | netif_rx_complete(napi); | 3580 | napi_complete(napi); |
3612 | if (!test_bit(__IGB_DOWN, &adapter->state)) | 3581 | if (!test_bit(__IGB_DOWN, &adapter->state)) |
3613 | igb_irq_enable(adapter); | 3582 | igb_irq_enable(adapter); |
3614 | return 0; | 3583 | return 0; |
@@ -3634,7 +3603,7 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget) | |||
3634 | 3603 | ||
3635 | /* If not enough Rx work done, exit the polling mode */ | 3604 | /* If not enough Rx work done, exit the polling mode */ |
3636 | if ((work_done == 0) || !netif_running(netdev)) { | 3605 | if ((work_done == 0) || !netif_running(netdev)) { |
3637 | netif_rx_complete(napi); | 3606 | napi_complete(napi); |
3638 | 3607 | ||
3639 | if (adapter->itr_setting & 3) { | 3608 | if (adapter->itr_setting & 3) { |
3640 | if (adapter->num_rx_queues == 1) | 3609 | if (adapter->num_rx_queues == 1) |
@@ -3764,39 +3733,6 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring) | |||
3764 | return (count < tx_ring->count); | 3733 | return (count < tx_ring->count); |
3765 | } | 3734 | } |
3766 | 3735 | ||
3767 | #ifdef CONFIG_IGB_LRO | ||
3768 | /** | ||
3769 | * igb_get_skb_hdr - helper function for LRO header processing | ||
3770 | * @skb: pointer to sk_buff to be added to LRO packet | ||
3771 | * @iphdr: pointer to ip header structure | ||
3772 | * @tcph: pointer to tcp header structure | ||
3773 | * @hdr_flags: pointer to header flags | ||
3774 | * @priv: pointer to the receive descriptor for the current sk_buff | ||
3775 | **/ | ||
3776 | static int igb_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph, | ||
3777 | u64 *hdr_flags, void *priv) | ||
3778 | { | ||
3779 | union e1000_adv_rx_desc *rx_desc = priv; | ||
3780 | u16 pkt_type = rx_desc->wb.lower.lo_dword.pkt_info & | ||
3781 | (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP); | ||
3782 | |||
3783 | /* Verify that this is a valid IPv4 TCP packet */ | ||
3784 | if (pkt_type != (E1000_RXDADV_PKTTYPE_IPV4 | | ||
3785 | E1000_RXDADV_PKTTYPE_TCP)) | ||
3786 | return -1; | ||
3787 | |||
3788 | /* Set network headers */ | ||
3789 | skb_reset_network_header(skb); | ||
3790 | skb_set_transport_header(skb, ip_hdrlen(skb)); | ||
3791 | *iphdr = ip_hdr(skb); | ||
3792 | *tcph = tcp_hdr(skb); | ||
3793 | *hdr_flags = LRO_IPV4 | LRO_TCP; | ||
3794 | |||
3795 | return 0; | ||
3796 | |||
3797 | } | ||
3798 | #endif /* CONFIG_IGB_LRO */ | ||
3799 | |||
3800 | /** | 3736 | /** |
3801 | * igb_receive_skb - helper function to handle rx indications | 3737 | * igb_receive_skb - helper function to handle rx indications |
3802 | * @ring: pointer to receive ring receving this packet | 3738 | * @ring: pointer to receive ring receving this packet |
@@ -3811,28 +3747,21 @@ static void igb_receive_skb(struct igb_ring *ring, u8 status, | |||
3811 | struct igb_adapter * adapter = ring->adapter; | 3747 | struct igb_adapter * adapter = ring->adapter; |
3812 | bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP)); | 3748 | bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP)); |
3813 | 3749 | ||
3814 | #ifdef CONFIG_IGB_LRO | 3750 | skb_record_rx_queue(skb, ring->queue_index); |
3815 | if (adapter->netdev->features & NETIF_F_LRO && | 3751 | if (skb->ip_summed == CHECKSUM_UNNECESSARY) { |
3816 | skb->ip_summed == CHECKSUM_UNNECESSARY) { | ||
3817 | if (vlan_extracted) | 3752 | if (vlan_extracted) |
3818 | lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb, | 3753 | vlan_gro_receive(&ring->napi, adapter->vlgrp, |
3819 | adapter->vlgrp, | 3754 | le16_to_cpu(rx_desc->wb.upper.vlan), |
3820 | le16_to_cpu(rx_desc->wb.upper.vlan), | 3755 | skb); |
3821 | rx_desc); | ||
3822 | else | 3756 | else |
3823 | lro_receive_skb(&ring->lro_mgr,skb, rx_desc); | 3757 | napi_gro_receive(&ring->napi, skb); |
3824 | ring->lro_used = 1; | ||
3825 | } else { | 3758 | } else { |
3826 | #endif | ||
3827 | if (vlan_extracted) | 3759 | if (vlan_extracted) |
3828 | vlan_hwaccel_receive_skb(skb, adapter->vlgrp, | 3760 | vlan_hwaccel_receive_skb(skb, adapter->vlgrp, |
3829 | le16_to_cpu(rx_desc->wb.upper.vlan)); | 3761 | le16_to_cpu(rx_desc->wb.upper.vlan)); |
3830 | else | 3762 | else |
3831 | |||
3832 | netif_receive_skb(skb); | 3763 | netif_receive_skb(skb); |
3833 | #ifdef CONFIG_IGB_LRO | ||
3834 | } | 3764 | } |
3835 | #endif | ||
3836 | } | 3765 | } |
3837 | 3766 | ||
3838 | 3767 | ||
@@ -3987,13 +3916,6 @@ next_desc: | |||
3987 | rx_ring->next_to_clean = i; | 3916 | rx_ring->next_to_clean = i; |
3988 | cleaned_count = IGB_DESC_UNUSED(rx_ring); | 3917 | cleaned_count = IGB_DESC_UNUSED(rx_ring); |
3989 | 3918 | ||
3990 | #ifdef CONFIG_IGB_LRO | ||
3991 | if (rx_ring->lro_used) { | ||
3992 | lro_flush_all(&rx_ring->lro_mgr); | ||
3993 | rx_ring->lro_used = 0; | ||
3994 | } | ||
3995 | #endif | ||
3996 | |||
3997 | if (cleaned_count) | 3919 | if (cleaned_count) |
3998 | igb_alloc_rx_buffers_adv(rx_ring, cleaned_count); | 3920 | igb_alloc_rx_buffers_adv(rx_ring, cleaned_count); |
3999 | 3921 | ||
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index eee28d395682..e2ef16b29700 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c | |||
@@ -1721,14 +1721,14 @@ ixgb_intr(int irq, void *data) | |||
1721 | if (!test_bit(__IXGB_DOWN, &adapter->flags)) | 1721 | if (!test_bit(__IXGB_DOWN, &adapter->flags)) |
1722 | mod_timer(&adapter->watchdog_timer, jiffies); | 1722 | mod_timer(&adapter->watchdog_timer, jiffies); |
1723 | 1723 | ||
1724 | if (netif_rx_schedule_prep(&adapter->napi)) { | 1724 | if (napi_schedule_prep(&adapter->napi)) { |
1725 | 1725 | ||
1726 | /* Disable interrupts and register for poll. The flush | 1726 | /* Disable interrupts and register for poll. The flush |
1727 | of the posted write is intentionally left out. | 1727 | of the posted write is intentionally left out. |
1728 | */ | 1728 | */ |
1729 | 1729 | ||
1730 | IXGB_WRITE_REG(&adapter->hw, IMC, ~0); | 1730 | IXGB_WRITE_REG(&adapter->hw, IMC, ~0); |
1731 | __netif_rx_schedule(&adapter->napi); | 1731 | __napi_schedule(&adapter->napi); |
1732 | } | 1732 | } |
1733 | return IRQ_HANDLED; | 1733 | return IRQ_HANDLED; |
1734 | } | 1734 | } |
@@ -1749,7 +1749,7 @@ ixgb_clean(struct napi_struct *napi, int budget) | |||
1749 | 1749 | ||
1750 | /* If budget not fully consumed, exit the polling mode */ | 1750 | /* If budget not fully consumed, exit the polling mode */ |
1751 | if (work_done < budget) { | 1751 | if (work_done < budget) { |
1752 | netif_rx_complete(napi); | 1752 | napi_complete(napi); |
1753 | if (!test_bit(__IXGB_DOWN, &adapter->flags)) | 1753 | if (!test_bit(__IXGB_DOWN, &adapter->flags)) |
1754 | ixgb_irq_enable(adapter); | 1754 | ixgb_irq_enable(adapter); |
1755 | } | 1755 | } |
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile index 6e7ef765bcd8..f6061950f5d1 100644 --- a/drivers/net/ixgbe/Makefile +++ b/drivers/net/ixgbe/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | ################################################################################ | 1 | ################################################################################ |
2 | # | 2 | # |
3 | # Intel 10 Gigabit PCI Express Linux driver | 3 | # Intel 10 Gigabit PCI Express Linux driver |
4 | # Copyright(c) 1999 - 2007 Intel Corporation. | 4 | # Copyright(c) 1999 - 2009 Intel Corporation. |
5 | # | 5 | # |
6 | # This program is free software; you can redistribute it and/or modify it | 6 | # This program is free software; you can redistribute it and/or modify it |
7 | # under the terms and conditions of the GNU General Public License, | 7 | # under the terms and conditions of the GNU General Public License, |
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index e112008f39c1..0ea791ae0d14 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 10 Gigabit PCI Express Linux driver | 3 | Intel 10 Gigabit PCI Express Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -31,7 +31,6 @@ | |||
31 | #include <linux/types.h> | 31 | #include <linux/types.h> |
32 | #include <linux/pci.h> | 32 | #include <linux/pci.h> |
33 | #include <linux/netdevice.h> | 33 | #include <linux/netdevice.h> |
34 | #include <linux/inet_lro.h> | ||
35 | #include <linux/aer.h> | 34 | #include <linux/aer.h> |
36 | 35 | ||
37 | #include "ixgbe_type.h" | 36 | #include "ixgbe_type.h" |
@@ -88,9 +87,6 @@ | |||
88 | #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000 | 87 | #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000 |
89 | #define IXGBE_TX_FLAGS_VLAN_SHIFT 16 | 88 | #define IXGBE_TX_FLAGS_VLAN_SHIFT 16 |
90 | 89 | ||
91 | #define IXGBE_MAX_LRO_DESCRIPTORS 8 | ||
92 | #define IXGBE_MAX_LRO_AGGREGATE 32 | ||
93 | |||
94 | /* wrapper around a pointer to a socket buffer, | 90 | /* wrapper around a pointer to a socket buffer, |
95 | * so a DMA handle can be stored along with the buffer */ | 91 | * so a DMA handle can be stored along with the buffer */ |
96 | struct ixgbe_tx_buffer { | 92 | struct ixgbe_tx_buffer { |
@@ -142,8 +138,6 @@ struct ixgbe_ring { | |||
142 | /* cpu for tx queue */ | 138 | /* cpu for tx queue */ |
143 | int cpu; | 139 | int cpu; |
144 | #endif | 140 | #endif |
145 | struct net_lro_mgr lro_mgr; | ||
146 | bool lro_used; | ||
147 | struct ixgbe_queue_stats stats; | 141 | struct ixgbe_queue_stats stats; |
148 | u16 v_idx; /* maps directly to the index for this ring in the hardware | 142 | u16 v_idx; /* maps directly to the index for this ring in the hardware |
149 | * vector array, can also be used for finding the bit in EICR | 143 | * vector array, can also be used for finding the bit in EICR |
@@ -210,9 +204,13 @@ struct ixgbe_q_vector { | |||
210 | #define OTHER_VECTOR 1 | 204 | #define OTHER_VECTOR 1 |
211 | #define NON_Q_VECTORS (OTHER_VECTOR) | 205 | #define NON_Q_VECTORS (OTHER_VECTOR) |
212 | 206 | ||
213 | #define MAX_MSIX_Q_VECTORS 16 | 207 | #define MAX_MSIX_VECTORS_82598 18 |
208 | #define MAX_MSIX_Q_VECTORS_82598 16 | ||
209 | |||
210 | #define MAX_MSIX_Q_VECTORS MAX_MSIX_Q_VECTORS_82598 | ||
211 | #define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82598 | ||
212 | |||
214 | #define MIN_MSIX_Q_VECTORS 2 | 213 | #define MIN_MSIX_Q_VECTORS 2 |
215 | #define MAX_MSIX_COUNT (MAX_MSIX_Q_VECTORS + NON_Q_VECTORS) | ||
216 | #define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) | 214 | #define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) |
217 | 215 | ||
218 | /* board specific private data structure */ | 216 | /* board specific private data structure */ |
@@ -250,6 +248,7 @@ struct ixgbe_adapter { | |||
250 | u64 hw_csum_rx_good; | 248 | u64 hw_csum_rx_good; |
251 | u64 non_eop_descs; | 249 | u64 non_eop_descs; |
252 | int num_msix_vectors; | 250 | int num_msix_vectors; |
251 | int max_msix_q_vectors; /* true count of q_vectors for device */ | ||
253 | struct ixgbe_ring_feature ring_feature[3]; | 252 | struct ixgbe_ring_feature ring_feature[3]; |
254 | struct msix_entry *msix_entries; | 253 | struct msix_entry *msix_entries; |
255 | 254 | ||
@@ -301,9 +300,6 @@ struct ixgbe_adapter { | |||
301 | 300 | ||
302 | unsigned long state; | 301 | unsigned long state; |
303 | u64 tx_busy; | 302 | u64 tx_busy; |
304 | u64 lro_aggregated; | ||
305 | u64 lro_flushed; | ||
306 | u64 lro_no_desc; | ||
307 | unsigned int tx_ring_count; | 303 | unsigned int tx_ring_count; |
308 | unsigned int rx_ring_count; | 304 | unsigned int rx_ring_count; |
309 | 305 | ||
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c index ad5699d9ab0d..8e7315e0a7fa 100644 --- a/drivers/net/ixgbe/ixgbe_82598.c +++ b/drivers/net/ixgbe/ixgbe_82598.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 10 Gigabit PCI Express Linux driver | 3 | Intel 10 Gigabit PCI Express Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -50,6 +50,27 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, | |||
50 | u8 *eeprom_data); | 50 | u8 *eeprom_data); |
51 | 51 | ||
52 | /** | 52 | /** |
53 | * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count | ||
54 | * @hw: pointer to hardware structure | ||
55 | * | ||
56 | * Read PCIe configuration space, and get the MSI-X vector count from | ||
57 | * the capabilities table. | ||
58 | **/ | ||
59 | u16 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw) | ||
60 | { | ||
61 | struct ixgbe_adapter *adapter = hw->back; | ||
62 | u16 msix_count; | ||
63 | pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82598_CAPS, | ||
64 | &msix_count); | ||
65 | msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; | ||
66 | |||
67 | /* MSI-X count is zero-based in HW, so increment to give proper value */ | ||
68 | msix_count++; | ||
69 | |||
70 | return msix_count; | ||
71 | } | ||
72 | |||
73 | /** | ||
53 | */ | 74 | */ |
54 | static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) | 75 | static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) |
55 | { | 76 | { |
@@ -106,6 +127,7 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) | |||
106 | mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; | 127 | mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; |
107 | mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; | 128 | mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; |
108 | mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; | 129 | mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; |
130 | mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw); | ||
109 | 131 | ||
110 | out: | 132 | out: |
111 | return ret_val; | 133 | return ret_val; |
@@ -213,6 +235,10 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) | |||
213 | 235 | ||
214 | /* Media type for I82598 is based on device ID */ | 236 | /* Media type for I82598 is based on device ID */ |
215 | switch (hw->device_id) { | 237 | switch (hw->device_id) { |
238 | case IXGBE_DEV_ID_82598: | ||
239 | case IXGBE_DEV_ID_82598_BX: | ||
240 | media_type = ixgbe_media_type_backplane; | ||
241 | break; | ||
216 | case IXGBE_DEV_ID_82598AF_DUAL_PORT: | 242 | case IXGBE_DEV_ID_82598AF_DUAL_PORT: |
217 | case IXGBE_DEV_ID_82598AF_SINGLE_PORT: | 243 | case IXGBE_DEV_ID_82598AF_SINGLE_PORT: |
218 | case IXGBE_DEV_ID_82598EB_CX4: | 244 | case IXGBE_DEV_ID_82598EB_CX4: |
@@ -1002,6 +1028,13 @@ static s32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw) | |||
1002 | s32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; | 1028 | s32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; |
1003 | 1029 | ||
1004 | switch (hw->device_id) { | 1030 | switch (hw->device_id) { |
1031 | case IXGBE_DEV_ID_82598: | ||
1032 | /* Default device ID is mezzanine card KX/KX4 */ | ||
1033 | physical_layer = (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 | | ||
1034 | IXGBE_PHYSICAL_LAYER_1000BASE_KX); | ||
1035 | break; | ||
1036 | case IXGBE_DEV_ID_82598_BX: | ||
1037 | physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX; | ||
1005 | case IXGBE_DEV_ID_82598EB_CX4: | 1038 | case IXGBE_DEV_ID_82598EB_CX4: |
1006 | case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: | 1039 | case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: |
1007 | physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; | 1040 | physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; |
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index f67c68404bb3..05f0e872947f 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 10 Gigabit PCI Express Linux driver | 3 | Intel 10 Gigabit PCI Express Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h index 192f8d012911..0b5ba5755805 100644 --- a/drivers/net/ixgbe/ixgbe_common.h +++ b/drivers/net/ixgbe/ixgbe_common.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 10 Gigabit PCI Express Linux driver | 3 | Intel 10 Gigabit PCI Express Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c index e2e28ac63dec..2a60c89ab346 100644 --- a/drivers/net/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ixgbe/ixgbe_dcb.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 10 Gigabit PCI Express Linux driver | 3 | Intel 10 Gigabit PCI Express Linux driver |
4 | Copyright(c) 1999 - 2007 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h index 75f6efe1e369..0da5c6d5bcaf 100644 --- a/drivers/net/ixgbe/ixgbe_dcb.h +++ b/drivers/net/ixgbe/ixgbe_dcb.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 10 Gigabit PCI Express Linux driver | 3 | Intel 10 Gigabit PCI Express Linux driver |
4 | Copyright(c) 1999 - 2007 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c index 2c046b0b5d28..560321148935 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_82598.c +++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 10 Gigabit PCI Express Linux driver | 3 | Intel 10 Gigabit PCI Express Linux driver |
4 | Copyright(c) 1999 - 2007 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ixgbe/ixgbe_dcb_82598.h index 1e6a313719d7..ebbe53c352a7 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_82598.h +++ b/drivers/net/ixgbe/ixgbe_dcb_82598.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 10 Gigabit PCI Express Linux driver | 3 | Intel 10 Gigabit PCI Express Linux driver |
4 | Copyright(c) 1999 - 2007 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c index 4129976953f5..dd9d1d63a59c 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 10 Gigabit PCI Express Linux driver | 3 | Intel 10 Gigabit PCI Express Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 67f87a79154d..14e661e0a250 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 10 Gigabit PCI Express Linux driver | 3 | Intel 10 Gigabit PCI Express Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -89,8 +89,6 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = { | |||
89 | {"rx_header_split", IXGBE_STAT(rx_hdr_split)}, | 89 | {"rx_header_split", IXGBE_STAT(rx_hdr_split)}, |
90 | {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, | 90 | {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, |
91 | {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, | 91 | {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, |
92 | {"lro_aggregated", IXGBE_STAT(lro_aggregated)}, | ||
93 | {"lro_flushed", IXGBE_STAT(lro_flushed)}, | ||
94 | }; | 92 | }; |
95 | 93 | ||
96 | #define IXGBE_QUEUE_STATS_LEN \ | 94 | #define IXGBE_QUEUE_STATS_LEN \ |
@@ -132,6 +130,26 @@ static int ixgbe_get_settings(struct net_device *netdev, | |||
132 | ecmd->advertising |= ADVERTISED_1000baseT_Full; | 130 | ecmd->advertising |= ADVERTISED_1000baseT_Full; |
133 | 131 | ||
134 | ecmd->port = PORT_TP; | 132 | ecmd->port = PORT_TP; |
133 | } else if (hw->phy.media_type == ixgbe_media_type_backplane) { | ||
134 | /* Set as FIBRE until SERDES defined in kernel */ | ||
135 | switch (hw->device_id) { | ||
136 | case IXGBE_DEV_ID_82598: | ||
137 | ecmd->supported |= (SUPPORTED_1000baseT_Full | | ||
138 | SUPPORTED_FIBRE); | ||
139 | ecmd->advertising = (ADVERTISED_10000baseT_Full | | ||
140 | ADVERTISED_1000baseT_Full | | ||
141 | ADVERTISED_FIBRE); | ||
142 | ecmd->port = PORT_FIBRE; | ||
143 | break; | ||
144 | case IXGBE_DEV_ID_82598_BX: | ||
145 | ecmd->supported = (SUPPORTED_1000baseT_Full | | ||
146 | SUPPORTED_FIBRE); | ||
147 | ecmd->advertising = (ADVERTISED_1000baseT_Full | | ||
148 | ADVERTISED_FIBRE); | ||
149 | ecmd->port = PORT_FIBRE; | ||
150 | ecmd->autoneg = AUTONEG_DISABLE; | ||
151 | break; | ||
152 | } | ||
135 | } else { | 153 | } else { |
136 | ecmd->supported |= SUPPORTED_FIBRE; | 154 | ecmd->supported |= SUPPORTED_FIBRE; |
137 | ecmd->advertising = (ADVERTISED_10000baseT_Full | | 155 | ecmd->advertising = (ADVERTISED_10000baseT_Full | |
@@ -808,15 +826,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, | |||
808 | int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64); | 826 | int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64); |
809 | int j, k; | 827 | int j, k; |
810 | int i; | 828 | int i; |
811 | u64 aggregated = 0, flushed = 0, no_desc = 0; | ||
812 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
813 | aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated; | ||
814 | flushed += adapter->rx_ring[i].lro_mgr.stats.flushed; | ||
815 | no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc; | ||
816 | } | ||
817 | adapter->lro_aggregated = aggregated; | ||
818 | adapter->lro_flushed = flushed; | ||
819 | adapter->lro_no_desc = no_desc; | ||
820 | 829 | ||
821 | ixgbe_update_stats(adapter); | 830 | ixgbe_update_stats(adapter); |
822 | for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { | 831 | for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index d2f4d5f508b7..ed8d14163c1d 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 10 Gigabit PCI Express Linux driver | 3 | Intel 10 Gigabit PCI Express Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -47,9 +47,9 @@ char ixgbe_driver_name[] = "ixgbe"; | |||
47 | static const char ixgbe_driver_string[] = | 47 | static const char ixgbe_driver_string[] = |
48 | "Intel(R) 10 Gigabit PCI Express Network Driver"; | 48 | "Intel(R) 10 Gigabit PCI Express Network Driver"; |
49 | 49 | ||
50 | #define DRV_VERSION "1.3.30-k2" | 50 | #define DRV_VERSION "1.3.56-k2" |
51 | const char ixgbe_driver_version[] = DRV_VERSION; | 51 | const char ixgbe_driver_version[] = DRV_VERSION; |
52 | static char ixgbe_copyright[] = "Copyright (c) 1999-2007 Intel Corporation."; | 52 | static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation."; |
53 | 53 | ||
54 | static const struct ixgbe_info *ixgbe_info_tbl[] = { | 54 | static const struct ixgbe_info *ixgbe_info_tbl[] = { |
55 | [board_82598] = &ixgbe_82598_info, | 55 | [board_82598] = &ixgbe_82598_info, |
@@ -64,6 +64,8 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = { | |||
64 | * Class, Class Mask, private data (not used) } | 64 | * Class, Class Mask, private data (not used) } |
65 | */ | 65 | */ |
66 | static struct pci_device_id ixgbe_pci_tbl[] = { | 66 | static struct pci_device_id ixgbe_pci_tbl[] = { |
67 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), | ||
68 | board_82598 }, | ||
67 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), | 69 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), |
68 | board_82598 }, | 70 | board_82598 }, |
69 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), | 71 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), |
@@ -82,6 +84,8 @@ static struct pci_device_id ixgbe_pci_tbl[] = { | |||
82 | board_82598 }, | 84 | board_82598 }, |
83 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), | 85 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), |
84 | board_82598 }, | 86 | board_82598 }, |
87 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), | ||
88 | board_82598 }, | ||
85 | 89 | ||
86 | /* required last entry */ | 90 | /* required last entry */ |
87 | {0, } | 91 | {0, } |
@@ -403,23 +407,21 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) | |||
403 | * @rx_ring: rx descriptor ring (for a specific queue) to setup | 407 | * @rx_ring: rx descriptor ring (for a specific queue) to setup |
404 | * @rx_desc: rx descriptor | 408 | * @rx_desc: rx descriptor |
405 | **/ | 409 | **/ |
406 | static void ixgbe_receive_skb(struct ixgbe_adapter *adapter, | 410 | static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, |
407 | struct sk_buff *skb, u8 status, | 411 | struct sk_buff *skb, u8 status, |
408 | struct ixgbe_ring *ring, | ||
409 | union ixgbe_adv_rx_desc *rx_desc) | 412 | union ixgbe_adv_rx_desc *rx_desc) |
410 | { | 413 | { |
414 | struct ixgbe_adapter *adapter = q_vector->adapter; | ||
415 | struct napi_struct *napi = &q_vector->napi; | ||
411 | bool is_vlan = (status & IXGBE_RXD_STAT_VP); | 416 | bool is_vlan = (status & IXGBE_RXD_STAT_VP); |
412 | u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); | 417 | u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); |
413 | 418 | ||
414 | if (adapter->netdev->features & NETIF_F_LRO && | 419 | skb_record_rx_queue(skb, q_vector - &adapter->q_vector[0]); |
415 | skb->ip_summed == CHECKSUM_UNNECESSARY) { | 420 | if (skb->ip_summed == CHECKSUM_UNNECESSARY) { |
416 | if (adapter->vlgrp && is_vlan && (tag != 0)) | 421 | if (adapter->vlgrp && is_vlan && (tag != 0)) |
417 | lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb, | 422 | vlan_gro_receive(napi, adapter->vlgrp, tag, skb); |
418 | adapter->vlgrp, tag, | ||
419 | rx_desc); | ||
420 | else | 423 | else |
421 | lro_receive_skb(&ring->lro_mgr, skb, rx_desc); | 424 | napi_gro_receive(napi, skb); |
422 | ring->lro_used = true; | ||
423 | } else { | 425 | } else { |
424 | if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { | 426 | if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { |
425 | if (adapter->vlgrp && is_vlan && (tag != 0)) | 427 | if (adapter->vlgrp && is_vlan && (tag != 0)) |
@@ -574,10 +576,11 @@ static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc) | |||
574 | return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; | 576 | return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; |
575 | } | 577 | } |
576 | 578 | ||
577 | static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, | 579 | static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, |
578 | struct ixgbe_ring *rx_ring, | 580 | struct ixgbe_ring *rx_ring, |
579 | int *work_done, int work_to_do) | 581 | int *work_done, int work_to_do) |
580 | { | 582 | { |
583 | struct ixgbe_adapter *adapter = q_vector->adapter; | ||
581 | struct pci_dev *pdev = adapter->pdev; | 584 | struct pci_dev *pdev = adapter->pdev; |
582 | union ixgbe_adv_rx_desc *rx_desc, *next_rxd; | 585 | union ixgbe_adv_rx_desc *rx_desc, *next_rxd; |
583 | struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; | 586 | struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; |
@@ -678,7 +681,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, | |||
678 | total_rx_packets++; | 681 | total_rx_packets++; |
679 | 682 | ||
680 | skb->protocol = eth_type_trans(skb, adapter->netdev); | 683 | skb->protocol = eth_type_trans(skb, adapter->netdev); |
681 | ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc); | 684 | ixgbe_receive_skb(q_vector, skb, staterr, rx_desc); |
682 | 685 | ||
683 | next_desc: | 686 | next_desc: |
684 | rx_desc->wb.upper.status_error = 0; | 687 | rx_desc->wb.upper.status_error = 0; |
@@ -696,11 +699,6 @@ next_desc: | |||
696 | staterr = le32_to_cpu(rx_desc->wb.upper.status_error); | 699 | staterr = le32_to_cpu(rx_desc->wb.upper.status_error); |
697 | } | 700 | } |
698 | 701 | ||
699 | if (rx_ring->lro_used) { | ||
700 | lro_flush_all(&rx_ring->lro_mgr); | ||
701 | rx_ring->lro_used = false; | ||
702 | } | ||
703 | |||
704 | rx_ring->next_to_clean = i; | 702 | rx_ring->next_to_clean = i; |
705 | cleaned_count = IXGBE_DESC_UNUSED(rx_ring); | 703 | cleaned_count = IXGBE_DESC_UNUSED(rx_ring); |
706 | 704 | ||
@@ -1015,7 +1013,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) | |||
1015 | rx_ring = &(adapter->rx_ring[r_idx]); | 1013 | rx_ring = &(adapter->rx_ring[r_idx]); |
1016 | /* disable interrupts on this vector only */ | 1014 | /* disable interrupts on this vector only */ |
1017 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx); | 1015 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx); |
1018 | netif_rx_schedule(&q_vector->napi); | 1016 | napi_schedule(&q_vector->napi); |
1019 | 1017 | ||
1020 | return IRQ_HANDLED; | 1018 | return IRQ_HANDLED; |
1021 | } | 1019 | } |
@@ -1052,11 +1050,11 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) | |||
1052 | ixgbe_update_rx_dca(adapter, rx_ring); | 1050 | ixgbe_update_rx_dca(adapter, rx_ring); |
1053 | #endif | 1051 | #endif |
1054 | 1052 | ||
1055 | ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget); | 1053 | ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget); |
1056 | 1054 | ||
1057 | /* If all Rx work done, exit the polling mode */ | 1055 | /* If all Rx work done, exit the polling mode */ |
1058 | if (work_done < budget) { | 1056 | if (work_done < budget) { |
1059 | netif_rx_complete(napi); | 1057 | napi_complete(napi); |
1060 | if (adapter->itr_setting & 3) | 1058 | if (adapter->itr_setting & 3) |
1061 | ixgbe_set_itr_msix(q_vector); | 1059 | ixgbe_set_itr_msix(q_vector); |
1062 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | 1060 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
@@ -1095,7 +1093,7 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget) | |||
1095 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) | 1093 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) |
1096 | ixgbe_update_rx_dca(adapter, rx_ring); | 1094 | ixgbe_update_rx_dca(adapter, rx_ring); |
1097 | #endif | 1095 | #endif |
1098 | ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget); | 1096 | ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget); |
1099 | enable_mask |= rx_ring->v_idx; | 1097 | enable_mask |= rx_ring->v_idx; |
1100 | r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, | 1098 | r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, |
1101 | r_idx + 1); | 1099 | r_idx + 1); |
@@ -1105,7 +1103,7 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget) | |||
1105 | rx_ring = &(adapter->rx_ring[r_idx]); | 1103 | rx_ring = &(adapter->rx_ring[r_idx]); |
1106 | /* If all Rx work done, exit the polling mode */ | 1104 | /* If all Rx work done, exit the polling mode */ |
1107 | if (work_done < budget) { | 1105 | if (work_done < budget) { |
1108 | netif_rx_complete(napi); | 1106 | napi_complete(napi); |
1109 | if (adapter->itr_setting & 3) | 1107 | if (adapter->itr_setting & 3) |
1110 | ixgbe_set_itr_msix(q_vector); | 1108 | ixgbe_set_itr_msix(q_vector); |
1111 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | 1109 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
@@ -1381,13 +1379,13 @@ static irqreturn_t ixgbe_intr(int irq, void *data) | |||
1381 | 1379 | ||
1382 | ixgbe_check_fan_failure(adapter, eicr); | 1380 | ixgbe_check_fan_failure(adapter, eicr); |
1383 | 1381 | ||
1384 | if (netif_rx_schedule_prep(&adapter->q_vector[0].napi)) { | 1382 | if (napi_schedule_prep(&adapter->q_vector[0].napi)) { |
1385 | adapter->tx_ring[0].total_packets = 0; | 1383 | adapter->tx_ring[0].total_packets = 0; |
1386 | adapter->tx_ring[0].total_bytes = 0; | 1384 | adapter->tx_ring[0].total_bytes = 0; |
1387 | adapter->rx_ring[0].total_packets = 0; | 1385 | adapter->rx_ring[0].total_packets = 0; |
1388 | adapter->rx_ring[0].total_bytes = 0; | 1386 | adapter->rx_ring[0].total_bytes = 0; |
1389 | /* would disable interrupts here but EIAM disabled it */ | 1387 | /* would disable interrupts here but EIAM disabled it */ |
1390 | __netif_rx_schedule(&adapter->q_vector[0].napi); | 1388 | __napi_schedule(&adapter->q_vector[0].napi); |
1391 | } | 1389 | } |
1392 | 1390 | ||
1393 | return IRQ_HANDLED; | 1391 | return IRQ_HANDLED; |
@@ -1568,33 +1566,6 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index) | |||
1568 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl); | 1566 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl); |
1569 | } | 1567 | } |
1570 | 1568 | ||
1571 | /** | ||
1572 | * ixgbe_get_skb_hdr - helper function for LRO header processing | ||
1573 | * @skb: pointer to sk_buff to be added to LRO packet | ||
1574 | * @iphdr: pointer to ip header structure | ||
1575 | * @tcph: pointer to tcp header structure | ||
1576 | * @hdr_flags: pointer to header flags | ||
1577 | * @priv: private data | ||
1578 | **/ | ||
1579 | static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph, | ||
1580 | u64 *hdr_flags, void *priv) | ||
1581 | { | ||
1582 | union ixgbe_adv_rx_desc *rx_desc = priv; | ||
1583 | |||
1584 | /* Verify that this is a valid IPv4 TCP packet */ | ||
1585 | if (!((ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_IPV4) && | ||
1586 | (ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_TCP))) | ||
1587 | return -1; | ||
1588 | |||
1589 | /* Set network headers */ | ||
1590 | skb_reset_network_header(skb); | ||
1591 | skb_set_transport_header(skb, ip_hdrlen(skb)); | ||
1592 | *iphdr = ip_hdr(skb); | ||
1593 | *tcph = tcp_hdr(skb); | ||
1594 | *hdr_flags = LRO_IPV4 | LRO_TCP; | ||
1595 | return 0; | ||
1596 | } | ||
1597 | |||
1598 | #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ | 1569 | #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ |
1599 | (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) | 1570 | (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) |
1600 | 1571 | ||
@@ -1666,16 +1637,6 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
1666 | adapter->rx_ring[i].head = IXGBE_RDH(j); | 1637 | adapter->rx_ring[i].head = IXGBE_RDH(j); |
1667 | adapter->rx_ring[i].tail = IXGBE_RDT(j); | 1638 | adapter->rx_ring[i].tail = IXGBE_RDT(j); |
1668 | adapter->rx_ring[i].rx_buf_len = rx_buf_len; | 1639 | adapter->rx_ring[i].rx_buf_len = rx_buf_len; |
1669 | /* Intitial LRO Settings */ | ||
1670 | adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE; | ||
1671 | adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS; | ||
1672 | adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr; | ||
1673 | adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID; | ||
1674 | if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) | ||
1675 | adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI; | ||
1676 | adapter->rx_ring[i].lro_mgr.dev = adapter->netdev; | ||
1677 | adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; | ||
1678 | adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; | ||
1679 | 1640 | ||
1680 | ixgbe_configure_srrctl(adapter, j); | 1641 | ixgbe_configure_srrctl(adapter, j); |
1681 | } | 1642 | } |
@@ -2310,14 +2271,14 @@ static int ixgbe_poll(struct napi_struct *napi, int budget) | |||
2310 | #endif | 2271 | #endif |
2311 | 2272 | ||
2312 | tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring); | 2273 | tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring); |
2313 | ixgbe_clean_rx_irq(adapter, adapter->rx_ring, &work_done, budget); | 2274 | ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget); |
2314 | 2275 | ||
2315 | if (tx_cleaned) | 2276 | if (tx_cleaned) |
2316 | work_done = budget; | 2277 | work_done = budget; |
2317 | 2278 | ||
2318 | /* If budget not fully consumed, exit the polling mode */ | 2279 | /* If budget not fully consumed, exit the polling mode */ |
2319 | if (work_done < budget) { | 2280 | if (work_done < budget) { |
2320 | netif_rx_complete(napi); | 2281 | napi_complete(napi); |
2321 | if (adapter->itr_setting & 3) | 2282 | if (adapter->itr_setting & 3) |
2322 | ixgbe_set_itr(adapter); | 2283 | ixgbe_set_itr(adapter); |
2323 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | 2284 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
@@ -2460,7 +2421,13 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, | |||
2460 | ixgbe_set_num_queues(adapter); | 2421 | ixgbe_set_num_queues(adapter); |
2461 | } else { | 2422 | } else { |
2462 | adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ | 2423 | adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ |
2463 | adapter->num_msix_vectors = vectors; | 2424 | /* |
2425 | * Adjust for only the vectors we'll use, which is minimum | ||
2426 | * of max_msix_q_vectors + NON_Q_VECTORS, or the number of | ||
2427 | * vectors we were allocated. | ||
2428 | */ | ||
2429 | adapter->num_msix_vectors = min(vectors, | ||
2430 | adapter->max_msix_q_vectors + NON_Q_VECTORS); | ||
2464 | } | 2431 | } |
2465 | } | 2432 | } |
2466 | 2433 | ||
@@ -2785,6 +2752,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
2785 | adapter->ring_feature[RING_F_RSS].indices = rss; | 2752 | adapter->ring_feature[RING_F_RSS].indices = rss; |
2786 | adapter->flags |= IXGBE_FLAG_RSS_ENABLED; | 2753 | adapter->flags |= IXGBE_FLAG_RSS_ENABLED; |
2787 | adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES; | 2754 | adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES; |
2755 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; | ||
2788 | 2756 | ||
2789 | #ifdef CONFIG_IXGBE_DCB | 2757 | #ifdef CONFIG_IXGBE_DCB |
2790 | /* Configure DCB traffic classes */ | 2758 | /* Configure DCB traffic classes */ |
@@ -2926,12 +2894,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, | |||
2926 | struct pci_dev *pdev = adapter->pdev; | 2894 | struct pci_dev *pdev = adapter->pdev; |
2927 | int size; | 2895 | int size; |
2928 | 2896 | ||
2929 | size = sizeof(struct net_lro_desc) * IXGBE_MAX_LRO_DESCRIPTORS; | ||
2930 | rx_ring->lro_mgr.lro_arr = vmalloc(size); | ||
2931 | if (!rx_ring->lro_mgr.lro_arr) | ||
2932 | return -ENOMEM; | ||
2933 | memset(rx_ring->lro_mgr.lro_arr, 0, size); | ||
2934 | |||
2935 | size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; | 2897 | size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; |
2936 | rx_ring->rx_buffer_info = vmalloc(size); | 2898 | rx_ring->rx_buffer_info = vmalloc(size); |
2937 | if (!rx_ring->rx_buffer_info) { | 2899 | if (!rx_ring->rx_buffer_info) { |
@@ -2960,8 +2922,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, | |||
2960 | return 0; | 2922 | return 0; |
2961 | 2923 | ||
2962 | alloc_failed: | 2924 | alloc_failed: |
2963 | vfree(rx_ring->lro_mgr.lro_arr); | ||
2964 | rx_ring->lro_mgr.lro_arr = NULL; | ||
2965 | return -ENOMEM; | 2925 | return -ENOMEM; |
2966 | } | 2926 | } |
2967 | 2927 | ||
@@ -3039,9 +2999,6 @@ void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter, | |||
3039 | { | 2999 | { |
3040 | struct pci_dev *pdev = adapter->pdev; | 3000 | struct pci_dev *pdev = adapter->pdev; |
3041 | 3001 | ||
3042 | vfree(rx_ring->lro_mgr.lro_arr); | ||
3043 | rx_ring->lro_mgr.lro_arr = NULL; | ||
3044 | |||
3045 | ixgbe_clean_rx_ring(adapter, rx_ring); | 3002 | ixgbe_clean_rx_ring(adapter, rx_ring); |
3046 | 3003 | ||
3047 | vfree(rx_ring->rx_buffer_info); | 3004 | vfree(rx_ring->rx_buffer_info); |
@@ -3619,13 +3576,13 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, | |||
3619 | 3576 | ||
3620 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 3577 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
3621 | switch (skb->protocol) { | 3578 | switch (skb->protocol) { |
3622 | case __constant_htons(ETH_P_IP): | 3579 | case cpu_to_be16(ETH_P_IP): |
3623 | type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; | 3580 | type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; |
3624 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) | 3581 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) |
3625 | type_tucmd_mlhl |= | 3582 | type_tucmd_mlhl |= |
3626 | IXGBE_ADVTXD_TUCMD_L4T_TCP; | 3583 | IXGBE_ADVTXD_TUCMD_L4T_TCP; |
3627 | break; | 3584 | break; |
3628 | case __constant_htons(ETH_P_IPV6): | 3585 | case cpu_to_be16(ETH_P_IPV6): |
3629 | /* XXX what about other V6 headers?? */ | 3586 | /* XXX what about other V6 headers?? */ |
3630 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) | 3587 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) |
3631 | type_tucmd_mlhl |= | 3588 | type_tucmd_mlhl |= |
@@ -3956,16 +3913,27 @@ static void ixgbe_netpoll(struct net_device *netdev) | |||
3956 | **/ | 3913 | **/ |
3957 | static int ixgbe_link_config(struct ixgbe_hw *hw) | 3914 | static int ixgbe_link_config(struct ixgbe_hw *hw) |
3958 | { | 3915 | { |
3959 | u32 autoneg = IXGBE_LINK_SPEED_10GB_FULL; | 3916 | u32 autoneg; |
3917 | bool link_up = false; | ||
3918 | u32 ret = IXGBE_ERR_LINK_SETUP; | ||
3919 | |||
3920 | if (hw->mac.ops.check_link) | ||
3921 | ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false); | ||
3960 | 3922 | ||
3961 | /* must always autoneg for both 1G and 10G link */ | 3923 | if (ret || !link_up) |
3962 | hw->mac.autoneg = true; | 3924 | goto link_cfg_out; |
3963 | 3925 | ||
3964 | if ((hw->mac.type == ixgbe_mac_82598EB) && | 3926 | if (hw->mac.ops.get_link_capabilities) |
3965 | (hw->phy.media_type == ixgbe_media_type_copper)) | 3927 | ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, |
3966 | autoneg = IXGBE_LINK_SPEED_82598_AUTONEG; | 3928 | &hw->mac.autoneg); |
3929 | if (ret) | ||
3930 | goto link_cfg_out; | ||
3967 | 3931 | ||
3968 | return hw->mac.ops.setup_link_speed(hw, autoneg, true, true); | 3932 | if (hw->mac.ops.setup_link_speed) |
3933 | ret = hw->mac.ops.setup_link_speed(hw, autoneg, true, true); | ||
3934 | |||
3935 | link_cfg_out: | ||
3936 | return ret; | ||
3969 | } | 3937 | } |
3970 | 3938 | ||
3971 | static const struct net_device_ops ixgbe_netdev_ops = { | 3939 | static const struct net_device_ops ixgbe_netdev_ops = { |
@@ -4141,7 +4109,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
4141 | netdev->features |= NETIF_F_IPV6_CSUM; | 4109 | netdev->features |= NETIF_F_IPV6_CSUM; |
4142 | netdev->features |= NETIF_F_TSO; | 4110 | netdev->features |= NETIF_F_TSO; |
4143 | netdev->features |= NETIF_F_TSO6; | 4111 | netdev->features |= NETIF_F_TSO6; |
4144 | netdev->features |= NETIF_F_LRO; | 4112 | netdev->features |= NETIF_F_GRO; |
4145 | 4113 | ||
4146 | netdev->vlan_features |= NETIF_F_TSO; | 4114 | netdev->vlan_features |= NETIF_F_TSO; |
4147 | netdev->vlan_features |= NETIF_F_TSO6; | 4115 | netdev->vlan_features |= NETIF_F_TSO6; |
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c index 5a8669aedf64..77ec26f5650a 100644 --- a/drivers/net/ixgbe/ixgbe_phy.c +++ b/drivers/net/ixgbe/ixgbe_phy.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 10 Gigabit PCI Express Linux driver | 3 | Intel 10 Gigabit PCI Express Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h index 43a97bc420f5..539a3061eb29 100644 --- a/drivers/net/ixgbe/ixgbe_phy.h +++ b/drivers/net/ixgbe/ixgbe_phy.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 10 Gigabit PCI Express Linux driver | 3 | Intel 10 Gigabit PCI Express Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index f011c57c9205..c49ba8a17f1b 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 10 Gigabit PCI Express Linux driver | 3 | Intel 10 Gigabit PCI Express Linux driver |
4 | Copyright(c) 1999 - 2008 Intel Corporation. | 4 | Copyright(c) 1999 - 2009 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -34,6 +34,8 @@ | |||
34 | #define IXGBE_INTEL_VENDOR_ID 0x8086 | 34 | #define IXGBE_INTEL_VENDOR_ID 0x8086 |
35 | 35 | ||
36 | /* Device IDs */ | 36 | /* Device IDs */ |
37 | #define IXGBE_DEV_ID_82598 0x10B6 | ||
38 | #define IXGBE_DEV_ID_82598_BX 0x1508 | ||
37 | #define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 | 39 | #define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 |
38 | #define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 | 40 | #define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 |
39 | #define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB | 41 | #define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB |
@@ -819,6 +821,10 @@ | |||
819 | #define IXGBE_FW_PTR 0x0F | 821 | #define IXGBE_FW_PTR 0x0F |
820 | #define IXGBE_PBANUM0_PTR 0x15 | 822 | #define IXGBE_PBANUM0_PTR 0x15 |
821 | #define IXGBE_PBANUM1_PTR 0x16 | 823 | #define IXGBE_PBANUM1_PTR 0x16 |
824 | #define IXGBE_PCIE_MSIX_82598_CAPS 0x62 | ||
825 | |||
826 | /* MSI-X capability fields masks */ | ||
827 | #define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF | ||
822 | 828 | ||
823 | /* Legacy EEPROM word offsets */ | 829 | /* Legacy EEPROM word offsets */ |
824 | #define IXGBE_ISCSI_BOOT_CAPS 0x0033 | 830 | #define IXGBE_ISCSI_BOOT_CAPS 0x0033 |
@@ -1449,6 +1455,7 @@ struct ixgbe_mac_info { | |||
1449 | u32 num_rar_entries; | 1455 | u32 num_rar_entries; |
1450 | u32 max_tx_queues; | 1456 | u32 max_tx_queues; |
1451 | u32 max_rx_queues; | 1457 | u32 max_rx_queues; |
1458 | u32 max_msix_vectors; | ||
1452 | u32 link_attach_type; | 1459 | u32 link_attach_type; |
1453 | u32 link_mode_select; | 1460 | u32 link_mode_select; |
1454 | bool link_settings_loaded; | 1461 | bool link_settings_loaded; |
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c index 014745720560..d3bf2f017cc2 100644 --- a/drivers/net/ixp2000/ixpdev.c +++ b/drivers/net/ixp2000/ixpdev.c | |||
@@ -141,7 +141,7 @@ static int ixpdev_poll(struct napi_struct *napi, int budget) | |||
141 | break; | 141 | break; |
142 | } while (ixp2000_reg_read(IXP2000_IRQ_THD_RAW_STATUS_A_0) & 0x00ff); | 142 | } while (ixp2000_reg_read(IXP2000_IRQ_THD_RAW_STATUS_A_0) & 0x00ff); |
143 | 143 | ||
144 | netif_rx_complete(napi); | 144 | napi_complete(napi); |
145 | ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0x00ff); | 145 | ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0x00ff); |
146 | 146 | ||
147 | return rx; | 147 | return rx; |
@@ -204,7 +204,7 @@ static irqreturn_t ixpdev_interrupt(int irq, void *dev_id) | |||
204 | 204 | ||
205 | ixp2000_reg_wrb(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0x00ff); | 205 | ixp2000_reg_wrb(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0x00ff); |
206 | if (likely(napi_schedule_prep(&ip->napi))) { | 206 | if (likely(napi_schedule_prep(&ip->napi))) { |
207 | __netif_rx_schedule(&ip->napi); | 207 | __napi_schedule(&ip->napi); |
208 | } else { | 208 | } else { |
209 | printk(KERN_CRIT "ixp2000: irq while polling!!\n"); | 209 | printk(KERN_CRIT "ixp2000: irq while polling!!\n"); |
210 | } | 210 | } |
diff --git a/drivers/net/jazzsonic.c b/drivers/net/jazzsonic.c index 334ff9e12cdd..14248cfc3dfd 100644 --- a/drivers/net/jazzsonic.c +++ b/drivers/net/jazzsonic.c | |||
@@ -131,7 +131,8 @@ static int __init sonic_probe1(struct net_device *dev) | |||
131 | if (sonic_debug && version_printed++ == 0) | 131 | if (sonic_debug && version_printed++ == 0) |
132 | printk(version); | 132 | printk(version); |
133 | 133 | ||
134 | printk(KERN_INFO "%s: Sonic ethernet found at 0x%08lx, ", lp->device->bus_id, dev->base_addr); | 134 | printk(KERN_INFO "%s: Sonic ethernet found at 0x%08lx, ", |
135 | dev_name(lp->device), dev->base_addr); | ||
135 | 136 | ||
136 | /* | 137 | /* |
137 | * Put the sonic into software reset, then | 138 | * Put the sonic into software reset, then |
@@ -156,7 +157,8 @@ static int __init sonic_probe1(struct net_device *dev) | |||
156 | if ((lp->descriptors = dma_alloc_coherent(lp->device, | 157 | if ((lp->descriptors = dma_alloc_coherent(lp->device, |
157 | SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), | 158 | SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), |
158 | &lp->descriptors_laddr, GFP_KERNEL)) == NULL) { | 159 | &lp->descriptors_laddr, GFP_KERNEL)) == NULL) { |
159 | printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n", lp->device->bus_id); | 160 | printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n", |
161 | dev_name(lp->device)); | ||
160 | goto out; | 162 | goto out; |
161 | } | 163 | } |
162 | 164 | ||
diff --git a/drivers/net/jme.h b/drivers/net/jme.h index 5154411b5e6b..e321c678b11c 100644 --- a/drivers/net/jme.h +++ b/drivers/net/jme.h | |||
@@ -398,15 +398,15 @@ struct jme_ring { | |||
398 | #define JME_NAPI_WEIGHT(w) int w | 398 | #define JME_NAPI_WEIGHT(w) int w |
399 | #define JME_NAPI_WEIGHT_VAL(w) w | 399 | #define JME_NAPI_WEIGHT_VAL(w) w |
400 | #define JME_NAPI_WEIGHT_SET(w, r) | 400 | #define JME_NAPI_WEIGHT_SET(w, r) |
401 | #define JME_RX_COMPLETE(dev, napis) netif_rx_complete(napis) | 401 | #define JME_RX_COMPLETE(dev, napis) napi_complete(napis) |
402 | #define JME_NAPI_ENABLE(priv) napi_enable(&priv->napi); | 402 | #define JME_NAPI_ENABLE(priv) napi_enable(&priv->napi); |
403 | #define JME_NAPI_DISABLE(priv) \ | 403 | #define JME_NAPI_DISABLE(priv) \ |
404 | if (!napi_disable_pending(&priv->napi)) \ | 404 | if (!napi_disable_pending(&priv->napi)) \ |
405 | napi_disable(&priv->napi); | 405 | napi_disable(&priv->napi); |
406 | #define JME_RX_SCHEDULE_PREP(priv) \ | 406 | #define JME_RX_SCHEDULE_PREP(priv) \ |
407 | netif_rx_schedule_prep(&priv->napi) | 407 | napi_schedule_prep(&priv->napi) |
408 | #define JME_RX_SCHEDULE(priv) \ | 408 | #define JME_RX_SCHEDULE(priv) \ |
409 | __netif_rx_schedule(&priv->napi); | 409 | __napi_schedule(&priv->napi); |
410 | 410 | ||
411 | /* | 411 | /* |
412 | * Jmac Adapter Private data | 412 | * Jmac Adapter Private data |
diff --git a/drivers/net/korina.c b/drivers/net/korina.c index 75010cac76ac..38d6649a29c4 100644 --- a/drivers/net/korina.c +++ b/drivers/net/korina.c | |||
@@ -334,7 +334,7 @@ static irqreturn_t korina_rx_dma_interrupt(int irq, void *dev_id) | |||
334 | DMA_STAT_HALT | DMA_STAT_ERR), | 334 | DMA_STAT_HALT | DMA_STAT_ERR), |
335 | &lp->rx_dma_regs->dmasm); | 335 | &lp->rx_dma_regs->dmasm); |
336 | 336 | ||
337 | netif_rx_schedule(&lp->napi); | 337 | napi_schedule(&lp->napi); |
338 | 338 | ||
339 | if (dmas & DMA_STAT_ERR) | 339 | if (dmas & DMA_STAT_ERR) |
340 | printk(KERN_ERR DRV_NAME "%s: DMA error\n", dev->name); | 340 | printk(KERN_ERR DRV_NAME "%s: DMA error\n", dev->name); |
@@ -468,7 +468,7 @@ static int korina_poll(struct napi_struct *napi, int budget) | |||
468 | 468 | ||
469 | work_done = korina_rx(dev, budget); | 469 | work_done = korina_rx(dev, budget); |
470 | if (work_done < budget) { | 470 | if (work_done < budget) { |
471 | netif_rx_complete(napi); | 471 | napi_complete(napi); |
472 | 472 | ||
473 | writel(readl(&lp->rx_dma_regs->dmasm) & | 473 | writel(readl(&lp->rx_dma_regs->dmasm) & |
474 | ~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR), | 474 | ~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR), |
diff --git a/drivers/net/macb.c b/drivers/net/macb.c index f6c4936e2fa8..872c1bdf42bd 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c | |||
@@ -211,10 +211,10 @@ static int macb_mii_probe(struct net_device *dev) | |||
211 | 211 | ||
212 | /* attach the mac to the phy */ | 212 | /* attach the mac to the phy */ |
213 | if (pdata && pdata->is_rmii) { | 213 | if (pdata && pdata->is_rmii) { |
214 | phydev = phy_connect(dev, phydev->dev.bus_id, | 214 | phydev = phy_connect(dev, dev_name(&phydev->dev), |
215 | &macb_handle_link_change, 0, PHY_INTERFACE_MODE_RMII); | 215 | &macb_handle_link_change, 0, PHY_INTERFACE_MODE_RMII); |
216 | } else { | 216 | } else { |
217 | phydev = phy_connect(dev, phydev->dev.bus_id, | 217 | phydev = phy_connect(dev, dev_name(&phydev->dev), |
218 | &macb_handle_link_change, 0, PHY_INTERFACE_MODE_MII); | 218 | &macb_handle_link_change, 0, PHY_INTERFACE_MODE_MII); |
219 | } | 219 | } |
220 | 220 | ||
@@ -527,7 +527,7 @@ static int macb_poll(struct napi_struct *napi, int budget) | |||
527 | * this function was called last time, and no packets | 527 | * this function was called last time, and no packets |
528 | * have been received since. | 528 | * have been received since. |
529 | */ | 529 | */ |
530 | netif_rx_complete(napi); | 530 | napi_complete(napi); |
531 | goto out; | 531 | goto out; |
532 | } | 532 | } |
533 | 533 | ||
@@ -538,13 +538,13 @@ static int macb_poll(struct napi_struct *napi, int budget) | |||
538 | dev_warn(&bp->pdev->dev, | 538 | dev_warn(&bp->pdev->dev, |
539 | "No RX buffers complete, status = %02lx\n", | 539 | "No RX buffers complete, status = %02lx\n", |
540 | (unsigned long)status); | 540 | (unsigned long)status); |
541 | netif_rx_complete(napi); | 541 | napi_complete(napi); |
542 | goto out; | 542 | goto out; |
543 | } | 543 | } |
544 | 544 | ||
545 | work_done = macb_rx(bp, budget); | 545 | work_done = macb_rx(bp, budget); |
546 | if (work_done < budget) | 546 | if (work_done < budget) |
547 | netif_rx_complete(napi); | 547 | napi_complete(napi); |
548 | 548 | ||
549 | /* | 549 | /* |
550 | * We've done what we can to clean the buffers. Make sure we | 550 | * We've done what we can to clean the buffers. Make sure we |
@@ -579,7 +579,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) | |||
579 | } | 579 | } |
580 | 580 | ||
581 | if (status & MACB_RX_INT_FLAGS) { | 581 | if (status & MACB_RX_INT_FLAGS) { |
582 | if (netif_rx_schedule_prep(&bp->napi)) { | 582 | if (napi_schedule_prep(&bp->napi)) { |
583 | /* | 583 | /* |
584 | * There's no point taking any more interrupts | 584 | * There's no point taking any more interrupts |
585 | * until we have processed the buffers | 585 | * until we have processed the buffers |
@@ -587,7 +587,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) | |||
587 | macb_writel(bp, IDR, MACB_RX_INT_FLAGS); | 587 | macb_writel(bp, IDR, MACB_RX_INT_FLAGS); |
588 | dev_dbg(&bp->pdev->dev, | 588 | dev_dbg(&bp->pdev->dev, |
589 | "scheduling RX softirq\n"); | 589 | "scheduling RX softirq\n"); |
590 | __netif_rx_schedule(&bp->napi); | 590 | __napi_schedule(&bp->napi); |
591 | } | 591 | } |
592 | } | 592 | } |
593 | 593 | ||
@@ -1077,7 +1077,7 @@ static void macb_get_drvinfo(struct net_device *dev, | |||
1077 | 1077 | ||
1078 | strcpy(info->driver, bp->pdev->dev.driver->name); | 1078 | strcpy(info->driver, bp->pdev->dev.driver->name); |
1079 | strcpy(info->version, "$Revision: 1.14 $"); | 1079 | strcpy(info->version, "$Revision: 1.14 $"); |
1080 | strcpy(info->bus_info, bp->pdev->dev.bus_id); | 1080 | strcpy(info->bus_info, dev_name(&bp->pdev->dev)); |
1081 | } | 1081 | } |
1082 | 1082 | ||
1083 | static struct ethtool_ops macb_ethtool_ops = { | 1083 | static struct ethtool_ops macb_ethtool_ops = { |
@@ -1234,8 +1234,8 @@ static int __init macb_probe(struct platform_device *pdev) | |||
1234 | 1234 | ||
1235 | phydev = bp->phy_dev; | 1235 | phydev = bp->phy_dev; |
1236 | printk(KERN_INFO "%s: attached PHY driver [%s] " | 1236 | printk(KERN_INFO "%s: attached PHY driver [%s] " |
1237 | "(mii_bus:phy_addr=%s, irq=%d)\n", | 1237 | "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name, |
1238 | dev->name, phydev->drv->name, phydev->dev.bus_id, phydev->irq); | 1238 | phydev->drv->name, dev_name(&phydev->dev), phydev->irq); |
1239 | 1239 | ||
1240 | return 0; | 1240 | return 0; |
1241 | 1241 | ||
diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c index 205bb05c25d6..527166e35d56 100644 --- a/drivers/net/macsonic.c +++ b/drivers/net/macsonic.c | |||
@@ -176,7 +176,8 @@ static int __init macsonic_init(struct net_device *dev) | |||
176 | if ((lp->descriptors = dma_alloc_coherent(lp->device, | 176 | if ((lp->descriptors = dma_alloc_coherent(lp->device, |
177 | SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), | 177 | SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), |
178 | &lp->descriptors_laddr, GFP_KERNEL)) == NULL) { | 178 | &lp->descriptors_laddr, GFP_KERNEL)) == NULL) { |
179 | printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n", lp->device->bus_id); | 179 | printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n", |
180 | dev_name(lp->device)); | ||
180 | return -ENOMEM; | 181 | return -ENOMEM; |
181 | } | 182 | } |
182 | 183 | ||
@@ -337,7 +338,7 @@ static int __init mac_onboard_sonic_probe(struct net_device *dev) | |||
337 | sonic_version_printed = 1; | 338 | sonic_version_printed = 1; |
338 | } | 339 | } |
339 | printk(KERN_INFO "%s: onboard / comm-slot SONIC at 0x%08lx\n", | 340 | printk(KERN_INFO "%s: onboard / comm-slot SONIC at 0x%08lx\n", |
340 | lp->device->bus_id, dev->base_addr); | 341 | dev_name(lp->device), dev->base_addr); |
341 | 342 | ||
342 | /* The PowerBook's SONIC is 16 bit always. */ | 343 | /* The PowerBook's SONIC is 16 bit always. */ |
343 | if (macintosh_config->ident == MAC_MODEL_PB520) { | 344 | if (macintosh_config->ident == MAC_MODEL_PB520) { |
@@ -370,10 +371,10 @@ static int __init mac_onboard_sonic_probe(struct net_device *dev) | |||
370 | } | 371 | } |
371 | printk(KERN_INFO | 372 | printk(KERN_INFO |
372 | "%s: revision 0x%04x, using %d bit DMA and register offset %d\n", | 373 | "%s: revision 0x%04x, using %d bit DMA and register offset %d\n", |
373 | lp->device->bus_id, sr, lp->dma_bitmode?32:16, lp->reg_offset); | 374 | dev_name(lp->device), sr, lp->dma_bitmode?32:16, lp->reg_offset); |
374 | 375 | ||
375 | #if 0 /* This is sometimes useful to find out how MacOS configured the card. */ | 376 | #if 0 /* This is sometimes useful to find out how MacOS configured the card. */ |
376 | printk(KERN_INFO "%s: DCR: 0x%04x, DCR2: 0x%04x\n", lp->device->bus_id, | 377 | printk(KERN_INFO "%s: DCR: 0x%04x, DCR2: 0x%04x\n", dev_name(lp->device), |
377 | SONIC_READ(SONIC_DCR) & 0xffff, SONIC_READ(SONIC_DCR2) & 0xffff); | 378 | SONIC_READ(SONIC_DCR) & 0xffff, SONIC_READ(SONIC_DCR2) & 0xffff); |
378 | #endif | 379 | #endif |
379 | 380 | ||
@@ -525,12 +526,12 @@ static int __init mac_nubus_sonic_probe(struct net_device *dev) | |||
525 | sonic_version_printed = 1; | 526 | sonic_version_printed = 1; |
526 | } | 527 | } |
527 | printk(KERN_INFO "%s: %s in slot %X\n", | 528 | printk(KERN_INFO "%s: %s in slot %X\n", |
528 | lp->device->bus_id, ndev->board->name, ndev->board->slot); | 529 | dev_name(lp->device), ndev->board->name, ndev->board->slot); |
529 | printk(KERN_INFO "%s: revision 0x%04x, using %d bit DMA and register offset %d\n", | 530 | printk(KERN_INFO "%s: revision 0x%04x, using %d bit DMA and register offset %d\n", |
530 | lp->device->bus_id, SONIC_READ(SONIC_SR), dma_bitmode?32:16, reg_offset); | 531 | dev_name(lp->device), SONIC_READ(SONIC_SR), dma_bitmode?32:16, reg_offset); |
531 | 532 | ||
532 | #if 0 /* This is sometimes useful to find out how MacOS configured the card. */ | 533 | #if 0 /* This is sometimes useful to find out how MacOS configured the card. */ |
533 | printk(KERN_INFO "%s: DCR: 0x%04x, DCR2: 0x%04x\n", lp->device->bus_id, | 534 | printk(KERN_INFO "%s: DCR: 0x%04x, DCR2: 0x%04x\n", dev_name(lp->device), |
534 | SONIC_READ(SONIC_DCR) & 0xffff, SONIC_READ(SONIC_DCR2) & 0xffff); | 535 | SONIC_READ(SONIC_DCR) & 0xffff, SONIC_READ(SONIC_DCR2) & 0xffff); |
535 | #endif | 536 | #endif |
536 | 537 | ||
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c index c61b0bdca1a4..a4130e764991 100644 --- a/drivers/net/mlx4/en_rx.c +++ b/drivers/net/mlx4/en_rx.c | |||
@@ -768,6 +768,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud | |||
768 | 768 | ||
769 | skb->ip_summed = ip_summed; | 769 | skb->ip_summed = ip_summed; |
770 | skb->protocol = eth_type_trans(skb, dev); | 770 | skb->protocol = eth_type_trans(skb, dev); |
771 | skb_record_rx_queue(skb, cq->ring); | ||
771 | 772 | ||
772 | /* Push it up the stack */ | 773 | /* Push it up the stack */ |
773 | if (priv->vlgrp && (be32_to_cpu(cqe->vlan_my_qpn) & | 774 | if (priv->vlgrp && (be32_to_cpu(cqe->vlan_my_qpn) & |
@@ -814,7 +815,7 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq) | |||
814 | struct mlx4_en_priv *priv = netdev_priv(cq->dev); | 815 | struct mlx4_en_priv *priv = netdev_priv(cq->dev); |
815 | 816 | ||
816 | if (priv->port_up) | 817 | if (priv->port_up) |
817 | netif_rx_schedule(&cq->napi); | 818 | napi_schedule(&cq->napi); |
818 | else | 819 | else |
819 | mlx4_en_arm_cq(priv, cq); | 820 | mlx4_en_arm_cq(priv, cq); |
820 | } | 821 | } |
@@ -834,7 +835,7 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget) | |||
834 | INC_PERF_COUNTER(priv->pstats.napi_quota); | 835 | INC_PERF_COUNTER(priv->pstats.napi_quota); |
835 | else { | 836 | else { |
836 | /* Done for now */ | 837 | /* Done for now */ |
837 | netif_rx_complete(napi); | 838 | napi_complete(napi); |
838 | mlx4_en_arm_cq(priv, cq); | 839 | mlx4_en_arm_cq(priv, cq); |
839 | } | 840 | } |
840 | return done; | 841 | return done; |
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index 5f31bbb614af..8fab31f631a0 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c | |||
@@ -2589,7 +2589,7 @@ static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex) | |||
2589 | 2589 | ||
2590 | phy_reset(mp); | 2590 | phy_reset(mp); |
2591 | 2591 | ||
2592 | phy_attach(mp->dev, phy->dev.bus_id, 0, PHY_INTERFACE_MODE_GMII); | 2592 | phy_attach(mp->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_GMII); |
2593 | 2593 | ||
2594 | if (speed == 0) { | 2594 | if (speed == 0) { |
2595 | phy->autoneg = AUTONEG_ENABLE; | 2595 | phy->autoneg = AUTONEG_ENABLE; |
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index e9c1296b267e..aea9fdaa3cd5 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c | |||
@@ -1324,6 +1324,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx, | |||
1324 | skb_shinfo(skb)->nr_frags = 0; | 1324 | skb_shinfo(skb)->nr_frags = 0; |
1325 | } | 1325 | } |
1326 | skb->protocol = eth_type_trans(skb, dev); | 1326 | skb->protocol = eth_type_trans(skb, dev); |
1327 | skb_record_rx_queue(skb, ss - &mgp->ss[0]); | ||
1327 | 1328 | ||
1328 | if (mgp->csum_flag) { | 1329 | if (mgp->csum_flag) { |
1329 | if ((skb->protocol == htons(ETH_P_IP)) || | 1330 | if ((skb->protocol == htons(ETH_P_IP)) || |
@@ -1514,7 +1515,7 @@ static int myri10ge_poll(struct napi_struct *napi, int budget) | |||
1514 | work_done = myri10ge_clean_rx_done(ss, budget); | 1515 | work_done = myri10ge_clean_rx_done(ss, budget); |
1515 | 1516 | ||
1516 | if (work_done < budget) { | 1517 | if (work_done < budget) { |
1517 | netif_rx_complete(napi); | 1518 | napi_complete(napi); |
1518 | put_be32(htonl(3), ss->irq_claim); | 1519 | put_be32(htonl(3), ss->irq_claim); |
1519 | } | 1520 | } |
1520 | return work_done; | 1521 | return work_done; |
@@ -1532,7 +1533,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg) | |||
1532 | /* an interrupt on a non-zero receive-only slice is implicitly | 1533 | /* an interrupt on a non-zero receive-only slice is implicitly |
1533 | * valid since MSI-X irqs are not shared */ | 1534 | * valid since MSI-X irqs are not shared */ |
1534 | if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) { | 1535 | if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) { |
1535 | netif_rx_schedule(&ss->napi); | 1536 | napi_schedule(&ss->napi); |
1536 | return (IRQ_HANDLED); | 1537 | return (IRQ_HANDLED); |
1537 | } | 1538 | } |
1538 | 1539 | ||
@@ -1543,7 +1544,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg) | |||
1543 | /* low bit indicates receives are present, so schedule | 1544 | /* low bit indicates receives are present, so schedule |
1544 | * napi poll handler */ | 1545 | * napi poll handler */ |
1545 | if (stats->valid & 1) | 1546 | if (stats->valid & 1) |
1546 | netif_rx_schedule(&ss->napi); | 1547 | napi_schedule(&ss->napi); |
1547 | 1548 | ||
1548 | if (!mgp->msi_enabled && !mgp->msix_enabled) { | 1549 | if (!mgp->msi_enabled && !mgp->msix_enabled) { |
1549 | put_be32(0, mgp->irq_deassert); | 1550 | put_be32(0, mgp->irq_deassert); |
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c index 899ed065a147..88b52883acea 100644 --- a/drivers/net/myri_sbus.c +++ b/drivers/net/myri_sbus.c | |||
@@ -748,7 +748,7 @@ static int myri_rebuild_header(struct sk_buff *skb) | |||
748 | switch (eth->h_proto) | 748 | switch (eth->h_proto) |
749 | { | 749 | { |
750 | #ifdef CONFIG_INET | 750 | #ifdef CONFIG_INET |
751 | case __constant_htons(ETH_P_IP): | 751 | case cpu_to_be16(ETH_P_IP): |
752 | return arp_find(eth->h_dest, skb); | 752 | return arp_find(eth->h_dest, skb); |
753 | #endif | 753 | #endif |
754 | 754 | ||
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c index c5dec54251bf..c23a58624a33 100644 --- a/drivers/net/natsemi.c +++ b/drivers/net/natsemi.c | |||
@@ -2198,10 +2198,10 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) | |||
2198 | 2198 | ||
2199 | prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]); | 2199 | prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]); |
2200 | 2200 | ||
2201 | if (netif_rx_schedule_prep(&np->napi)) { | 2201 | if (napi_schedule_prep(&np->napi)) { |
2202 | /* Disable interrupts and register for poll */ | 2202 | /* Disable interrupts and register for poll */ |
2203 | natsemi_irq_disable(dev); | 2203 | natsemi_irq_disable(dev); |
2204 | __netif_rx_schedule(&np->napi); | 2204 | __napi_schedule(&np->napi); |
2205 | } else | 2205 | } else |
2206 | printk(KERN_WARNING | 2206 | printk(KERN_WARNING |
2207 | "%s: Ignoring interrupt, status %#08x, mask %#08x.\n", | 2207 | "%s: Ignoring interrupt, status %#08x, mask %#08x.\n", |
@@ -2253,7 +2253,7 @@ static int natsemi_poll(struct napi_struct *napi, int budget) | |||
2253 | np->intr_status = readl(ioaddr + IntrStatus); | 2253 | np->intr_status = readl(ioaddr + IntrStatus); |
2254 | } while (np->intr_status); | 2254 | } while (np->intr_status); |
2255 | 2255 | ||
2256 | netif_rx_complete(napi); | 2256 | napi_complete(napi); |
2257 | 2257 | ||
2258 | /* Reenable interrupts providing nothing is trying to shut | 2258 | /* Reenable interrupts providing nothing is trying to shut |
2259 | * the chip down. */ | 2259 | * the chip down. */ |
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 645d384fe87e..ada462e94c96 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c | |||
@@ -1170,7 +1170,7 @@ static bool netxen_tso_check(struct net_device *netdev, | |||
1170 | __be16 protocol = skb->protocol; | 1170 | __be16 protocol = skb->protocol; |
1171 | u16 flags = 0; | 1171 | u16 flags = 0; |
1172 | 1172 | ||
1173 | if (protocol == __constant_htons(ETH_P_8021Q)) { | 1173 | if (protocol == cpu_to_be16(ETH_P_8021Q)) { |
1174 | struct vlan_ethhdr *vh = (struct vlan_ethhdr *)skb->data; | 1174 | struct vlan_ethhdr *vh = (struct vlan_ethhdr *)skb->data; |
1175 | protocol = vh->h_vlan_encapsulated_proto; | 1175 | protocol = vh->h_vlan_encapsulated_proto; |
1176 | flags = FLAGS_VLAN_TAGGED; | 1176 | flags = FLAGS_VLAN_TAGGED; |
@@ -1183,21 +1183,21 @@ static bool netxen_tso_check(struct net_device *netdev, | |||
1183 | desc->total_hdr_length = | 1183 | desc->total_hdr_length = |
1184 | skb_transport_offset(skb) + tcp_hdrlen(skb); | 1184 | skb_transport_offset(skb) + tcp_hdrlen(skb); |
1185 | 1185 | ||
1186 | opcode = (protocol == __constant_htons(ETH_P_IPV6)) ? | 1186 | opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ? |
1187 | TX_TCP_LSO6 : TX_TCP_LSO; | 1187 | TX_TCP_LSO6 : TX_TCP_LSO; |
1188 | tso = true; | 1188 | tso = true; |
1189 | 1189 | ||
1190 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { | 1190 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { |
1191 | u8 l4proto; | 1191 | u8 l4proto; |
1192 | 1192 | ||
1193 | if (protocol == __constant_htons(ETH_P_IP)) { | 1193 | if (protocol == cpu_to_be16(ETH_P_IP)) { |
1194 | l4proto = ip_hdr(skb)->protocol; | 1194 | l4proto = ip_hdr(skb)->protocol; |
1195 | 1195 | ||
1196 | if (l4proto == IPPROTO_TCP) | 1196 | if (l4proto == IPPROTO_TCP) |
1197 | opcode = TX_TCP_PKT; | 1197 | opcode = TX_TCP_PKT; |
1198 | else if(l4proto == IPPROTO_UDP) | 1198 | else if(l4proto == IPPROTO_UDP) |
1199 | opcode = TX_UDP_PKT; | 1199 | opcode = TX_UDP_PKT; |
1200 | } else if (protocol == __constant_htons(ETH_P_IPV6)) { | 1200 | } else if (protocol == cpu_to_be16(ETH_P_IPV6)) { |
1201 | l4proto = ipv6_hdr(skb)->nexthdr; | 1201 | l4proto = ipv6_hdr(skb)->nexthdr; |
1202 | 1202 | ||
1203 | if (l4proto == IPPROTO_TCP) | 1203 | if (l4proto == IPPROTO_TCP) |
@@ -1640,7 +1640,7 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget) | |||
1640 | } | 1640 | } |
1641 | 1641 | ||
1642 | if ((work_done < budget) && tx_complete) { | 1642 | if ((work_done < budget) && tx_complete) { |
1643 | netif_rx_complete(&adapter->napi); | 1643 | napi_complete(&adapter->napi); |
1644 | netxen_nic_enable_int(adapter); | 1644 | netxen_nic_enable_int(adapter); |
1645 | } | 1645 | } |
1646 | 1646 | ||
diff --git a/drivers/net/niu.c b/drivers/net/niu.c index 0c0b752315ca..c26325ded20e 100644 --- a/drivers/net/niu.c +++ b/drivers/net/niu.c | |||
@@ -3390,6 +3390,7 @@ static int niu_process_rx_pkt(struct niu *np, struct rx_ring_info *rp) | |||
3390 | rp->rx_bytes += skb->len; | 3390 | rp->rx_bytes += skb->len; |
3391 | 3391 | ||
3392 | skb->protocol = eth_type_trans(skb, np->dev); | 3392 | skb->protocol = eth_type_trans(skb, np->dev); |
3393 | skb_record_rx_queue(skb, rp->rx_channel); | ||
3393 | netif_receive_skb(skb); | 3394 | netif_receive_skb(skb); |
3394 | 3395 | ||
3395 | return num_rcr; | 3396 | return num_rcr; |
@@ -3669,7 +3670,7 @@ static int niu_poll(struct napi_struct *napi, int budget) | |||
3669 | work_done = niu_poll_core(np, lp, budget); | 3670 | work_done = niu_poll_core(np, lp, budget); |
3670 | 3671 | ||
3671 | if (work_done < budget) { | 3672 | if (work_done < budget) { |
3672 | netif_rx_complete(napi); | 3673 | napi_complete(napi); |
3673 | niu_ldg_rearm(np, lp, 1); | 3674 | niu_ldg_rearm(np, lp, 1); |
3674 | } | 3675 | } |
3675 | return work_done; | 3676 | return work_done; |
@@ -4088,12 +4089,12 @@ static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0) | |||
4088 | static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp, | 4089 | static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp, |
4089 | u64 v0, u64 v1, u64 v2) | 4090 | u64 v0, u64 v1, u64 v2) |
4090 | { | 4091 | { |
4091 | if (likely(netif_rx_schedule_prep(&lp->napi))) { | 4092 | if (likely(napi_schedule_prep(&lp->napi))) { |
4092 | lp->v0 = v0; | 4093 | lp->v0 = v0; |
4093 | lp->v1 = v1; | 4094 | lp->v1 = v1; |
4094 | lp->v2 = v2; | 4095 | lp->v2 = v2; |
4095 | __niu_fastpath_interrupt(np, lp->ldg_num, v0); | 4096 | __niu_fastpath_interrupt(np, lp->ldg_num, v0); |
4096 | __netif_rx_schedule(&lp->napi); | 4097 | __napi_schedule(&lp->napi); |
4097 | } | 4098 | } |
4098 | } | 4099 | } |
4099 | 4100 | ||
@@ -6446,11 +6447,11 @@ static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr, | |||
6446 | 6447 | ||
6447 | ipv6 = ihl = 0; | 6448 | ipv6 = ihl = 0; |
6448 | switch (skb->protocol) { | 6449 | switch (skb->protocol) { |
6449 | case __constant_htons(ETH_P_IP): | 6450 | case cpu_to_be16(ETH_P_IP): |
6450 | ip_proto = ip_hdr(skb)->protocol; | 6451 | ip_proto = ip_hdr(skb)->protocol; |
6451 | ihl = ip_hdr(skb)->ihl; | 6452 | ihl = ip_hdr(skb)->ihl; |
6452 | break; | 6453 | break; |
6453 | case __constant_htons(ETH_P_IPV6): | 6454 | case cpu_to_be16(ETH_P_IPV6): |
6454 | ip_proto = ipv6_hdr(skb)->nexthdr; | 6455 | ip_proto = ipv6_hdr(skb)->nexthdr; |
6455 | ihl = (40 >> 2); | 6456 | ihl = (40 >> 2); |
6456 | ipv6 = 1; | 6457 | ipv6 = 1; |
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c index d0349e7d73ea..5eeb5a87b738 100644 --- a/drivers/net/pasemi_mac.c +++ b/drivers/net/pasemi_mac.c | |||
@@ -970,7 +970,7 @@ static irqreturn_t pasemi_mac_rx_intr(int irq, void *data) | |||
970 | if (*chan->status & PAS_STATUS_ERROR) | 970 | if (*chan->status & PAS_STATUS_ERROR) |
971 | reg |= PAS_IOB_DMA_RXCH_RESET_DINTC; | 971 | reg |= PAS_IOB_DMA_RXCH_RESET_DINTC; |
972 | 972 | ||
973 | netif_rx_schedule(&mac->napi); | 973 | napi_schedule(&mac->napi); |
974 | 974 | ||
975 | write_iob_reg(PAS_IOB_DMA_RXCH_RESET(chan->chno), reg); | 975 | write_iob_reg(PAS_IOB_DMA_RXCH_RESET(chan->chno), reg); |
976 | 976 | ||
@@ -1010,7 +1010,7 @@ static irqreturn_t pasemi_mac_tx_intr(int irq, void *data) | |||
1010 | 1010 | ||
1011 | mod_timer(&txring->clean_timer, jiffies + (TX_CLEAN_INTERVAL)*2); | 1011 | mod_timer(&txring->clean_timer, jiffies + (TX_CLEAN_INTERVAL)*2); |
1012 | 1012 | ||
1013 | netif_rx_schedule(&mac->napi); | 1013 | napi_schedule(&mac->napi); |
1014 | 1014 | ||
1015 | if (reg) | 1015 | if (reg) |
1016 | write_iob_reg(PAS_IOB_DMA_TXCH_RESET(chan->chno), reg); | 1016 | write_iob_reg(PAS_IOB_DMA_TXCH_RESET(chan->chno), reg); |
@@ -1639,7 +1639,7 @@ static int pasemi_mac_poll(struct napi_struct *napi, int budget) | |||
1639 | pkts = pasemi_mac_clean_rx(rx_ring(mac), budget); | 1639 | pkts = pasemi_mac_clean_rx(rx_ring(mac), budget); |
1640 | if (pkts < budget) { | 1640 | if (pkts < budget) { |
1641 | /* all done, no more packets present */ | 1641 | /* all done, no more packets present */ |
1642 | netif_rx_complete(napi); | 1642 | napi_complete(napi); |
1643 | 1643 | ||
1644 | pasemi_mac_restart_rx_intr(mac); | 1644 | pasemi_mac_restart_rx_intr(mac); |
1645 | pasemi_mac_restart_tx_intr(mac); | 1645 | pasemi_mac_restart_tx_intr(mac); |
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index 665a4286da39..80124fac65fa 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c | |||
@@ -1397,7 +1397,7 @@ static int pcnet32_poll(struct napi_struct *napi, int budget) | |||
1397 | if (work_done < budget) { | 1397 | if (work_done < budget) { |
1398 | spin_lock_irqsave(&lp->lock, flags); | 1398 | spin_lock_irqsave(&lp->lock, flags); |
1399 | 1399 | ||
1400 | __netif_rx_complete(napi); | 1400 | __napi_complete(napi); |
1401 | 1401 | ||
1402 | /* clear interrupt masks */ | 1402 | /* clear interrupt masks */ |
1403 | val = lp->a.read_csr(ioaddr, CSR3); | 1403 | val = lp->a.read_csr(ioaddr, CSR3); |
@@ -2592,14 +2592,14 @@ pcnet32_interrupt(int irq, void *dev_id) | |||
2592 | dev->name, csr0); | 2592 | dev->name, csr0); |
2593 | /* unlike for the lance, there is no restart needed */ | 2593 | /* unlike for the lance, there is no restart needed */ |
2594 | } | 2594 | } |
2595 | if (netif_rx_schedule_prep(&lp->napi)) { | 2595 | if (napi_schedule_prep(&lp->napi)) { |
2596 | u16 val; | 2596 | u16 val; |
2597 | /* set interrupt masks */ | 2597 | /* set interrupt masks */ |
2598 | val = lp->a.read_csr(ioaddr, CSR3); | 2598 | val = lp->a.read_csr(ioaddr, CSR3); |
2599 | val |= 0x5f00; | 2599 | val |= 0x5f00; |
2600 | lp->a.write_csr(ioaddr, CSR3, val); | 2600 | lp->a.write_csr(ioaddr, CSR3, val); |
2601 | mmiowb(); | 2601 | mmiowb(); |
2602 | __netif_rx_schedule(&lp->napi); | 2602 | __napi_schedule(&lp->napi); |
2603 | break; | 2603 | break; |
2604 | } | 2604 | } |
2605 | csr0 = lp->a.read_csr(ioaddr, CSR0); | 2605 | csr0 = lp->a.read_csr(ioaddr, CSR0); |
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index a439ebeb4319..3f460c564927 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c | |||
@@ -200,16 +200,21 @@ static int __devinit mdio_ofgpio_probe(struct of_device *ofdev, | |||
200 | { | 200 | { |
201 | struct device_node *np = NULL; | 201 | struct device_node *np = NULL; |
202 | struct mdio_gpio_platform_data *pdata; | 202 | struct mdio_gpio_platform_data *pdata; |
203 | int ret; | ||
203 | 204 | ||
204 | pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); | 205 | pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); |
205 | if (!pdata) | 206 | if (!pdata) |
206 | return -ENOMEM; | 207 | return -ENOMEM; |
207 | 208 | ||
208 | pdata->mdc = of_get_gpio(ofdev->node, 0); | 209 | ret = of_get_gpio(ofdev->node, 0); |
209 | pdata->mdio = of_get_gpio(ofdev->node, 1); | 210 | if (ret < 0) |
210 | |||
211 | if (pdata->mdc < 0 || pdata->mdio < 0) | ||
212 | goto out_free; | 211 | goto out_free; |
212 | pdata->mdc = ret; | ||
213 | |||
214 | ret = of_get_gpio(ofdev->node, 1); | ||
215 | if (ret < 0) | ||
216 | goto out_free; | ||
217 | pdata->mdio = ret; | ||
213 | 218 | ||
214 | while ((np = of_get_next_child(ofdev->node, np))) | 219 | while ((np = of_get_next_child(ofdev->node, np))) |
215 | if (!strcmp(np->type, "ethernet-phy")) | 220 | if (!strcmp(np->type, "ethernet-phy")) |
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 811a637695ca..bb29ae3ff17d 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
24 | #include <linux/device.h> | ||
24 | #include <linux/netdevice.h> | 25 | #include <linux/netdevice.h> |
25 | #include <linux/etherdevice.h> | 26 | #include <linux/etherdevice.h> |
26 | #include <linux/skbuff.h> | 27 | #include <linux/skbuff.h> |
@@ -286,33 +287,58 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv) | |||
286 | (phydev->phy_id & phydrv->phy_id_mask)); | 287 | (phydev->phy_id & phydrv->phy_id_mask)); |
287 | } | 288 | } |
288 | 289 | ||
290 | static bool mdio_bus_phy_may_suspend(struct phy_device *phydev) | ||
291 | { | ||
292 | struct device_driver *drv = phydev->dev.driver; | ||
293 | struct phy_driver *phydrv = to_phy_driver(drv); | ||
294 | struct net_device *netdev = phydev->attached_dev; | ||
295 | |||
296 | if (!drv || !phydrv->suspend) | ||
297 | return false; | ||
298 | |||
299 | /* PHY not attached? May suspend. */ | ||
300 | if (!netdev) | ||
301 | return true; | ||
302 | |||
303 | /* | ||
304 | * Don't suspend PHY if the attched netdev parent may wakeup. | ||
305 | * The parent may point to a PCI device, as in tg3 driver. | ||
306 | */ | ||
307 | if (netdev->dev.parent && device_may_wakeup(netdev->dev.parent)) | ||
308 | return false; | ||
309 | |||
310 | /* | ||
311 | * Also don't suspend PHY if the netdev itself may wakeup. This | ||
312 | * is the case for devices w/o underlaying pwr. mgmt. aware bus, | ||
313 | * e.g. SoC devices. | ||
314 | */ | ||
315 | if (device_may_wakeup(&netdev->dev)) | ||
316 | return false; | ||
317 | |||
318 | return true; | ||
319 | } | ||
320 | |||
289 | /* Suspend and resume. Copied from platform_suspend and | 321 | /* Suspend and resume. Copied from platform_suspend and |
290 | * platform_resume | 322 | * platform_resume |
291 | */ | 323 | */ |
292 | static int mdio_bus_suspend(struct device * dev, pm_message_t state) | 324 | static int mdio_bus_suspend(struct device * dev, pm_message_t state) |
293 | { | 325 | { |
294 | int ret = 0; | 326 | struct phy_driver *phydrv = to_phy_driver(dev->driver); |
295 | struct device_driver *drv = dev->driver; | ||
296 | struct phy_driver *phydrv = to_phy_driver(drv); | ||
297 | struct phy_device *phydev = to_phy_device(dev); | 327 | struct phy_device *phydev = to_phy_device(dev); |
298 | 328 | ||
299 | if (drv && phydrv->suspend && !device_may_wakeup(phydev->dev.parent)) | 329 | if (!mdio_bus_phy_may_suspend(phydev)) |
300 | ret = phydrv->suspend(phydev); | 330 | return 0; |
301 | 331 | return phydrv->suspend(phydev); | |
302 | return ret; | ||
303 | } | 332 | } |
304 | 333 | ||
305 | static int mdio_bus_resume(struct device * dev) | 334 | static int mdio_bus_resume(struct device * dev) |
306 | { | 335 | { |
307 | int ret = 0; | 336 | struct phy_driver *phydrv = to_phy_driver(dev->driver); |
308 | struct device_driver *drv = dev->driver; | ||
309 | struct phy_driver *phydrv = to_phy_driver(drv); | ||
310 | struct phy_device *phydev = to_phy_device(dev); | 337 | struct phy_device *phydev = to_phy_device(dev); |
311 | 338 | ||
312 | if (drv && phydrv->resume && !device_may_wakeup(phydev->dev.parent)) | 339 | if (!mdio_bus_phy_may_suspend(phydev)) |
313 | ret = phydrv->resume(phydev); | 340 | return 0; |
314 | 341 | return phydrv->resume(phydev); | |
315 | return ret; | ||
316 | } | 342 | } |
317 | 343 | ||
318 | struct bus_type mdio_bus_type = { | 344 | struct bus_type mdio_bus_type = { |
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 7b2728b8f1b7..4405a76ed3da 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c | |||
@@ -49,6 +49,10 @@ | |||
49 | #include <net/slhc_vj.h> | 49 | #include <net/slhc_vj.h> |
50 | #include <asm/atomic.h> | 50 | #include <asm/atomic.h> |
51 | 51 | ||
52 | #include <linux/nsproxy.h> | ||
53 | #include <net/net_namespace.h> | ||
54 | #include <net/netns/generic.h> | ||
55 | |||
52 | #define PPP_VERSION "2.4.2" | 56 | #define PPP_VERSION "2.4.2" |
53 | 57 | ||
54 | /* | 58 | /* |
@@ -131,6 +135,7 @@ struct ppp { | |||
131 | struct sock_filter *active_filter;/* filter for pkts to reset idle */ | 135 | struct sock_filter *active_filter;/* filter for pkts to reset idle */ |
132 | unsigned pass_len, active_len; | 136 | unsigned pass_len, active_len; |
133 | #endif /* CONFIG_PPP_FILTER */ | 137 | #endif /* CONFIG_PPP_FILTER */ |
138 | struct net *ppp_net; /* the net we belong to */ | ||
134 | }; | 139 | }; |
135 | 140 | ||
136 | /* | 141 | /* |
@@ -155,6 +160,7 @@ struct channel { | |||
155 | struct rw_semaphore chan_sem; /* protects `chan' during chan ioctl */ | 160 | struct rw_semaphore chan_sem; /* protects `chan' during chan ioctl */ |
156 | spinlock_t downl; /* protects `chan', file.xq dequeue */ | 161 | spinlock_t downl; /* protects `chan', file.xq dequeue */ |
157 | struct ppp *ppp; /* ppp unit we're connected to */ | 162 | struct ppp *ppp; /* ppp unit we're connected to */ |
163 | struct net *chan_net; /* the net channel belongs to */ | ||
158 | struct list_head clist; /* link in list of channels per unit */ | 164 | struct list_head clist; /* link in list of channels per unit */ |
159 | rwlock_t upl; /* protects `ppp' */ | 165 | rwlock_t upl; /* protects `ppp' */ |
160 | #ifdef CONFIG_PPP_MULTILINK | 166 | #ifdef CONFIG_PPP_MULTILINK |
@@ -173,26 +179,35 @@ struct channel { | |||
173 | * channel.downl. | 179 | * channel.downl. |
174 | */ | 180 | */ |
175 | 181 | ||
176 | /* | ||
177 | * all_ppp_mutex protects the all_ppp_units mapping. | ||
178 | * It also ensures that finding a ppp unit in the all_ppp_units map | ||
179 | * and updating its file.refcnt field is atomic. | ||
180 | */ | ||
181 | static DEFINE_MUTEX(all_ppp_mutex); | ||
182 | static atomic_t ppp_unit_count = ATOMIC_INIT(0); | 182 | static atomic_t ppp_unit_count = ATOMIC_INIT(0); |
183 | static DEFINE_IDR(ppp_units_idr); | ||
184 | |||
185 | /* | ||
186 | * all_channels_lock protects all_channels and last_channel_index, | ||
187 | * and the atomicity of find a channel and updating its file.refcnt | ||
188 | * field. | ||
189 | */ | ||
190 | static DEFINE_SPINLOCK(all_channels_lock); | ||
191 | static LIST_HEAD(all_channels); | ||
192 | static LIST_HEAD(new_channels); | ||
193 | static int last_channel_index; | ||
194 | static atomic_t channel_count = ATOMIC_INIT(0); | 183 | static atomic_t channel_count = ATOMIC_INIT(0); |
195 | 184 | ||
185 | /* per-net private data for this module */ | ||
186 | static unsigned int ppp_net_id; | ||
187 | struct ppp_net { | ||
188 | /* units to ppp mapping */ | ||
189 | struct idr units_idr; | ||
190 | |||
191 | /* | ||
192 | * all_ppp_mutex protects the units_idr mapping. | ||
193 | * It also ensures that finding a ppp unit in the units_idr | ||
194 | * map and updating its file.refcnt field is atomic. | ||
195 | */ | ||
196 | struct mutex all_ppp_mutex; | ||
197 | |||
198 | /* channels */ | ||
199 | struct list_head all_channels; | ||
200 | struct list_head new_channels; | ||
201 | int last_channel_index; | ||
202 | |||
203 | /* | ||
204 | * all_channels_lock protects all_channels and | ||
205 | * last_channel_index, and the atomicity of find | ||
206 | * a channel and updating its file.refcnt field. | ||
207 | */ | ||
208 | spinlock_t all_channels_lock; | ||
209 | }; | ||
210 | |||
196 | /* Get the PPP protocol number from a skb */ | 211 | /* Get the PPP protocol number from a skb */ |
197 | #define PPP_PROTO(skb) (((skb)->data[0] << 8) + (skb)->data[1]) | 212 | #define PPP_PROTO(skb) (((skb)->data[0] << 8) + (skb)->data[1]) |
198 | 213 | ||
@@ -216,8 +231,8 @@ static atomic_t channel_count = ATOMIC_INIT(0); | |||
216 | #define seq_after(a, b) ((s32)((a) - (b)) > 0) | 231 | #define seq_after(a, b) ((s32)((a) - (b)) > 0) |
217 | 232 | ||
218 | /* Prototypes. */ | 233 | /* Prototypes. */ |
219 | static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file, | 234 | static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, |
220 | unsigned int cmd, unsigned long arg); | 235 | struct file *file, unsigned int cmd, unsigned long arg); |
221 | static void ppp_xmit_process(struct ppp *ppp); | 236 | static void ppp_xmit_process(struct ppp *ppp); |
222 | static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb); | 237 | static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb); |
223 | static void ppp_push(struct ppp *ppp); | 238 | static void ppp_push(struct ppp *ppp); |
@@ -240,12 +255,12 @@ static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound); | |||
240 | static void ppp_ccp_closed(struct ppp *ppp); | 255 | static void ppp_ccp_closed(struct ppp *ppp); |
241 | static struct compressor *find_compressor(int type); | 256 | static struct compressor *find_compressor(int type); |
242 | static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st); | 257 | static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st); |
243 | static struct ppp *ppp_create_interface(int unit, int *retp); | 258 | static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp); |
244 | static void init_ppp_file(struct ppp_file *pf, int kind); | 259 | static void init_ppp_file(struct ppp_file *pf, int kind); |
245 | static void ppp_shutdown_interface(struct ppp *ppp); | 260 | static void ppp_shutdown_interface(struct ppp *ppp); |
246 | static void ppp_destroy_interface(struct ppp *ppp); | 261 | static void ppp_destroy_interface(struct ppp *ppp); |
247 | static struct ppp *ppp_find_unit(int unit); | 262 | static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit); |
248 | static struct channel *ppp_find_channel(int unit); | 263 | static struct channel *ppp_find_channel(struct ppp_net *pn, int unit); |
249 | static int ppp_connect_channel(struct channel *pch, int unit); | 264 | static int ppp_connect_channel(struct channel *pch, int unit); |
250 | static int ppp_disconnect_channel(struct channel *pch); | 265 | static int ppp_disconnect_channel(struct channel *pch); |
251 | static void ppp_destroy_channel(struct channel *pch); | 266 | static void ppp_destroy_channel(struct channel *pch); |
@@ -256,6 +271,14 @@ static void *unit_find(struct idr *p, int n); | |||
256 | 271 | ||
257 | static struct class *ppp_class; | 272 | static struct class *ppp_class; |
258 | 273 | ||
274 | /* per net-namespace data */ | ||
275 | static inline struct ppp_net *ppp_pernet(struct net *net) | ||
276 | { | ||
277 | BUG_ON(!net); | ||
278 | |||
279 | return net_generic(net, ppp_net_id); | ||
280 | } | ||
281 | |||
259 | /* Translates a PPP protocol number to a NP index (NP == network protocol) */ | 282 | /* Translates a PPP protocol number to a NP index (NP == network protocol) */ |
260 | static inline int proto_to_npindex(int proto) | 283 | static inline int proto_to_npindex(int proto) |
261 | { | 284 | { |
@@ -544,7 +567,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
544 | int __user *p = argp; | 567 | int __user *p = argp; |
545 | 568 | ||
546 | if (!pf) | 569 | if (!pf) |
547 | return ppp_unattached_ioctl(pf, file, cmd, arg); | 570 | return ppp_unattached_ioctl(current->nsproxy->net_ns, |
571 | pf, file, cmd, arg); | ||
548 | 572 | ||
549 | if (cmd == PPPIOCDETACH) { | 573 | if (cmd == PPPIOCDETACH) { |
550 | /* | 574 | /* |
@@ -763,12 +787,13 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
763 | return err; | 787 | return err; |
764 | } | 788 | } |
765 | 789 | ||
766 | static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file, | 790 | static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, |
767 | unsigned int cmd, unsigned long arg) | 791 | struct file *file, unsigned int cmd, unsigned long arg) |
768 | { | 792 | { |
769 | int unit, err = -EFAULT; | 793 | int unit, err = -EFAULT; |
770 | struct ppp *ppp; | 794 | struct ppp *ppp; |
771 | struct channel *chan; | 795 | struct channel *chan; |
796 | struct ppp_net *pn; | ||
772 | int __user *p = (int __user *)arg; | 797 | int __user *p = (int __user *)arg; |
773 | 798 | ||
774 | lock_kernel(); | 799 | lock_kernel(); |
@@ -777,7 +802,7 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file, | |||
777 | /* Create a new ppp unit */ | 802 | /* Create a new ppp unit */ |
778 | if (get_user(unit, p)) | 803 | if (get_user(unit, p)) |
779 | break; | 804 | break; |
780 | ppp = ppp_create_interface(unit, &err); | 805 | ppp = ppp_create_interface(net, unit, &err); |
781 | if (!ppp) | 806 | if (!ppp) |
782 | break; | 807 | break; |
783 | file->private_data = &ppp->file; | 808 | file->private_data = &ppp->file; |
@@ -792,29 +817,31 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file, | |||
792 | /* Attach to an existing ppp unit */ | 817 | /* Attach to an existing ppp unit */ |
793 | if (get_user(unit, p)) | 818 | if (get_user(unit, p)) |
794 | break; | 819 | break; |
795 | mutex_lock(&all_ppp_mutex); | ||
796 | err = -ENXIO; | 820 | err = -ENXIO; |
797 | ppp = ppp_find_unit(unit); | 821 | pn = ppp_pernet(net); |
822 | mutex_lock(&pn->all_ppp_mutex); | ||
823 | ppp = ppp_find_unit(pn, unit); | ||
798 | if (ppp) { | 824 | if (ppp) { |
799 | atomic_inc(&ppp->file.refcnt); | 825 | atomic_inc(&ppp->file.refcnt); |
800 | file->private_data = &ppp->file; | 826 | file->private_data = &ppp->file; |
801 | err = 0; | 827 | err = 0; |
802 | } | 828 | } |
803 | mutex_unlock(&all_ppp_mutex); | 829 | mutex_unlock(&pn->all_ppp_mutex); |
804 | break; | 830 | break; |
805 | 831 | ||
806 | case PPPIOCATTCHAN: | 832 | case PPPIOCATTCHAN: |
807 | if (get_user(unit, p)) | 833 | if (get_user(unit, p)) |
808 | break; | 834 | break; |
809 | spin_lock_bh(&all_channels_lock); | ||
810 | err = -ENXIO; | 835 | err = -ENXIO; |
811 | chan = ppp_find_channel(unit); | 836 | pn = ppp_pernet(net); |
837 | spin_lock_bh(&pn->all_channels_lock); | ||
838 | chan = ppp_find_channel(pn, unit); | ||
812 | if (chan) { | 839 | if (chan) { |
813 | atomic_inc(&chan->file.refcnt); | 840 | atomic_inc(&chan->file.refcnt); |
814 | file->private_data = &chan->file; | 841 | file->private_data = &chan->file; |
815 | err = 0; | 842 | err = 0; |
816 | } | 843 | } |
817 | spin_unlock_bh(&all_channels_lock); | 844 | spin_unlock_bh(&pn->all_channels_lock); |
818 | break; | 845 | break; |
819 | 846 | ||
820 | default: | 847 | default: |
@@ -834,6 +861,51 @@ static const struct file_operations ppp_device_fops = { | |||
834 | .release = ppp_release | 861 | .release = ppp_release |
835 | }; | 862 | }; |
836 | 863 | ||
864 | static __net_init int ppp_init_net(struct net *net) | ||
865 | { | ||
866 | struct ppp_net *pn; | ||
867 | int err; | ||
868 | |||
869 | pn = kzalloc(sizeof(*pn), GFP_KERNEL); | ||
870 | if (!pn) | ||
871 | return -ENOMEM; | ||
872 | |||
873 | idr_init(&pn->units_idr); | ||
874 | mutex_init(&pn->all_ppp_mutex); | ||
875 | |||
876 | INIT_LIST_HEAD(&pn->all_channels); | ||
877 | INIT_LIST_HEAD(&pn->new_channels); | ||
878 | |||
879 | spin_lock_init(&pn->all_channels_lock); | ||
880 | |||
881 | err = net_assign_generic(net, ppp_net_id, pn); | ||
882 | if (err) { | ||
883 | kfree(pn); | ||
884 | return err; | ||
885 | } | ||
886 | |||
887 | return 0; | ||
888 | } | ||
889 | |||
890 | static __net_exit void ppp_exit_net(struct net *net) | ||
891 | { | ||
892 | struct ppp_net *pn; | ||
893 | |||
894 | pn = net_generic(net, ppp_net_id); | ||
895 | idr_destroy(&pn->units_idr); | ||
896 | /* | ||
897 | * if someone has cached our net then | ||
898 | * further net_generic call will return NULL | ||
899 | */ | ||
900 | net_assign_generic(net, ppp_net_id, NULL); | ||
901 | kfree(pn); | ||
902 | } | ||
903 | |||
904 | static __net_initdata struct pernet_operations ppp_net_ops = { | ||
905 | .init = ppp_init_net, | ||
906 | .exit = ppp_exit_net, | ||
907 | }; | ||
908 | |||
837 | #define PPP_MAJOR 108 | 909 | #define PPP_MAJOR 108 |
838 | 910 | ||
839 | /* Called at boot time if ppp is compiled into the kernel, | 911 | /* Called at boot time if ppp is compiled into the kernel, |
@@ -843,25 +915,36 @@ static int __init ppp_init(void) | |||
843 | int err; | 915 | int err; |
844 | 916 | ||
845 | printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n"); | 917 | printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n"); |
846 | err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops); | 918 | |
847 | if (!err) { | 919 | err = register_pernet_gen_device(&ppp_net_id, &ppp_net_ops); |
848 | ppp_class = class_create(THIS_MODULE, "ppp"); | 920 | if (err) { |
849 | if (IS_ERR(ppp_class)) { | 921 | printk(KERN_ERR "failed to register PPP pernet device (%d)\n", err); |
850 | err = PTR_ERR(ppp_class); | 922 | goto out; |
851 | goto out_chrdev; | ||
852 | } | ||
853 | device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, | ||
854 | "ppp"); | ||
855 | } | 923 | } |
856 | 924 | ||
857 | out: | 925 | err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops); |
858 | if (err) | 926 | if (err) { |
859 | printk(KERN_ERR "failed to register PPP device (%d)\n", err); | 927 | printk(KERN_ERR "failed to register PPP device (%d)\n", err); |
860 | return err; | 928 | goto out_net; |
929 | } | ||
930 | |||
931 | ppp_class = class_create(THIS_MODULE, "ppp"); | ||
932 | if (IS_ERR(ppp_class)) { | ||
933 | err = PTR_ERR(ppp_class); | ||
934 | goto out_chrdev; | ||
935 | } | ||
936 | |||
937 | /* not a big deal if we fail here :-) */ | ||
938 | device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp"); | ||
939 | |||
940 | return 0; | ||
861 | 941 | ||
862 | out_chrdev: | 942 | out_chrdev: |
863 | unregister_chrdev(PPP_MAJOR, "ppp"); | 943 | unregister_chrdev(PPP_MAJOR, "ppp"); |
864 | goto out; | 944 | out_net: |
945 | unregister_pernet_gen_device(ppp_net_id, &ppp_net_ops); | ||
946 | out: | ||
947 | return err; | ||
865 | } | 948 | } |
866 | 949 | ||
867 | /* | 950 | /* |
@@ -969,6 +1052,7 @@ static void ppp_setup(struct net_device *dev) | |||
969 | dev->tx_queue_len = 3; | 1052 | dev->tx_queue_len = 3; |
970 | dev->type = ARPHRD_PPP; | 1053 | dev->type = ARPHRD_PPP; |
971 | dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; | 1054 | dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; |
1055 | dev->features |= NETIF_F_NETNS_LOCAL; | ||
972 | } | 1056 | } |
973 | 1057 | ||
974 | /* | 1058 | /* |
@@ -1986,19 +2070,27 @@ ppp_mp_reconstruct(struct ppp *ppp) | |||
1986 | * Channel interface. | 2070 | * Channel interface. |
1987 | */ | 2071 | */ |
1988 | 2072 | ||
1989 | /* | 2073 | /* Create a new, unattached ppp channel. */ |
1990 | * Create a new, unattached ppp channel. | 2074 | int ppp_register_channel(struct ppp_channel *chan) |
1991 | */ | 2075 | { |
1992 | int | 2076 | return ppp_register_net_channel(current->nsproxy->net_ns, chan); |
1993 | ppp_register_channel(struct ppp_channel *chan) | 2077 | } |
2078 | |||
2079 | /* Create a new, unattached ppp channel for specified net. */ | ||
2080 | int ppp_register_net_channel(struct net *net, struct ppp_channel *chan) | ||
1994 | { | 2081 | { |
1995 | struct channel *pch; | 2082 | struct channel *pch; |
2083 | struct ppp_net *pn; | ||
1996 | 2084 | ||
1997 | pch = kzalloc(sizeof(struct channel), GFP_KERNEL); | 2085 | pch = kzalloc(sizeof(struct channel), GFP_KERNEL); |
1998 | if (!pch) | 2086 | if (!pch) |
1999 | return -ENOMEM; | 2087 | return -ENOMEM; |
2088 | |||
2089 | pn = ppp_pernet(net); | ||
2090 | |||
2000 | pch->ppp = NULL; | 2091 | pch->ppp = NULL; |
2001 | pch->chan = chan; | 2092 | pch->chan = chan; |
2093 | pch->chan_net = net; | ||
2002 | chan->ppp = pch; | 2094 | chan->ppp = pch; |
2003 | init_ppp_file(&pch->file, CHANNEL); | 2095 | init_ppp_file(&pch->file, CHANNEL); |
2004 | pch->file.hdrlen = chan->hdrlen; | 2096 | pch->file.hdrlen = chan->hdrlen; |
@@ -2008,11 +2100,13 @@ ppp_register_channel(struct ppp_channel *chan) | |||
2008 | init_rwsem(&pch->chan_sem); | 2100 | init_rwsem(&pch->chan_sem); |
2009 | spin_lock_init(&pch->downl); | 2101 | spin_lock_init(&pch->downl); |
2010 | rwlock_init(&pch->upl); | 2102 | rwlock_init(&pch->upl); |
2011 | spin_lock_bh(&all_channels_lock); | 2103 | |
2012 | pch->file.index = ++last_channel_index; | 2104 | spin_lock_bh(&pn->all_channels_lock); |
2013 | list_add(&pch->list, &new_channels); | 2105 | pch->file.index = ++pn->last_channel_index; |
2106 | list_add(&pch->list, &pn->new_channels); | ||
2014 | atomic_inc(&channel_count); | 2107 | atomic_inc(&channel_count); |
2015 | spin_unlock_bh(&all_channels_lock); | 2108 | spin_unlock_bh(&pn->all_channels_lock); |
2109 | |||
2016 | return 0; | 2110 | return 0; |
2017 | } | 2111 | } |
2018 | 2112 | ||
@@ -2053,9 +2147,11 @@ void | |||
2053 | ppp_unregister_channel(struct ppp_channel *chan) | 2147 | ppp_unregister_channel(struct ppp_channel *chan) |
2054 | { | 2148 | { |
2055 | struct channel *pch = chan->ppp; | 2149 | struct channel *pch = chan->ppp; |
2150 | struct ppp_net *pn; | ||
2056 | 2151 | ||
2057 | if (!pch) | 2152 | if (!pch) |
2058 | return; /* should never happen */ | 2153 | return; /* should never happen */ |
2154 | |||
2059 | chan->ppp = NULL; | 2155 | chan->ppp = NULL; |
2060 | 2156 | ||
2061 | /* | 2157 | /* |
@@ -2068,9 +2164,12 @@ ppp_unregister_channel(struct ppp_channel *chan) | |||
2068 | spin_unlock_bh(&pch->downl); | 2164 | spin_unlock_bh(&pch->downl); |
2069 | up_write(&pch->chan_sem); | 2165 | up_write(&pch->chan_sem); |
2070 | ppp_disconnect_channel(pch); | 2166 | ppp_disconnect_channel(pch); |
2071 | spin_lock_bh(&all_channels_lock); | 2167 | |
2168 | pn = ppp_pernet(pch->chan_net); | ||
2169 | spin_lock_bh(&pn->all_channels_lock); | ||
2072 | list_del(&pch->list); | 2170 | list_del(&pch->list); |
2073 | spin_unlock_bh(&all_channels_lock); | 2171 | spin_unlock_bh(&pn->all_channels_lock); |
2172 | |||
2074 | pch->file.dead = 1; | 2173 | pch->file.dead = 1; |
2075 | wake_up_interruptible(&pch->file.rwait); | 2174 | wake_up_interruptible(&pch->file.rwait); |
2076 | if (atomic_dec_and_test(&pch->file.refcnt)) | 2175 | if (atomic_dec_and_test(&pch->file.refcnt)) |
@@ -2395,9 +2494,10 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st) | |||
2395 | * unit == -1 means allocate a new number. | 2494 | * unit == -1 means allocate a new number. |
2396 | */ | 2495 | */ |
2397 | static struct ppp * | 2496 | static struct ppp * |
2398 | ppp_create_interface(int unit, int *retp) | 2497 | ppp_create_interface(struct net *net, int unit, int *retp) |
2399 | { | 2498 | { |
2400 | struct ppp *ppp; | 2499 | struct ppp *ppp; |
2500 | struct ppp_net *pn; | ||
2401 | struct net_device *dev = NULL; | 2501 | struct net_device *dev = NULL; |
2402 | int ret = -ENOMEM; | 2502 | int ret = -ENOMEM; |
2403 | int i; | 2503 | int i; |
@@ -2406,6 +2506,8 @@ ppp_create_interface(int unit, int *retp) | |||
2406 | if (!dev) | 2506 | if (!dev) |
2407 | goto out1; | 2507 | goto out1; |
2408 | 2508 | ||
2509 | pn = ppp_pernet(net); | ||
2510 | |||
2409 | ppp = netdev_priv(dev); | 2511 | ppp = netdev_priv(dev); |
2410 | ppp->dev = dev; | 2512 | ppp->dev = dev; |
2411 | ppp->mru = PPP_MRU; | 2513 | ppp->mru = PPP_MRU; |
@@ -2421,17 +2523,23 @@ ppp_create_interface(int unit, int *retp) | |||
2421 | skb_queue_head_init(&ppp->mrq); | 2523 | skb_queue_head_init(&ppp->mrq); |
2422 | #endif /* CONFIG_PPP_MULTILINK */ | 2524 | #endif /* CONFIG_PPP_MULTILINK */ |
2423 | 2525 | ||
2526 | /* | ||
2527 | * drum roll: don't forget to set | ||
2528 | * the net device is belong to | ||
2529 | */ | ||
2530 | dev_net_set(dev, net); | ||
2531 | |||
2424 | ret = -EEXIST; | 2532 | ret = -EEXIST; |
2425 | mutex_lock(&all_ppp_mutex); | 2533 | mutex_lock(&pn->all_ppp_mutex); |
2426 | 2534 | ||
2427 | if (unit < 0) { | 2535 | if (unit < 0) { |
2428 | unit = unit_get(&ppp_units_idr, ppp); | 2536 | unit = unit_get(&pn->units_idr, ppp); |
2429 | if (unit < 0) { | 2537 | if (unit < 0) { |
2430 | *retp = unit; | 2538 | *retp = unit; |
2431 | goto out2; | 2539 | goto out2; |
2432 | } | 2540 | } |
2433 | } else { | 2541 | } else { |
2434 | if (unit_find(&ppp_units_idr, unit)) | 2542 | if (unit_find(&pn->units_idr, unit)) |
2435 | goto out2; /* unit already exists */ | 2543 | goto out2; /* unit already exists */ |
2436 | /* | 2544 | /* |
2437 | * if caller need a specified unit number | 2545 | * if caller need a specified unit number |
@@ -2442,7 +2550,7 @@ ppp_create_interface(int unit, int *retp) | |||
2442 | * fair but at least pppd will ask us to allocate | 2550 | * fair but at least pppd will ask us to allocate |
2443 | * new unit in this case so user is happy :) | 2551 | * new unit in this case so user is happy :) |
2444 | */ | 2552 | */ |
2445 | unit = unit_set(&ppp_units_idr, ppp, unit); | 2553 | unit = unit_set(&pn->units_idr, ppp, unit); |
2446 | if (unit < 0) | 2554 | if (unit < 0) |
2447 | goto out2; | 2555 | goto out2; |
2448 | } | 2556 | } |
@@ -2453,20 +2561,22 @@ ppp_create_interface(int unit, int *retp) | |||
2453 | 2561 | ||
2454 | ret = register_netdev(dev); | 2562 | ret = register_netdev(dev); |
2455 | if (ret != 0) { | 2563 | if (ret != 0) { |
2456 | unit_put(&ppp_units_idr, unit); | 2564 | unit_put(&pn->units_idr, unit); |
2457 | printk(KERN_ERR "PPP: couldn't register device %s (%d)\n", | 2565 | printk(KERN_ERR "PPP: couldn't register device %s (%d)\n", |
2458 | dev->name, ret); | 2566 | dev->name, ret); |
2459 | goto out2; | 2567 | goto out2; |
2460 | } | 2568 | } |
2461 | 2569 | ||
2570 | ppp->ppp_net = net; | ||
2571 | |||
2462 | atomic_inc(&ppp_unit_count); | 2572 | atomic_inc(&ppp_unit_count); |
2463 | mutex_unlock(&all_ppp_mutex); | 2573 | mutex_unlock(&pn->all_ppp_mutex); |
2464 | 2574 | ||
2465 | *retp = 0; | 2575 | *retp = 0; |
2466 | return ppp; | 2576 | return ppp; |
2467 | 2577 | ||
2468 | out2: | 2578 | out2: |
2469 | mutex_unlock(&all_ppp_mutex); | 2579 | mutex_unlock(&pn->all_ppp_mutex); |
2470 | free_netdev(dev); | 2580 | free_netdev(dev); |
2471 | out1: | 2581 | out1: |
2472 | *retp = ret; | 2582 | *retp = ret; |
@@ -2492,7 +2602,11 @@ init_ppp_file(struct ppp_file *pf, int kind) | |||
2492 | */ | 2602 | */ |
2493 | static void ppp_shutdown_interface(struct ppp *ppp) | 2603 | static void ppp_shutdown_interface(struct ppp *ppp) |
2494 | { | 2604 | { |
2495 | mutex_lock(&all_ppp_mutex); | 2605 | struct ppp_net *pn; |
2606 | |||
2607 | pn = ppp_pernet(ppp->ppp_net); | ||
2608 | mutex_lock(&pn->all_ppp_mutex); | ||
2609 | |||
2496 | /* This will call dev_close() for us. */ | 2610 | /* This will call dev_close() for us. */ |
2497 | ppp_lock(ppp); | 2611 | ppp_lock(ppp); |
2498 | if (!ppp->closing) { | 2612 | if (!ppp->closing) { |
@@ -2502,11 +2616,12 @@ static void ppp_shutdown_interface(struct ppp *ppp) | |||
2502 | } else | 2616 | } else |
2503 | ppp_unlock(ppp); | 2617 | ppp_unlock(ppp); |
2504 | 2618 | ||
2505 | unit_put(&ppp_units_idr, ppp->file.index); | 2619 | unit_put(&pn->units_idr, ppp->file.index); |
2506 | ppp->file.dead = 1; | 2620 | ppp->file.dead = 1; |
2507 | ppp->owner = NULL; | 2621 | ppp->owner = NULL; |
2508 | wake_up_interruptible(&ppp->file.rwait); | 2622 | wake_up_interruptible(&ppp->file.rwait); |
2509 | mutex_unlock(&all_ppp_mutex); | 2623 | |
2624 | mutex_unlock(&pn->all_ppp_mutex); | ||
2510 | } | 2625 | } |
2511 | 2626 | ||
2512 | /* | 2627 | /* |
@@ -2554,9 +2669,9 @@ static void ppp_destroy_interface(struct ppp *ppp) | |||
2554 | * The caller should have locked the all_ppp_mutex. | 2669 | * The caller should have locked the all_ppp_mutex. |
2555 | */ | 2670 | */ |
2556 | static struct ppp * | 2671 | static struct ppp * |
2557 | ppp_find_unit(int unit) | 2672 | ppp_find_unit(struct ppp_net *pn, int unit) |
2558 | { | 2673 | { |
2559 | return unit_find(&ppp_units_idr, unit); | 2674 | return unit_find(&pn->units_idr, unit); |
2560 | } | 2675 | } |
2561 | 2676 | ||
2562 | /* | 2677 | /* |
@@ -2568,20 +2683,22 @@ ppp_find_unit(int unit) | |||
2568 | * when we have a lot of channels in use. | 2683 | * when we have a lot of channels in use. |
2569 | */ | 2684 | */ |
2570 | static struct channel * | 2685 | static struct channel * |
2571 | ppp_find_channel(int unit) | 2686 | ppp_find_channel(struct ppp_net *pn, int unit) |
2572 | { | 2687 | { |
2573 | struct channel *pch; | 2688 | struct channel *pch; |
2574 | 2689 | ||
2575 | list_for_each_entry(pch, &new_channels, list) { | 2690 | list_for_each_entry(pch, &pn->new_channels, list) { |
2576 | if (pch->file.index == unit) { | 2691 | if (pch->file.index == unit) { |
2577 | list_move(&pch->list, &all_channels); | 2692 | list_move(&pch->list, &pn->all_channels); |
2578 | return pch; | 2693 | return pch; |
2579 | } | 2694 | } |
2580 | } | 2695 | } |
2581 | list_for_each_entry(pch, &all_channels, list) { | 2696 | |
2697 | list_for_each_entry(pch, &pn->all_channels, list) { | ||
2582 | if (pch->file.index == unit) | 2698 | if (pch->file.index == unit) |
2583 | return pch; | 2699 | return pch; |
2584 | } | 2700 | } |
2701 | |||
2585 | return NULL; | 2702 | return NULL; |
2586 | } | 2703 | } |
2587 | 2704 | ||
@@ -2592,11 +2709,14 @@ static int | |||
2592 | ppp_connect_channel(struct channel *pch, int unit) | 2709 | ppp_connect_channel(struct channel *pch, int unit) |
2593 | { | 2710 | { |
2594 | struct ppp *ppp; | 2711 | struct ppp *ppp; |
2712 | struct ppp_net *pn; | ||
2595 | int ret = -ENXIO; | 2713 | int ret = -ENXIO; |
2596 | int hdrlen; | 2714 | int hdrlen; |
2597 | 2715 | ||
2598 | mutex_lock(&all_ppp_mutex); | 2716 | pn = ppp_pernet(pch->chan_net); |
2599 | ppp = ppp_find_unit(unit); | 2717 | |
2718 | mutex_lock(&pn->all_ppp_mutex); | ||
2719 | ppp = ppp_find_unit(pn, unit); | ||
2600 | if (!ppp) | 2720 | if (!ppp) |
2601 | goto out; | 2721 | goto out; |
2602 | write_lock_bh(&pch->upl); | 2722 | write_lock_bh(&pch->upl); |
@@ -2620,7 +2740,7 @@ ppp_connect_channel(struct channel *pch, int unit) | |||
2620 | outl: | 2740 | outl: |
2621 | write_unlock_bh(&pch->upl); | 2741 | write_unlock_bh(&pch->upl); |
2622 | out: | 2742 | out: |
2623 | mutex_unlock(&all_ppp_mutex); | 2743 | mutex_unlock(&pn->all_ppp_mutex); |
2624 | return ret; | 2744 | return ret; |
2625 | } | 2745 | } |
2626 | 2746 | ||
@@ -2677,7 +2797,7 @@ static void __exit ppp_cleanup(void) | |||
2677 | unregister_chrdev(PPP_MAJOR, "ppp"); | 2797 | unregister_chrdev(PPP_MAJOR, "ppp"); |
2678 | device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0)); | 2798 | device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0)); |
2679 | class_destroy(ppp_class); | 2799 | class_destroy(ppp_class); |
2680 | idr_destroy(&ppp_units_idr); | 2800 | unregister_pernet_gen_device(ppp_net_id, &ppp_net_ops); |
2681 | } | 2801 | } |
2682 | 2802 | ||
2683 | /* | 2803 | /* |
@@ -2743,6 +2863,7 @@ static void *unit_find(struct idr *p, int n) | |||
2743 | module_init(ppp_init); | 2863 | module_init(ppp_init); |
2744 | module_exit(ppp_cleanup); | 2864 | module_exit(ppp_cleanup); |
2745 | 2865 | ||
2866 | EXPORT_SYMBOL(ppp_register_net_channel); | ||
2746 | EXPORT_SYMBOL(ppp_register_channel); | 2867 | EXPORT_SYMBOL(ppp_register_channel); |
2747 | EXPORT_SYMBOL(ppp_unregister_channel); | 2868 | EXPORT_SYMBOL(ppp_unregister_channel); |
2748 | EXPORT_SYMBOL(ppp_channel_index); | 2869 | EXPORT_SYMBOL(ppp_channel_index); |
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index c22b30533a14..1011fd64108b 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c | |||
@@ -78,38 +78,73 @@ | |||
78 | #include <linux/proc_fs.h> | 78 | #include <linux/proc_fs.h> |
79 | #include <linux/seq_file.h> | 79 | #include <linux/seq_file.h> |
80 | 80 | ||
81 | #include <linux/nsproxy.h> | ||
81 | #include <net/net_namespace.h> | 82 | #include <net/net_namespace.h> |
83 | #include <net/netns/generic.h> | ||
82 | #include <net/sock.h> | 84 | #include <net/sock.h> |
83 | 85 | ||
84 | #include <asm/uaccess.h> | 86 | #include <asm/uaccess.h> |
85 | 87 | ||
86 | #define PPPOE_HASH_BITS 4 | 88 | #define PPPOE_HASH_BITS 4 |
87 | #define PPPOE_HASH_SIZE (1<<PPPOE_HASH_BITS) | 89 | #define PPPOE_HASH_SIZE (1 << PPPOE_HASH_BITS) |
88 | 90 | #define PPPOE_HASH_MASK (PPPOE_HASH_SIZE - 1) | |
89 | static struct ppp_channel_ops pppoe_chan_ops; | ||
90 | 91 | ||
91 | static int pppoe_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); | 92 | static int pppoe_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); |
92 | static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb); | 93 | static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb); |
93 | static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb); | 94 | static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb); |
94 | 95 | ||
95 | static const struct proto_ops pppoe_ops; | 96 | static const struct proto_ops pppoe_ops; |
96 | static DEFINE_RWLOCK(pppoe_hash_lock); | ||
97 | |||
98 | static struct ppp_channel_ops pppoe_chan_ops; | 97 | static struct ppp_channel_ops pppoe_chan_ops; |
99 | 98 | ||
99 | /* per-net private data for this module */ | ||
100 | static unsigned int pppoe_net_id; | ||
101 | struct pppoe_net { | ||
102 | /* | ||
103 | * we could use _single_ hash table for all | ||
104 | * nets by injecting net id into the hash but | ||
105 | * it would increase hash chains and add | ||
106 | * a few additional math comparations messy | ||
107 | * as well, moreover in case of SMP less locking | ||
108 | * controversy here | ||
109 | */ | ||
110 | struct pppox_sock *hash_table[PPPOE_HASH_SIZE]; | ||
111 | rwlock_t hash_lock; | ||
112 | }; | ||
113 | |||
114 | /* to eliminate a race btw pppoe_flush_dev and pppoe_release */ | ||
115 | static DEFINE_SPINLOCK(flush_lock); | ||
116 | |||
117 | /* | ||
118 | * PPPoE could be in the following stages: | ||
119 | * 1) Discovery stage (to obtain remote MAC and Session ID) | ||
120 | * 2) Session stage (MAC and SID are known) | ||
121 | * | ||
122 | * Ethernet frames have a special tag for this but | ||
123 | * we use simplier approach based on session id | ||
124 | */ | ||
125 | static inline bool stage_session(__be16 sid) | ||
126 | { | ||
127 | return sid != 0; | ||
128 | } | ||
129 | |||
130 | static inline struct pppoe_net *pppoe_pernet(struct net *net) | ||
131 | { | ||
132 | BUG_ON(!net); | ||
133 | |||
134 | return net_generic(net, pppoe_net_id); | ||
135 | } | ||
136 | |||
100 | static inline int cmp_2_addr(struct pppoe_addr *a, struct pppoe_addr *b) | 137 | static inline int cmp_2_addr(struct pppoe_addr *a, struct pppoe_addr *b) |
101 | { | 138 | { |
102 | return (a->sid == b->sid && | 139 | return a->sid == b->sid && !memcmp(a->remote, b->remote, ETH_ALEN); |
103 | (memcmp(a->remote, b->remote, ETH_ALEN) == 0)); | ||
104 | } | 140 | } |
105 | 141 | ||
106 | static inline int cmp_addr(struct pppoe_addr *a, __be16 sid, char *addr) | 142 | static inline int cmp_addr(struct pppoe_addr *a, __be16 sid, char *addr) |
107 | { | 143 | { |
108 | return (a->sid == sid && | 144 | return a->sid == sid && !memcmp(a->remote, addr, ETH_ALEN); |
109 | (memcmp(a->remote,addr,ETH_ALEN) == 0)); | ||
110 | } | 145 | } |
111 | 146 | ||
112 | #if 8%PPPOE_HASH_BITS | 147 | #if 8 % PPPOE_HASH_BITS |
113 | #error 8 must be a multiple of PPPOE_HASH_BITS | 148 | #error 8 must be a multiple of PPPOE_HASH_BITS |
114 | #endif | 149 | #endif |
115 | 150 | ||
@@ -118,69 +153,71 @@ static int hash_item(__be16 sid, unsigned char *addr) | |||
118 | unsigned char hash = 0; | 153 | unsigned char hash = 0; |
119 | unsigned int i; | 154 | unsigned int i; |
120 | 155 | ||
121 | for (i = 0 ; i < ETH_ALEN ; i++) { | 156 | for (i = 0; i < ETH_ALEN; i++) |
122 | hash ^= addr[i]; | 157 | hash ^= addr[i]; |
123 | } | 158 | for (i = 0; i < sizeof(sid_t) * 8; i += 8) |
124 | for (i = 0 ; i < sizeof(sid_t)*8 ; i += 8 ){ | 159 | hash ^= (__force __u32)sid >> i; |
125 | hash ^= (__force __u32)sid>>i; | 160 | for (i = 8; (i >>= 1) >= PPPOE_HASH_BITS;) |
126 | } | 161 | hash ^= hash >> i; |
127 | for (i = 8 ; (i>>=1) >= PPPOE_HASH_BITS ; ) { | ||
128 | hash ^= hash>>i; | ||
129 | } | ||
130 | 162 | ||
131 | return hash & ( PPPOE_HASH_SIZE - 1 ); | 163 | return hash & PPPOE_HASH_MASK; |
132 | } | 164 | } |
133 | 165 | ||
134 | /* zeroed because its in .bss */ | ||
135 | static struct pppox_sock *item_hash_table[PPPOE_HASH_SIZE]; | ||
136 | |||
137 | /********************************************************************** | 166 | /********************************************************************** |
138 | * | 167 | * |
139 | * Set/get/delete/rehash items (internal versions) | 168 | * Set/get/delete/rehash items (internal versions) |
140 | * | 169 | * |
141 | **********************************************************************/ | 170 | **********************************************************************/ |
142 | static struct pppox_sock *__get_item(__be16 sid, unsigned char *addr, int ifindex) | 171 | static struct pppox_sock *__get_item(struct pppoe_net *pn, __be16 sid, |
172 | unsigned char *addr, int ifindex) | ||
143 | { | 173 | { |
144 | int hash = hash_item(sid, addr); | 174 | int hash = hash_item(sid, addr); |
145 | struct pppox_sock *ret; | 175 | struct pppox_sock *ret; |
146 | 176 | ||
147 | ret = item_hash_table[hash]; | 177 | ret = pn->hash_table[hash]; |
178 | while (ret) { | ||
179 | if (cmp_addr(&ret->pppoe_pa, sid, addr) && | ||
180 | ret->pppoe_ifindex == ifindex) | ||
181 | return ret; | ||
148 | 182 | ||
149 | while (ret && !(cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_ifindex == ifindex)) | ||
150 | ret = ret->next; | 183 | ret = ret->next; |
184 | } | ||
151 | 185 | ||
152 | return ret; | 186 | return NULL; |
153 | } | 187 | } |
154 | 188 | ||
155 | static int __set_item(struct pppox_sock *po) | 189 | static int __set_item(struct pppoe_net *pn, struct pppox_sock *po) |
156 | { | 190 | { |
157 | int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote); | 191 | int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote); |
158 | struct pppox_sock *ret; | 192 | struct pppox_sock *ret; |
159 | 193 | ||
160 | ret = item_hash_table[hash]; | 194 | ret = pn->hash_table[hash]; |
161 | while (ret) { | 195 | while (ret) { |
162 | if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa) && ret->pppoe_ifindex == po->pppoe_ifindex) | 196 | if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa) && |
197 | ret->pppoe_ifindex == po->pppoe_ifindex) | ||
163 | return -EALREADY; | 198 | return -EALREADY; |
164 | 199 | ||
165 | ret = ret->next; | 200 | ret = ret->next; |
166 | } | 201 | } |
167 | 202 | ||
168 | po->next = item_hash_table[hash]; | 203 | po->next = pn->hash_table[hash]; |
169 | item_hash_table[hash] = po; | 204 | pn->hash_table[hash] = po; |
170 | 205 | ||
171 | return 0; | 206 | return 0; |
172 | } | 207 | } |
173 | 208 | ||
174 | static struct pppox_sock *__delete_item(__be16 sid, char *addr, int ifindex) | 209 | static struct pppox_sock *__delete_item(struct pppoe_net *pn, __be16 sid, |
210 | char *addr, int ifindex) | ||
175 | { | 211 | { |
176 | int hash = hash_item(sid, addr); | 212 | int hash = hash_item(sid, addr); |
177 | struct pppox_sock *ret, **src; | 213 | struct pppox_sock *ret, **src; |
178 | 214 | ||
179 | ret = item_hash_table[hash]; | 215 | ret = pn->hash_table[hash]; |
180 | src = &item_hash_table[hash]; | 216 | src = &pn->hash_table[hash]; |
181 | 217 | ||
182 | while (ret) { | 218 | while (ret) { |
183 | if (cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_ifindex == ifindex) { | 219 | if (cmp_addr(&ret->pppoe_pa, sid, addr) && |
220 | ret->pppoe_ifindex == ifindex) { | ||
184 | *src = ret->next; | 221 | *src = ret->next; |
185 | break; | 222 | break; |
186 | } | 223 | } |
@@ -197,46 +234,54 @@ static struct pppox_sock *__delete_item(__be16 sid, char *addr, int ifindex) | |||
197 | * Set/get/delete/rehash items | 234 | * Set/get/delete/rehash items |
198 | * | 235 | * |
199 | **********************************************************************/ | 236 | **********************************************************************/ |
200 | static inline struct pppox_sock *get_item(__be16 sid, | 237 | static inline struct pppox_sock *get_item(struct pppoe_net *pn, __be16 sid, |
201 | unsigned char *addr, int ifindex) | 238 | unsigned char *addr, int ifindex) |
202 | { | 239 | { |
203 | struct pppox_sock *po; | 240 | struct pppox_sock *po; |
204 | 241 | ||
205 | read_lock_bh(&pppoe_hash_lock); | 242 | read_lock_bh(&pn->hash_lock); |
206 | po = __get_item(sid, addr, ifindex); | 243 | po = __get_item(pn, sid, addr, ifindex); |
207 | if (po) | 244 | if (po) |
208 | sock_hold(sk_pppox(po)); | 245 | sock_hold(sk_pppox(po)); |
209 | read_unlock_bh(&pppoe_hash_lock); | 246 | read_unlock_bh(&pn->hash_lock); |
210 | 247 | ||
211 | return po; | 248 | return po; |
212 | } | 249 | } |
213 | 250 | ||
214 | static inline struct pppox_sock *get_item_by_addr(struct sockaddr_pppox *sp) | 251 | static inline struct pppox_sock *get_item_by_addr(struct net *net, |
252 | struct sockaddr_pppox *sp) | ||
215 | { | 253 | { |
216 | struct net_device *dev; | 254 | struct net_device *dev; |
255 | struct pppoe_net *pn; | ||
256 | struct pppox_sock *pppox_sock; | ||
257 | |||
217 | int ifindex; | 258 | int ifindex; |
218 | 259 | ||
219 | dev = dev_get_by_name(&init_net, sp->sa_addr.pppoe.dev); | 260 | dev = dev_get_by_name(net, sp->sa_addr.pppoe.dev); |
220 | if(!dev) | 261 | if (!dev) |
221 | return NULL; | 262 | return NULL; |
263 | |||
222 | ifindex = dev->ifindex; | 264 | ifindex = dev->ifindex; |
265 | pn = net_generic(net, pppoe_net_id); | ||
266 | pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid, | ||
267 | sp->sa_addr.pppoe.remote, ifindex); | ||
223 | dev_put(dev); | 268 | dev_put(dev); |
224 | return get_item(sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote, ifindex); | 269 | |
270 | return pppox_sock; | ||
225 | } | 271 | } |
226 | 272 | ||
227 | static inline struct pppox_sock *delete_item(__be16 sid, char *addr, int ifindex) | 273 | static inline struct pppox_sock *delete_item(struct pppoe_net *pn, __be16 sid, |
274 | char *addr, int ifindex) | ||
228 | { | 275 | { |
229 | struct pppox_sock *ret; | 276 | struct pppox_sock *ret; |
230 | 277 | ||
231 | write_lock_bh(&pppoe_hash_lock); | 278 | write_lock_bh(&pn->hash_lock); |
232 | ret = __delete_item(sid, addr, ifindex); | 279 | ret = __delete_item(pn, sid, addr, ifindex); |
233 | write_unlock_bh(&pppoe_hash_lock); | 280 | write_unlock_bh(&pn->hash_lock); |
234 | 281 | ||
235 | return ret; | 282 | return ret; |
236 | } | 283 | } |
237 | 284 | ||
238 | |||
239 | |||
240 | /*************************************************************************** | 285 | /*************************************************************************** |
241 | * | 286 | * |
242 | * Handler for device events. | 287 | * Handler for device events. |
@@ -246,25 +291,33 @@ static inline struct pppox_sock *delete_item(__be16 sid, char *addr, int ifindex | |||
246 | 291 | ||
247 | static void pppoe_flush_dev(struct net_device *dev) | 292 | static void pppoe_flush_dev(struct net_device *dev) |
248 | { | 293 | { |
249 | int hash; | 294 | struct pppoe_net *pn; |
295 | int i; | ||
296 | |||
250 | BUG_ON(dev == NULL); | 297 | BUG_ON(dev == NULL); |
251 | 298 | ||
252 | write_lock_bh(&pppoe_hash_lock); | 299 | pn = pppoe_pernet(dev_net(dev)); |
253 | for (hash = 0; hash < PPPOE_HASH_SIZE; hash++) { | 300 | if (!pn) /* already freed */ |
254 | struct pppox_sock *po = item_hash_table[hash]; | 301 | return; |
302 | |||
303 | write_lock_bh(&pn->hash_lock); | ||
304 | for (i = 0; i < PPPOE_HASH_SIZE; i++) { | ||
305 | struct pppox_sock *po = pn->hash_table[i]; | ||
255 | 306 | ||
256 | while (po != NULL) { | 307 | while (po != NULL) { |
257 | struct sock *sk = sk_pppox(po); | 308 | struct sock *sk; |
258 | if (po->pppoe_dev != dev) { | 309 | if (po->pppoe_dev != dev) { |
259 | po = po->next; | 310 | po = po->next; |
260 | continue; | 311 | continue; |
261 | } | 312 | } |
313 | sk = sk_pppox(po); | ||
314 | spin_lock(&flush_lock); | ||
262 | po->pppoe_dev = NULL; | 315 | po->pppoe_dev = NULL; |
316 | spin_unlock(&flush_lock); | ||
263 | dev_put(dev); | 317 | dev_put(dev); |
264 | 318 | ||
265 | |||
266 | /* We always grab the socket lock, followed by the | 319 | /* We always grab the socket lock, followed by the |
267 | * pppoe_hash_lock, in that order. Since we should | 320 | * hash_lock, in that order. Since we should |
268 | * hold the sock lock while doing any unbinding, | 321 | * hold the sock lock while doing any unbinding, |
269 | * we need to release the lock we're holding. | 322 | * we need to release the lock we're holding. |
270 | * Hold a reference to the sock so it doesn't disappear | 323 | * Hold a reference to the sock so it doesn't disappear |
@@ -273,7 +326,7 @@ static void pppoe_flush_dev(struct net_device *dev) | |||
273 | 326 | ||
274 | sock_hold(sk); | 327 | sock_hold(sk); |
275 | 328 | ||
276 | write_unlock_bh(&pppoe_hash_lock); | 329 | write_unlock_bh(&pn->hash_lock); |
277 | lock_sock(sk); | 330 | lock_sock(sk); |
278 | 331 | ||
279 | if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { | 332 | if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { |
@@ -289,20 +342,17 @@ static void pppoe_flush_dev(struct net_device *dev) | |||
289 | * While the lock was dropped the chain contents may | 342 | * While the lock was dropped the chain contents may |
290 | * have changed. | 343 | * have changed. |
291 | */ | 344 | */ |
292 | write_lock_bh(&pppoe_hash_lock); | 345 | write_lock_bh(&pn->hash_lock); |
293 | po = item_hash_table[hash]; | 346 | po = pn->hash_table[i]; |
294 | } | 347 | } |
295 | } | 348 | } |
296 | write_unlock_bh(&pppoe_hash_lock); | 349 | write_unlock_bh(&pn->hash_lock); |
297 | } | 350 | } |
298 | 351 | ||
299 | static int pppoe_device_event(struct notifier_block *this, | 352 | static int pppoe_device_event(struct notifier_block *this, |
300 | unsigned long event, void *ptr) | 353 | unsigned long event, void *ptr) |
301 | { | 354 | { |
302 | struct net_device *dev = (struct net_device *) ptr; | 355 | struct net_device *dev = (struct net_device *)ptr; |
303 | |||
304 | if (dev_net(dev) != &init_net) | ||
305 | return NOTIFY_DONE; | ||
306 | 356 | ||
307 | /* Only look at sockets that are using this specific device. */ | 357 | /* Only look at sockets that are using this specific device. */ |
308 | switch (event) { | 358 | switch (event) { |
@@ -324,12 +374,10 @@ static int pppoe_device_event(struct notifier_block *this, | |||
324 | return NOTIFY_DONE; | 374 | return NOTIFY_DONE; |
325 | } | 375 | } |
326 | 376 | ||
327 | |||
328 | static struct notifier_block pppoe_notifier = { | 377 | static struct notifier_block pppoe_notifier = { |
329 | .notifier_call = pppoe_device_event, | 378 | .notifier_call = pppoe_device_event, |
330 | }; | 379 | }; |
331 | 380 | ||
332 | |||
333 | /************************************************************************ | 381 | /************************************************************************ |
334 | * | 382 | * |
335 | * Do the real work of receiving a PPPoE Session frame. | 383 | * Do the real work of receiving a PPPoE Session frame. |
@@ -343,8 +391,8 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb) | |||
343 | if (sk->sk_state & PPPOX_BOUND) { | 391 | if (sk->sk_state & PPPOX_BOUND) { |
344 | ppp_input(&po->chan, skb); | 392 | ppp_input(&po->chan, skb); |
345 | } else if (sk->sk_state & PPPOX_RELAY) { | 393 | } else if (sk->sk_state & PPPOX_RELAY) { |
346 | relay_po = get_item_by_addr(&po->pppoe_relay); | 394 | relay_po = get_item_by_addr(dev_net(po->pppoe_dev), |
347 | 395 | &po->pppoe_relay); | |
348 | if (relay_po == NULL) | 396 | if (relay_po == NULL) |
349 | goto abort_kfree; | 397 | goto abort_kfree; |
350 | 398 | ||
@@ -373,22 +421,18 @@ abort_kfree: | |||
373 | * Receive wrapper called in BH context. | 421 | * Receive wrapper called in BH context. |
374 | * | 422 | * |
375 | ***********************************************************************/ | 423 | ***********************************************************************/ |
376 | static int pppoe_rcv(struct sk_buff *skb, | 424 | static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev, |
377 | struct net_device *dev, | 425 | struct packet_type *pt, struct net_device *orig_dev) |
378 | struct packet_type *pt, | ||
379 | struct net_device *orig_dev) | ||
380 | |||
381 | { | 426 | { |
382 | struct pppoe_hdr *ph; | 427 | struct pppoe_hdr *ph; |
383 | struct pppox_sock *po; | 428 | struct pppox_sock *po; |
429 | struct pppoe_net *pn; | ||
384 | int len; | 430 | int len; |
385 | 431 | ||
386 | if (!(skb = skb_share_check(skb, GFP_ATOMIC))) | 432 | skb = skb_share_check(skb, GFP_ATOMIC); |
433 | if (!skb) | ||
387 | goto out; | 434 | goto out; |
388 | 435 | ||
389 | if (dev_net(dev) != &init_net) | ||
390 | goto drop; | ||
391 | |||
392 | if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) | 436 | if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) |
393 | goto drop; | 437 | goto drop; |
394 | 438 | ||
@@ -402,7 +446,8 @@ static int pppoe_rcv(struct sk_buff *skb, | |||
402 | if (pskb_trim_rcsum(skb, len)) | 446 | if (pskb_trim_rcsum(skb, len)) |
403 | goto drop; | 447 | goto drop; |
404 | 448 | ||
405 | po = get_item(ph->sid, eth_hdr(skb)->h_source, dev->ifindex); | 449 | pn = pppoe_pernet(dev_net(dev)); |
450 | po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex); | ||
406 | if (!po) | 451 | if (!po) |
407 | goto drop; | 452 | goto drop; |
408 | 453 | ||
@@ -420,19 +465,16 @@ out: | |||
420 | * This is solely for detection of PADT frames | 465 | * This is solely for detection of PADT frames |
421 | * | 466 | * |
422 | ***********************************************************************/ | 467 | ***********************************************************************/ |
423 | static int pppoe_disc_rcv(struct sk_buff *skb, | 468 | static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev, |
424 | struct net_device *dev, | 469 | struct packet_type *pt, struct net_device *orig_dev) |
425 | struct packet_type *pt, | ||
426 | struct net_device *orig_dev) | ||
427 | 470 | ||
428 | { | 471 | { |
429 | struct pppoe_hdr *ph; | 472 | struct pppoe_hdr *ph; |
430 | struct pppox_sock *po; | 473 | struct pppox_sock *po; |
474 | struct pppoe_net *pn; | ||
431 | 475 | ||
432 | if (dev_net(dev) != &init_net) | 476 | skb = skb_share_check(skb, GFP_ATOMIC); |
433 | goto abort; | 477 | if (!skb) |
434 | |||
435 | if (!(skb = skb_share_check(skb, GFP_ATOMIC))) | ||
436 | goto out; | 478 | goto out; |
437 | 479 | ||
438 | if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) | 480 | if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) |
@@ -442,7 +484,8 @@ static int pppoe_disc_rcv(struct sk_buff *skb, | |||
442 | if (ph->code != PADT_CODE) | 484 | if (ph->code != PADT_CODE) |
443 | goto abort; | 485 | goto abort; |
444 | 486 | ||
445 | po = get_item(ph->sid, eth_hdr(skb)->h_source, dev->ifindex); | 487 | pn = pppoe_pernet(dev_net(dev)); |
488 | po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex); | ||
446 | if (po) { | 489 | if (po) { |
447 | struct sock *sk = sk_pppox(po); | 490 | struct sock *sk = sk_pppox(po); |
448 | 491 | ||
@@ -471,12 +514,12 @@ out: | |||
471 | } | 514 | } |
472 | 515 | ||
473 | static struct packet_type pppoes_ptype = { | 516 | static struct packet_type pppoes_ptype = { |
474 | .type = __constant_htons(ETH_P_PPP_SES), | 517 | .type = cpu_to_be16(ETH_P_PPP_SES), |
475 | .func = pppoe_rcv, | 518 | .func = pppoe_rcv, |
476 | }; | 519 | }; |
477 | 520 | ||
478 | static struct packet_type pppoed_ptype = { | 521 | static struct packet_type pppoed_ptype = { |
479 | .type = __constant_htons(ETH_P_PPP_DISC), | 522 | .type = cpu_to_be16(ETH_P_PPP_DISC), |
480 | .func = pppoe_disc_rcv, | 523 | .func = pppoe_disc_rcv, |
481 | }; | 524 | }; |
482 | 525 | ||
@@ -493,38 +536,37 @@ static struct proto pppoe_sk_proto = { | |||
493 | **********************************************************************/ | 536 | **********************************************************************/ |
494 | static int pppoe_create(struct net *net, struct socket *sock) | 537 | static int pppoe_create(struct net *net, struct socket *sock) |
495 | { | 538 | { |
496 | int error = -ENOMEM; | ||
497 | struct sock *sk; | 539 | struct sock *sk; |
498 | 540 | ||
499 | sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto); | 541 | sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto); |
500 | if (!sk) | 542 | if (!sk) |
501 | goto out; | 543 | return -ENOMEM; |
502 | 544 | ||
503 | sock_init_data(sock, sk); | 545 | sock_init_data(sock, sk); |
504 | 546 | ||
505 | sock->state = SS_UNCONNECTED; | 547 | sock->state = SS_UNCONNECTED; |
506 | sock->ops = &pppoe_ops; | 548 | sock->ops = &pppoe_ops; |
507 | 549 | ||
508 | sk->sk_backlog_rcv = pppoe_rcv_core; | 550 | sk->sk_backlog_rcv = pppoe_rcv_core; |
509 | sk->sk_state = PPPOX_NONE; | 551 | sk->sk_state = PPPOX_NONE; |
510 | sk->sk_type = SOCK_STREAM; | 552 | sk->sk_type = SOCK_STREAM; |
511 | sk->sk_family = PF_PPPOX; | 553 | sk->sk_family = PF_PPPOX; |
512 | sk->sk_protocol = PX_PROTO_OE; | 554 | sk->sk_protocol = PX_PROTO_OE; |
513 | 555 | ||
514 | error = 0; | 556 | return 0; |
515 | out: return error; | ||
516 | } | 557 | } |
517 | 558 | ||
518 | static int pppoe_release(struct socket *sock) | 559 | static int pppoe_release(struct socket *sock) |
519 | { | 560 | { |
520 | struct sock *sk = sock->sk; | 561 | struct sock *sk = sock->sk; |
521 | struct pppox_sock *po; | 562 | struct pppox_sock *po; |
563 | struct pppoe_net *pn; | ||
522 | 564 | ||
523 | if (!sk) | 565 | if (!sk) |
524 | return 0; | 566 | return 0; |
525 | 567 | ||
526 | lock_sock(sk); | 568 | lock_sock(sk); |
527 | if (sock_flag(sk, SOCK_DEAD)){ | 569 | if (sock_flag(sk, SOCK_DEAD)) { |
528 | release_sock(sk); | 570 | release_sock(sk); |
529 | return -EBADF; | 571 | return -EBADF; |
530 | } | 572 | } |
@@ -534,26 +576,39 @@ static int pppoe_release(struct socket *sock) | |||
534 | /* Signal the death of the socket. */ | 576 | /* Signal the death of the socket. */ |
535 | sk->sk_state = PPPOX_DEAD; | 577 | sk->sk_state = PPPOX_DEAD; |
536 | 578 | ||
579 | /* | ||
580 | * pppoe_flush_dev could lead to a race with | ||
581 | * this routine so we use flush_lock to eliminate | ||
582 | * such a case (we only need per-net specific data) | ||
583 | */ | ||
584 | spin_lock(&flush_lock); | ||
585 | po = pppox_sk(sk); | ||
586 | if (!po->pppoe_dev) { | ||
587 | spin_unlock(&flush_lock); | ||
588 | goto out; | ||
589 | } | ||
590 | pn = pppoe_pernet(dev_net(po->pppoe_dev)); | ||
591 | spin_unlock(&flush_lock); | ||
537 | 592 | ||
538 | /* Write lock on hash lock protects the entire "po" struct from | 593 | /* |
539 | * concurrent updates via pppoe_flush_dev. The "po" struct should | 594 | * protect "po" from concurrent updates |
540 | * be considered part of the hash table contents, thus protected | 595 | * on pppoe_flush_dev |
541 | * by the hash table lock */ | 596 | */ |
542 | write_lock_bh(&pppoe_hash_lock); | 597 | write_lock_bh(&pn->hash_lock); |
543 | 598 | ||
544 | po = pppox_sk(sk); | 599 | po = pppox_sk(sk); |
545 | if (po->pppoe_pa.sid) { | 600 | if (stage_session(po->pppoe_pa.sid)) |
546 | __delete_item(po->pppoe_pa.sid, | 601 | __delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote, |
547 | po->pppoe_pa.remote, po->pppoe_ifindex); | 602 | po->pppoe_ifindex); |
548 | } | ||
549 | 603 | ||
550 | if (po->pppoe_dev) { | 604 | if (po->pppoe_dev) { |
551 | dev_put(po->pppoe_dev); | 605 | dev_put(po->pppoe_dev); |
552 | po->pppoe_dev = NULL; | 606 | po->pppoe_dev = NULL; |
553 | } | 607 | } |
554 | 608 | ||
555 | write_unlock_bh(&pppoe_hash_lock); | 609 | write_unlock_bh(&pn->hash_lock); |
556 | 610 | ||
611 | out: | ||
557 | sock_orphan(sk); | 612 | sock_orphan(sk); |
558 | sock->sk = NULL; | 613 | sock->sk = NULL; |
559 | 614 | ||
@@ -564,14 +619,14 @@ static int pppoe_release(struct socket *sock) | |||
564 | return 0; | 619 | return 0; |
565 | } | 620 | } |
566 | 621 | ||
567 | |||
568 | static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, | 622 | static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, |
569 | int sockaddr_len, int flags) | 623 | int sockaddr_len, int flags) |
570 | { | 624 | { |
571 | struct sock *sk = sock->sk; | 625 | struct sock *sk = sock->sk; |
572 | struct net_device *dev; | 626 | struct sockaddr_pppox *sp = (struct sockaddr_pppox *)uservaddr; |
573 | struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr; | ||
574 | struct pppox_sock *po = pppox_sk(sk); | 627 | struct pppox_sock *po = pppox_sk(sk); |
628 | struct net_device *dev; | ||
629 | struct pppoe_net *pn; | ||
575 | int error; | 630 | int error; |
576 | 631 | ||
577 | lock_sock(sk); | 632 | lock_sock(sk); |
@@ -582,44 +637,45 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, | |||
582 | 637 | ||
583 | /* Check for already bound sockets */ | 638 | /* Check for already bound sockets */ |
584 | error = -EBUSY; | 639 | error = -EBUSY; |
585 | if ((sk->sk_state & PPPOX_CONNECTED) && sp->sa_addr.pppoe.sid) | 640 | if ((sk->sk_state & PPPOX_CONNECTED) && |
641 | stage_session(sp->sa_addr.pppoe.sid)) | ||
586 | goto end; | 642 | goto end; |
587 | 643 | ||
588 | /* Check for already disconnected sockets, on attempts to disconnect */ | 644 | /* Check for already disconnected sockets, on attempts to disconnect */ |
589 | error = -EALREADY; | 645 | error = -EALREADY; |
590 | if ((sk->sk_state & PPPOX_DEAD) && !sp->sa_addr.pppoe.sid ) | 646 | if ((sk->sk_state & PPPOX_DEAD) && |
647 | !stage_session(sp->sa_addr.pppoe.sid)) | ||
591 | goto end; | 648 | goto end; |
592 | 649 | ||
593 | error = 0; | 650 | error = 0; |
594 | if (po->pppoe_pa.sid) { | ||
595 | pppox_unbind_sock(sk); | ||
596 | |||
597 | /* Delete the old binding */ | ||
598 | delete_item(po->pppoe_pa.sid,po->pppoe_pa.remote,po->pppoe_ifindex); | ||
599 | 651 | ||
600 | if(po->pppoe_dev) | 652 | /* Delete the old binding */ |
653 | if (stage_session(po->pppoe_pa.sid)) { | ||
654 | pppox_unbind_sock(sk); | ||
655 | if (po->pppoe_dev) { | ||
656 | pn = pppoe_pernet(dev_net(po->pppoe_dev)); | ||
657 | delete_item(pn, po->pppoe_pa.sid, | ||
658 | po->pppoe_pa.remote, po->pppoe_ifindex); | ||
601 | dev_put(po->pppoe_dev); | 659 | dev_put(po->pppoe_dev); |
602 | 660 | } | |
603 | memset(sk_pppox(po) + 1, 0, | 661 | memset(sk_pppox(po) + 1, 0, |
604 | sizeof(struct pppox_sock) - sizeof(struct sock)); | 662 | sizeof(struct pppox_sock) - sizeof(struct sock)); |
605 | |||
606 | sk->sk_state = PPPOX_NONE; | 663 | sk->sk_state = PPPOX_NONE; |
607 | } | 664 | } |
608 | 665 | ||
609 | /* Don't re-bind if sid==0 */ | 666 | /* Re-bind in session stage only */ |
610 | if (sp->sa_addr.pppoe.sid != 0) { | 667 | if (stage_session(sp->sa_addr.pppoe.sid)) { |
611 | dev = dev_get_by_name(&init_net, sp->sa_addr.pppoe.dev); | ||
612 | |||
613 | error = -ENODEV; | 668 | error = -ENODEV; |
669 | dev = dev_get_by_name(sock_net(sk), sp->sa_addr.pppoe.dev); | ||
614 | if (!dev) | 670 | if (!dev) |
615 | goto end; | 671 | goto end; |
616 | 672 | ||
617 | po->pppoe_dev = dev; | 673 | po->pppoe_dev = dev; |
618 | po->pppoe_ifindex = dev->ifindex; | 674 | po->pppoe_ifindex = dev->ifindex; |
619 | 675 | pn = pppoe_pernet(dev_net(dev)); | |
620 | write_lock_bh(&pppoe_hash_lock); | 676 | write_lock_bh(&pn->hash_lock); |
621 | if (!(dev->flags & IFF_UP)){ | 677 | if (!(dev->flags & IFF_UP)) { |
622 | write_unlock_bh(&pppoe_hash_lock); | 678 | write_unlock_bh(&pn->hash_lock); |
623 | goto err_put; | 679 | goto err_put; |
624 | } | 680 | } |
625 | 681 | ||
@@ -627,8 +683,8 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, | |||
627 | &sp->sa_addr.pppoe, | 683 | &sp->sa_addr.pppoe, |
628 | sizeof(struct pppoe_addr)); | 684 | sizeof(struct pppoe_addr)); |
629 | 685 | ||
630 | error = __set_item(po); | 686 | error = __set_item(pn, po); |
631 | write_unlock_bh(&pppoe_hash_lock); | 687 | write_unlock_bh(&pn->hash_lock); |
632 | if (error < 0) | 688 | if (error < 0) |
633 | goto err_put; | 689 | goto err_put; |
634 | 690 | ||
@@ -639,7 +695,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, | |||
639 | po->chan.private = sk; | 695 | po->chan.private = sk; |
640 | po->chan.ops = &pppoe_chan_ops; | 696 | po->chan.ops = &pppoe_chan_ops; |
641 | 697 | ||
642 | error = ppp_register_channel(&po->chan); | 698 | error = ppp_register_net_channel(dev_net(dev), &po->chan); |
643 | if (error) | 699 | if (error) |
644 | goto err_put; | 700 | goto err_put; |
645 | 701 | ||
@@ -648,7 +704,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, | |||
648 | 704 | ||
649 | po->num = sp->sa_addr.pppoe.sid; | 705 | po->num = sp->sa_addr.pppoe.sid; |
650 | 706 | ||
651 | end: | 707 | end: |
652 | release_sock(sk); | 708 | release_sock(sk); |
653 | return error; | 709 | return error; |
654 | err_put: | 710 | err_put: |
@@ -659,7 +715,6 @@ err_put: | |||
659 | goto end; | 715 | goto end; |
660 | } | 716 | } |
661 | 717 | ||
662 | |||
663 | static int pppoe_getname(struct socket *sock, struct sockaddr *uaddr, | 718 | static int pppoe_getname(struct socket *sock, struct sockaddr *uaddr, |
664 | int *usockaddr_len, int peer) | 719 | int *usockaddr_len, int peer) |
665 | { | 720 | { |
@@ -678,7 +733,6 @@ static int pppoe_getname(struct socket *sock, struct sockaddr *uaddr, | |||
678 | return 0; | 733 | return 0; |
679 | } | 734 | } |
680 | 735 | ||
681 | |||
682 | static int pppoe_ioctl(struct socket *sock, unsigned int cmd, | 736 | static int pppoe_ioctl(struct socket *sock, unsigned int cmd, |
683 | unsigned long arg) | 737 | unsigned long arg) |
684 | { | 738 | { |
@@ -690,7 +744,6 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd, | |||
690 | switch (cmd) { | 744 | switch (cmd) { |
691 | case PPPIOCGMRU: | 745 | case PPPIOCGMRU: |
692 | err = -ENXIO; | 746 | err = -ENXIO; |
693 | |||
694 | if (!(sk->sk_state & PPPOX_CONNECTED)) | 747 | if (!(sk->sk_state & PPPOX_CONNECTED)) |
695 | break; | 748 | break; |
696 | 749 | ||
@@ -698,7 +751,7 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd, | |||
698 | if (put_user(po->pppoe_dev->mtu - | 751 | if (put_user(po->pppoe_dev->mtu - |
699 | sizeof(struct pppoe_hdr) - | 752 | sizeof(struct pppoe_hdr) - |
700 | PPP_HDRLEN, | 753 | PPP_HDRLEN, |
701 | (int __user *) arg)) | 754 | (int __user *)arg)) |
702 | break; | 755 | break; |
703 | err = 0; | 756 | err = 0; |
704 | break; | 757 | break; |
@@ -709,7 +762,7 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd, | |||
709 | break; | 762 | break; |
710 | 763 | ||
711 | err = -EFAULT; | 764 | err = -EFAULT; |
712 | if (get_user(val,(int __user *) arg)) | 765 | if (get_user(val, (int __user *)arg)) |
713 | break; | 766 | break; |
714 | 767 | ||
715 | if (val < (po->pppoe_dev->mtu | 768 | if (val < (po->pppoe_dev->mtu |
@@ -722,7 +775,7 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd, | |||
722 | 775 | ||
723 | case PPPIOCSFLAGS: | 776 | case PPPIOCSFLAGS: |
724 | err = -EFAULT; | 777 | err = -EFAULT; |
725 | if (get_user(val, (int __user *) arg)) | 778 | if (get_user(val, (int __user *)arg)) |
726 | break; | 779 | break; |
727 | err = 0; | 780 | err = 0; |
728 | break; | 781 | break; |
@@ -749,13 +802,12 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd, | |||
749 | 802 | ||
750 | err = -EINVAL; | 803 | err = -EINVAL; |
751 | if (po->pppoe_relay.sa_family != AF_PPPOX || | 804 | if (po->pppoe_relay.sa_family != AF_PPPOX || |
752 | po->pppoe_relay.sa_protocol!= PX_PROTO_OE) | 805 | po->pppoe_relay.sa_protocol != PX_PROTO_OE) |
753 | break; | 806 | break; |
754 | 807 | ||
755 | /* Check that the socket referenced by the address | 808 | /* Check that the socket referenced by the address |
756 | actually exists. */ | 809 | actually exists. */ |
757 | relay_po = get_item_by_addr(&po->pppoe_relay); | 810 | relay_po = get_item_by_addr(sock_net(sk), &po->pppoe_relay); |
758 | |||
759 | if (!relay_po) | 811 | if (!relay_po) |
760 | break; | 812 | break; |
761 | 813 | ||
@@ -781,7 +833,6 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd, | |||
781 | return err; | 833 | return err; |
782 | } | 834 | } |
783 | 835 | ||
784 | |||
785 | static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, | 836 | static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, |
786 | struct msghdr *m, size_t total_len) | 837 | struct msghdr *m, size_t total_len) |
787 | { | 838 | { |
@@ -808,7 +859,7 @@ static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
808 | dev = po->pppoe_dev; | 859 | dev = po->pppoe_dev; |
809 | 860 | ||
810 | error = -EMSGSIZE; | 861 | error = -EMSGSIZE; |
811 | if (total_len > (dev->mtu + dev->hard_header_len)) | 862 | if (total_len > (dev->mtu + dev->hard_header_len)) |
812 | goto end; | 863 | goto end; |
813 | 864 | ||
814 | 865 | ||
@@ -826,13 +877,12 @@ static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
826 | skb->dev = dev; | 877 | skb->dev = dev; |
827 | 878 | ||
828 | skb->priority = sk->sk_priority; | 879 | skb->priority = sk->sk_priority; |
829 | skb->protocol = __constant_htons(ETH_P_PPP_SES); | 880 | skb->protocol = cpu_to_be16(ETH_P_PPP_SES); |
830 | 881 | ||
831 | ph = (struct pppoe_hdr *) skb_put(skb, total_len + sizeof(struct pppoe_hdr)); | 882 | ph = (struct pppoe_hdr *)skb_put(skb, total_len + sizeof(struct pppoe_hdr)); |
832 | start = (char *) &ph->tag[0]; | 883 | start = (char *)&ph->tag[0]; |
833 | 884 | ||
834 | error = memcpy_fromiovec(start, m->msg_iov, total_len); | 885 | error = memcpy_fromiovec(start, m->msg_iov, total_len); |
835 | |||
836 | if (error < 0) { | 886 | if (error < 0) { |
837 | kfree_skb(skb); | 887 | kfree_skb(skb); |
838 | goto end; | 888 | goto end; |
@@ -853,7 +903,6 @@ end: | |||
853 | return error; | 903 | return error; |
854 | } | 904 | } |
855 | 905 | ||
856 | |||
857 | /************************************************************************ | 906 | /************************************************************************ |
858 | * | 907 | * |
859 | * xmit function for internal use. | 908 | * xmit function for internal use. |
@@ -888,7 +937,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb) | |||
888 | ph->sid = po->num; | 937 | ph->sid = po->num; |
889 | ph->length = htons(data_len); | 938 | ph->length = htons(data_len); |
890 | 939 | ||
891 | skb->protocol = __constant_htons(ETH_P_PPP_SES); | 940 | skb->protocol = cpu_to_be16(ETH_P_PPP_SES); |
892 | skb->dev = dev; | 941 | skb->dev = dev; |
893 | 942 | ||
894 | dev_hard_header(skb, dev, ETH_P_PPP_SES, | 943 | dev_hard_header(skb, dev, ETH_P_PPP_SES, |
@@ -903,7 +952,6 @@ abort: | |||
903 | return 1; | 952 | return 1; |
904 | } | 953 | } |
905 | 954 | ||
906 | |||
907 | /************************************************************************ | 955 | /************************************************************************ |
908 | * | 956 | * |
909 | * xmit function called by generic PPP driver | 957 | * xmit function called by generic PPP driver |
@@ -912,11 +960,10 @@ abort: | |||
912 | ***********************************************************************/ | 960 | ***********************************************************************/ |
913 | static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb) | 961 | static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb) |
914 | { | 962 | { |
915 | struct sock *sk = (struct sock *) chan->private; | 963 | struct sock *sk = (struct sock *)chan->private; |
916 | return __pppoe_xmit(sk, skb); | 964 | return __pppoe_xmit(sk, skb); |
917 | } | 965 | } |
918 | 966 | ||
919 | |||
920 | static struct ppp_channel_ops pppoe_chan_ops = { | 967 | static struct ppp_channel_ops pppoe_chan_ops = { |
921 | .start_xmit = pppoe_xmit, | 968 | .start_xmit = pppoe_xmit, |
922 | }; | 969 | }; |
@@ -935,7 +982,6 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
935 | 982 | ||
936 | skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, | 983 | skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, |
937 | flags & MSG_DONTWAIT, &error); | 984 | flags & MSG_DONTWAIT, &error); |
938 | |||
939 | if (error < 0) | 985 | if (error < 0) |
940 | goto end; | 986 | goto end; |
941 | 987 | ||
@@ -968,44 +1014,47 @@ static int pppoe_seq_show(struct seq_file *seq, void *v) | |||
968 | dev_name = po->pppoe_pa.dev; | 1014 | dev_name = po->pppoe_pa.dev; |
969 | 1015 | ||
970 | seq_printf(seq, "%08X %pM %8s\n", | 1016 | seq_printf(seq, "%08X %pM %8s\n", |
971 | po->pppoe_pa.sid, po->pppoe_pa.remote, dev_name); | 1017 | po->pppoe_pa.sid, po->pppoe_pa.remote, dev_name); |
972 | out: | 1018 | out: |
973 | return 0; | 1019 | return 0; |
974 | } | 1020 | } |
975 | 1021 | ||
976 | static __inline__ struct pppox_sock *pppoe_get_idx(loff_t pos) | 1022 | static inline struct pppox_sock *pppoe_get_idx(struct pppoe_net *pn, loff_t pos) |
977 | { | 1023 | { |
978 | struct pppox_sock *po; | 1024 | struct pppox_sock *po; |
979 | int i = 0; | 1025 | int i; |
980 | 1026 | ||
981 | for (; i < PPPOE_HASH_SIZE; i++) { | 1027 | for (i = 0; i < PPPOE_HASH_SIZE; i++) { |
982 | po = item_hash_table[i]; | 1028 | po = pn->hash_table[i]; |
983 | while (po) { | 1029 | while (po) { |
984 | if (!pos--) | 1030 | if (!pos--) |
985 | goto out; | 1031 | goto out; |
986 | po = po->next; | 1032 | po = po->next; |
987 | } | 1033 | } |
988 | } | 1034 | } |
1035 | |||
989 | out: | 1036 | out: |
990 | return po; | 1037 | return po; |
991 | } | 1038 | } |
992 | 1039 | ||
993 | static void *pppoe_seq_start(struct seq_file *seq, loff_t *pos) | 1040 | static void *pppoe_seq_start(struct seq_file *seq, loff_t *pos) |
994 | __acquires(pppoe_hash_lock) | 1041 | __acquires(pn->hash_lock) |
995 | { | 1042 | { |
1043 | struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq)); | ||
996 | loff_t l = *pos; | 1044 | loff_t l = *pos; |
997 | 1045 | ||
998 | read_lock_bh(&pppoe_hash_lock); | 1046 | read_lock_bh(&pn->hash_lock); |
999 | return l ? pppoe_get_idx(--l) : SEQ_START_TOKEN; | 1047 | return l ? pppoe_get_idx(pn, --l) : SEQ_START_TOKEN; |
1000 | } | 1048 | } |
1001 | 1049 | ||
1002 | static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos) | 1050 | static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
1003 | { | 1051 | { |
1052 | struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq)); | ||
1004 | struct pppox_sock *po; | 1053 | struct pppox_sock *po; |
1005 | 1054 | ||
1006 | ++*pos; | 1055 | ++*pos; |
1007 | if (v == SEQ_START_TOKEN) { | 1056 | if (v == SEQ_START_TOKEN) { |
1008 | po = pppoe_get_idx(0); | 1057 | po = pppoe_get_idx(pn, 0); |
1009 | goto out; | 1058 | goto out; |
1010 | } | 1059 | } |
1011 | po = v; | 1060 | po = v; |
@@ -1015,22 +1064,24 @@ static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
1015 | int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote); | 1064 | int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote); |
1016 | 1065 | ||
1017 | while (++hash < PPPOE_HASH_SIZE) { | 1066 | while (++hash < PPPOE_HASH_SIZE) { |
1018 | po = item_hash_table[hash]; | 1067 | po = pn->hash_table[hash]; |
1019 | if (po) | 1068 | if (po) |
1020 | break; | 1069 | break; |
1021 | } | 1070 | } |
1022 | } | 1071 | } |
1072 | |||
1023 | out: | 1073 | out: |
1024 | return po; | 1074 | return po; |
1025 | } | 1075 | } |
1026 | 1076 | ||
1027 | static void pppoe_seq_stop(struct seq_file *seq, void *v) | 1077 | static void pppoe_seq_stop(struct seq_file *seq, void *v) |
1028 | __releases(pppoe_hash_lock) | 1078 | __releases(pn->hash_lock) |
1029 | { | 1079 | { |
1030 | read_unlock_bh(&pppoe_hash_lock); | 1080 | struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq)); |
1081 | read_unlock_bh(&pn->hash_lock); | ||
1031 | } | 1082 | } |
1032 | 1083 | ||
1033 | static struct seq_operations pppoe_seq_ops = { | 1084 | static const struct seq_operations pppoe_seq_ops = { |
1034 | .start = pppoe_seq_start, | 1085 | .start = pppoe_seq_start, |
1035 | .next = pppoe_seq_next, | 1086 | .next = pppoe_seq_next, |
1036 | .stop = pppoe_seq_stop, | 1087 | .stop = pppoe_seq_stop, |
@@ -1039,7 +1090,8 @@ static struct seq_operations pppoe_seq_ops = { | |||
1039 | 1090 | ||
1040 | static int pppoe_seq_open(struct inode *inode, struct file *file) | 1091 | static int pppoe_seq_open(struct inode *inode, struct file *file) |
1041 | { | 1092 | { |
1042 | return seq_open(file, &pppoe_seq_ops); | 1093 | return seq_open_net(inode, file, &pppoe_seq_ops, |
1094 | sizeof(struct seq_net_private)); | ||
1043 | } | 1095 | } |
1044 | 1096 | ||
1045 | static const struct file_operations pppoe_seq_fops = { | 1097 | static const struct file_operations pppoe_seq_fops = { |
@@ -1047,74 +1099,115 @@ static const struct file_operations pppoe_seq_fops = { | |||
1047 | .open = pppoe_seq_open, | 1099 | .open = pppoe_seq_open, |
1048 | .read = seq_read, | 1100 | .read = seq_read, |
1049 | .llseek = seq_lseek, | 1101 | .llseek = seq_lseek, |
1050 | .release = seq_release, | 1102 | .release = seq_release_net, |
1051 | }; | 1103 | }; |
1052 | 1104 | ||
1053 | static int __init pppoe_proc_init(void) | ||
1054 | { | ||
1055 | struct proc_dir_entry *p; | ||
1056 | |||
1057 | p = proc_net_fops_create(&init_net, "pppoe", S_IRUGO, &pppoe_seq_fops); | ||
1058 | if (!p) | ||
1059 | return -ENOMEM; | ||
1060 | return 0; | ||
1061 | } | ||
1062 | #else /* CONFIG_PROC_FS */ | ||
1063 | static inline int pppoe_proc_init(void) { return 0; } | ||
1064 | #endif /* CONFIG_PROC_FS */ | 1105 | #endif /* CONFIG_PROC_FS */ |
1065 | 1106 | ||
1066 | static const struct proto_ops pppoe_ops = { | 1107 | static const struct proto_ops pppoe_ops = { |
1067 | .family = AF_PPPOX, | 1108 | .family = AF_PPPOX, |
1068 | .owner = THIS_MODULE, | 1109 | .owner = THIS_MODULE, |
1069 | .release = pppoe_release, | 1110 | .release = pppoe_release, |
1070 | .bind = sock_no_bind, | 1111 | .bind = sock_no_bind, |
1071 | .connect = pppoe_connect, | 1112 | .connect = pppoe_connect, |
1072 | .socketpair = sock_no_socketpair, | 1113 | .socketpair = sock_no_socketpair, |
1073 | .accept = sock_no_accept, | 1114 | .accept = sock_no_accept, |
1074 | .getname = pppoe_getname, | 1115 | .getname = pppoe_getname, |
1075 | .poll = datagram_poll, | 1116 | .poll = datagram_poll, |
1076 | .listen = sock_no_listen, | 1117 | .listen = sock_no_listen, |
1077 | .shutdown = sock_no_shutdown, | 1118 | .shutdown = sock_no_shutdown, |
1078 | .setsockopt = sock_no_setsockopt, | 1119 | .setsockopt = sock_no_setsockopt, |
1079 | .getsockopt = sock_no_getsockopt, | 1120 | .getsockopt = sock_no_getsockopt, |
1080 | .sendmsg = pppoe_sendmsg, | 1121 | .sendmsg = pppoe_sendmsg, |
1081 | .recvmsg = pppoe_recvmsg, | 1122 | .recvmsg = pppoe_recvmsg, |
1082 | .mmap = sock_no_mmap, | 1123 | .mmap = sock_no_mmap, |
1083 | .ioctl = pppox_ioctl, | 1124 | .ioctl = pppox_ioctl, |
1084 | }; | 1125 | }; |
1085 | 1126 | ||
1086 | static struct pppox_proto pppoe_proto = { | 1127 | static struct pppox_proto pppoe_proto = { |
1087 | .create = pppoe_create, | 1128 | .create = pppoe_create, |
1088 | .ioctl = pppoe_ioctl, | 1129 | .ioctl = pppoe_ioctl, |
1089 | .owner = THIS_MODULE, | 1130 | .owner = THIS_MODULE, |
1090 | }; | 1131 | }; |
1091 | 1132 | ||
1133 | static __net_init int pppoe_init_net(struct net *net) | ||
1134 | { | ||
1135 | struct pppoe_net *pn; | ||
1136 | struct proc_dir_entry *pde; | ||
1137 | int err; | ||
1138 | |||
1139 | pn = kzalloc(sizeof(*pn), GFP_KERNEL); | ||
1140 | if (!pn) | ||
1141 | return -ENOMEM; | ||
1142 | |||
1143 | rwlock_init(&pn->hash_lock); | ||
1144 | |||
1145 | err = net_assign_generic(net, pppoe_net_id, pn); | ||
1146 | if (err) | ||
1147 | goto out; | ||
1148 | |||
1149 | pde = proc_net_fops_create(net, "pppoe", S_IRUGO, &pppoe_seq_fops); | ||
1150 | #ifdef CONFIG_PROC_FS | ||
1151 | if (!pde) { | ||
1152 | err = -ENOMEM; | ||
1153 | goto out; | ||
1154 | } | ||
1155 | #endif | ||
1156 | |||
1157 | return 0; | ||
1158 | |||
1159 | out: | ||
1160 | kfree(pn); | ||
1161 | return err; | ||
1162 | } | ||
1163 | |||
1164 | static __net_exit void pppoe_exit_net(struct net *net) | ||
1165 | { | ||
1166 | struct pppoe_net *pn; | ||
1167 | |||
1168 | proc_net_remove(net, "pppoe"); | ||
1169 | pn = net_generic(net, pppoe_net_id); | ||
1170 | /* | ||
1171 | * if someone has cached our net then | ||
1172 | * further net_generic call will return NULL | ||
1173 | */ | ||
1174 | net_assign_generic(net, pppoe_net_id, NULL); | ||
1175 | kfree(pn); | ||
1176 | } | ||
1177 | |||
1178 | static __net_initdata struct pernet_operations pppoe_net_ops = { | ||
1179 | .init = pppoe_init_net, | ||
1180 | .exit = pppoe_exit_net, | ||
1181 | }; | ||
1092 | 1182 | ||
1093 | static int __init pppoe_init(void) | 1183 | static int __init pppoe_init(void) |
1094 | { | 1184 | { |
1095 | int err = proto_register(&pppoe_sk_proto, 0); | 1185 | int err; |
1096 | 1186 | ||
1187 | err = proto_register(&pppoe_sk_proto, 0); | ||
1097 | if (err) | 1188 | if (err) |
1098 | goto out; | 1189 | goto out; |
1099 | 1190 | ||
1100 | err = register_pppox_proto(PX_PROTO_OE, &pppoe_proto); | 1191 | err = register_pppox_proto(PX_PROTO_OE, &pppoe_proto); |
1101 | if (err) | 1192 | if (err) |
1102 | goto out_unregister_pppoe_proto; | 1193 | goto out_unregister_pppoe_proto; |
1103 | 1194 | ||
1104 | err = pppoe_proc_init(); | 1195 | err = register_pernet_gen_device(&pppoe_net_id, &pppoe_net_ops); |
1105 | if (err) | 1196 | if (err) |
1106 | goto out_unregister_pppox_proto; | 1197 | goto out_unregister_pppox_proto; |
1107 | 1198 | ||
1108 | dev_add_pack(&pppoes_ptype); | 1199 | dev_add_pack(&pppoes_ptype); |
1109 | dev_add_pack(&pppoed_ptype); | 1200 | dev_add_pack(&pppoed_ptype); |
1110 | register_netdevice_notifier(&pppoe_notifier); | 1201 | register_netdevice_notifier(&pppoe_notifier); |
1111 | out: | 1202 | |
1112 | return err; | 1203 | return 0; |
1204 | |||
1113 | out_unregister_pppox_proto: | 1205 | out_unregister_pppox_proto: |
1114 | unregister_pppox_proto(PX_PROTO_OE); | 1206 | unregister_pppox_proto(PX_PROTO_OE); |
1115 | out_unregister_pppoe_proto: | 1207 | out_unregister_pppoe_proto: |
1116 | proto_unregister(&pppoe_sk_proto); | 1208 | proto_unregister(&pppoe_sk_proto); |
1117 | goto out; | 1209 | out: |
1210 | return err; | ||
1118 | } | 1211 | } |
1119 | 1212 | ||
1120 | static void __exit pppoe_exit(void) | 1213 | static void __exit pppoe_exit(void) |
@@ -1123,7 +1216,7 @@ static void __exit pppoe_exit(void) | |||
1123 | dev_remove_pack(&pppoes_ptype); | 1216 | dev_remove_pack(&pppoes_ptype); |
1124 | dev_remove_pack(&pppoed_ptype); | 1217 | dev_remove_pack(&pppoed_ptype); |
1125 | unregister_netdevice_notifier(&pppoe_notifier); | 1218 | unregister_netdevice_notifier(&pppoe_notifier); |
1126 | remove_proc_entry("pppoe", init_net.proc_net); | 1219 | unregister_pernet_gen_device(pppoe_net_id, &pppoe_net_ops); |
1127 | proto_unregister(&pppoe_sk_proto); | 1220 | proto_unregister(&pppoe_sk_proto); |
1128 | } | 1221 | } |
1129 | 1222 | ||
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c index f1a946785c6a..15f4a43a6890 100644 --- a/drivers/net/pppol2tp.c +++ b/drivers/net/pppol2tp.c | |||
@@ -90,7 +90,9 @@ | |||
90 | #include <linux/hash.h> | 90 | #include <linux/hash.h> |
91 | #include <linux/sort.h> | 91 | #include <linux/sort.h> |
92 | #include <linux/proc_fs.h> | 92 | #include <linux/proc_fs.h> |
93 | #include <linux/nsproxy.h> | ||
93 | #include <net/net_namespace.h> | 94 | #include <net/net_namespace.h> |
95 | #include <net/netns/generic.h> | ||
94 | #include <net/dst.h> | 96 | #include <net/dst.h> |
95 | #include <net/ip.h> | 97 | #include <net/ip.h> |
96 | #include <net/udp.h> | 98 | #include <net/udp.h> |
@@ -204,6 +206,7 @@ struct pppol2tp_tunnel | |||
204 | struct sock *sock; /* Parent socket */ | 206 | struct sock *sock; /* Parent socket */ |
205 | struct list_head list; /* Keep a list of all open | 207 | struct list_head list; /* Keep a list of all open |
206 | * prepared sockets */ | 208 | * prepared sockets */ |
209 | struct net *pppol2tp_net; /* the net we belong to */ | ||
207 | 210 | ||
208 | atomic_t ref_count; | 211 | atomic_t ref_count; |
209 | }; | 212 | }; |
@@ -227,8 +230,20 @@ static atomic_t pppol2tp_tunnel_count; | |||
227 | static atomic_t pppol2tp_session_count; | 230 | static atomic_t pppol2tp_session_count; |
228 | static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit , NULL }; | 231 | static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit , NULL }; |
229 | static struct proto_ops pppol2tp_ops; | 232 | static struct proto_ops pppol2tp_ops; |
230 | static LIST_HEAD(pppol2tp_tunnel_list); | 233 | |
231 | static DEFINE_RWLOCK(pppol2tp_tunnel_list_lock); | 234 | /* per-net private data for this module */ |
235 | static unsigned int pppol2tp_net_id; | ||
236 | struct pppol2tp_net { | ||
237 | struct list_head pppol2tp_tunnel_list; | ||
238 | rwlock_t pppol2tp_tunnel_list_lock; | ||
239 | }; | ||
240 | |||
241 | static inline struct pppol2tp_net *pppol2tp_pernet(struct net *net) | ||
242 | { | ||
243 | BUG_ON(!net); | ||
244 | |||
245 | return net_generic(net, pppol2tp_net_id); | ||
246 | } | ||
232 | 247 | ||
233 | /* Helpers to obtain tunnel/session contexts from sockets. | 248 | /* Helpers to obtain tunnel/session contexts from sockets. |
234 | */ | 249 | */ |
@@ -321,18 +336,19 @@ pppol2tp_session_find(struct pppol2tp_tunnel *tunnel, u16 session_id) | |||
321 | 336 | ||
322 | /* Lookup a tunnel by id | 337 | /* Lookup a tunnel by id |
323 | */ | 338 | */ |
324 | static struct pppol2tp_tunnel *pppol2tp_tunnel_find(u16 tunnel_id) | 339 | static struct pppol2tp_tunnel *pppol2tp_tunnel_find(struct net *net, u16 tunnel_id) |
325 | { | 340 | { |
326 | struct pppol2tp_tunnel *tunnel = NULL; | 341 | struct pppol2tp_tunnel *tunnel; |
342 | struct pppol2tp_net *pn = pppol2tp_pernet(net); | ||
327 | 343 | ||
328 | read_lock_bh(&pppol2tp_tunnel_list_lock); | 344 | read_lock_bh(&pn->pppol2tp_tunnel_list_lock); |
329 | list_for_each_entry(tunnel, &pppol2tp_tunnel_list, list) { | 345 | list_for_each_entry(tunnel, &pn->pppol2tp_tunnel_list, list) { |
330 | if (tunnel->stats.tunnel_id == tunnel_id) { | 346 | if (tunnel->stats.tunnel_id == tunnel_id) { |
331 | read_unlock_bh(&pppol2tp_tunnel_list_lock); | 347 | read_unlock_bh(&pn->pppol2tp_tunnel_list_lock); |
332 | return tunnel; | 348 | return tunnel; |
333 | } | 349 | } |
334 | } | 350 | } |
335 | read_unlock_bh(&pppol2tp_tunnel_list_lock); | 351 | read_unlock_bh(&pn->pppol2tp_tunnel_list_lock); |
336 | 352 | ||
337 | return NULL; | 353 | return NULL; |
338 | } | 354 | } |
@@ -1287,10 +1303,12 @@ again: | |||
1287 | */ | 1303 | */ |
1288 | static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel) | 1304 | static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel) |
1289 | { | 1305 | { |
1306 | struct pppol2tp_net *pn = pppol2tp_pernet(tunnel->pppol2tp_net); | ||
1307 | |||
1290 | /* Remove from socket list */ | 1308 | /* Remove from socket list */ |
1291 | write_lock_bh(&pppol2tp_tunnel_list_lock); | 1309 | write_lock_bh(&pn->pppol2tp_tunnel_list_lock); |
1292 | list_del_init(&tunnel->list); | 1310 | list_del_init(&tunnel->list); |
1293 | write_unlock_bh(&pppol2tp_tunnel_list_lock); | 1311 | write_unlock_bh(&pn->pppol2tp_tunnel_list_lock); |
1294 | 1312 | ||
1295 | atomic_dec(&pppol2tp_tunnel_count); | 1313 | atomic_dec(&pppol2tp_tunnel_count); |
1296 | kfree(tunnel); | 1314 | kfree(tunnel); |
@@ -1444,13 +1462,14 @@ error: | |||
1444 | /* Internal function to prepare a tunnel (UDP) socket to have PPPoX | 1462 | /* Internal function to prepare a tunnel (UDP) socket to have PPPoX |
1445 | * sockets attached to it. | 1463 | * sockets attached to it. |
1446 | */ | 1464 | */ |
1447 | static struct sock *pppol2tp_prepare_tunnel_socket(int fd, u16 tunnel_id, | 1465 | static struct sock *pppol2tp_prepare_tunnel_socket(struct net *net, |
1448 | int *error) | 1466 | int fd, u16 tunnel_id, int *error) |
1449 | { | 1467 | { |
1450 | int err; | 1468 | int err; |
1451 | struct socket *sock = NULL; | 1469 | struct socket *sock = NULL; |
1452 | struct sock *sk; | 1470 | struct sock *sk; |
1453 | struct pppol2tp_tunnel *tunnel; | 1471 | struct pppol2tp_tunnel *tunnel; |
1472 | struct pppol2tp_net *pn; | ||
1454 | struct sock *ret = NULL; | 1473 | struct sock *ret = NULL; |
1455 | 1474 | ||
1456 | /* Get the tunnel UDP socket from the fd, which was opened by | 1475 | /* Get the tunnel UDP socket from the fd, which was opened by |
@@ -1524,11 +1543,15 @@ static struct sock *pppol2tp_prepare_tunnel_socket(int fd, u16 tunnel_id, | |||
1524 | /* Misc init */ | 1543 | /* Misc init */ |
1525 | rwlock_init(&tunnel->hlist_lock); | 1544 | rwlock_init(&tunnel->hlist_lock); |
1526 | 1545 | ||
1546 | /* The net we belong to */ | ||
1547 | tunnel->pppol2tp_net = net; | ||
1548 | pn = pppol2tp_pernet(net); | ||
1549 | |||
1527 | /* Add tunnel to our list */ | 1550 | /* Add tunnel to our list */ |
1528 | INIT_LIST_HEAD(&tunnel->list); | 1551 | INIT_LIST_HEAD(&tunnel->list); |
1529 | write_lock_bh(&pppol2tp_tunnel_list_lock); | 1552 | write_lock_bh(&pn->pppol2tp_tunnel_list_lock); |
1530 | list_add(&tunnel->list, &pppol2tp_tunnel_list); | 1553 | list_add(&tunnel->list, &pn->pppol2tp_tunnel_list); |
1531 | write_unlock_bh(&pppol2tp_tunnel_list_lock); | 1554 | write_unlock_bh(&pn->pppol2tp_tunnel_list_lock); |
1532 | atomic_inc(&pppol2tp_tunnel_count); | 1555 | atomic_inc(&pppol2tp_tunnel_count); |
1533 | 1556 | ||
1534 | /* Bump the reference count. The tunnel context is deleted | 1557 | /* Bump the reference count. The tunnel context is deleted |
@@ -1629,7 +1652,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, | |||
1629 | * tunnel id. | 1652 | * tunnel id. |
1630 | */ | 1653 | */ |
1631 | if ((sp->pppol2tp.s_session == 0) && (sp->pppol2tp.d_session == 0)) { | 1654 | if ((sp->pppol2tp.s_session == 0) && (sp->pppol2tp.d_session == 0)) { |
1632 | tunnel_sock = pppol2tp_prepare_tunnel_socket(sp->pppol2tp.fd, | 1655 | tunnel_sock = pppol2tp_prepare_tunnel_socket(sock_net(sk), |
1656 | sp->pppol2tp.fd, | ||
1633 | sp->pppol2tp.s_tunnel, | 1657 | sp->pppol2tp.s_tunnel, |
1634 | &error); | 1658 | &error); |
1635 | if (tunnel_sock == NULL) | 1659 | if (tunnel_sock == NULL) |
@@ -1637,7 +1661,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, | |||
1637 | 1661 | ||
1638 | tunnel = tunnel_sock->sk_user_data; | 1662 | tunnel = tunnel_sock->sk_user_data; |
1639 | } else { | 1663 | } else { |
1640 | tunnel = pppol2tp_tunnel_find(sp->pppol2tp.s_tunnel); | 1664 | tunnel = pppol2tp_tunnel_find(sock_net(sk), sp->pppol2tp.s_tunnel); |
1641 | 1665 | ||
1642 | /* Error if we can't find the tunnel */ | 1666 | /* Error if we can't find the tunnel */ |
1643 | error = -ENOENT; | 1667 | error = -ENOENT; |
@@ -1725,7 +1749,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, | |||
1725 | po->chan.ops = &pppol2tp_chan_ops; | 1749 | po->chan.ops = &pppol2tp_chan_ops; |
1726 | po->chan.mtu = session->mtu; | 1750 | po->chan.mtu = session->mtu; |
1727 | 1751 | ||
1728 | error = ppp_register_channel(&po->chan); | 1752 | error = ppp_register_net_channel(sock_net(sk), &po->chan); |
1729 | if (error) | 1753 | if (error) |
1730 | goto end_put_tun; | 1754 | goto end_put_tun; |
1731 | 1755 | ||
@@ -2347,8 +2371,9 @@ end: | |||
2347 | #include <linux/seq_file.h> | 2371 | #include <linux/seq_file.h> |
2348 | 2372 | ||
2349 | struct pppol2tp_seq_data { | 2373 | struct pppol2tp_seq_data { |
2350 | struct pppol2tp_tunnel *tunnel; /* current tunnel */ | 2374 | struct seq_net_private p; |
2351 | struct pppol2tp_session *session; /* NULL means get first session in tunnel */ | 2375 | struct pppol2tp_tunnel *tunnel; /* current tunnel */ |
2376 | struct pppol2tp_session *session; /* NULL means get first session in tunnel */ | ||
2352 | }; | 2377 | }; |
2353 | 2378 | ||
2354 | static struct pppol2tp_session *next_session(struct pppol2tp_tunnel *tunnel, struct pppol2tp_session *curr) | 2379 | static struct pppol2tp_session *next_session(struct pppol2tp_tunnel *tunnel, struct pppol2tp_session *curr) |
@@ -2384,17 +2409,18 @@ out: | |||
2384 | return session; | 2409 | return session; |
2385 | } | 2410 | } |
2386 | 2411 | ||
2387 | static struct pppol2tp_tunnel *next_tunnel(struct pppol2tp_tunnel *curr) | 2412 | static struct pppol2tp_tunnel *next_tunnel(struct pppol2tp_net *pn, |
2413 | struct pppol2tp_tunnel *curr) | ||
2388 | { | 2414 | { |
2389 | struct pppol2tp_tunnel *tunnel = NULL; | 2415 | struct pppol2tp_tunnel *tunnel = NULL; |
2390 | 2416 | ||
2391 | read_lock_bh(&pppol2tp_tunnel_list_lock); | 2417 | read_lock_bh(&pn->pppol2tp_tunnel_list_lock); |
2392 | if (list_is_last(&curr->list, &pppol2tp_tunnel_list)) { | 2418 | if (list_is_last(&curr->list, &pn->pppol2tp_tunnel_list)) { |
2393 | goto out; | 2419 | goto out; |
2394 | } | 2420 | } |
2395 | tunnel = list_entry(curr->list.next, struct pppol2tp_tunnel, list); | 2421 | tunnel = list_entry(curr->list.next, struct pppol2tp_tunnel, list); |
2396 | out: | 2422 | out: |
2397 | read_unlock_bh(&pppol2tp_tunnel_list_lock); | 2423 | read_unlock_bh(&pn->pppol2tp_tunnel_list_lock); |
2398 | 2424 | ||
2399 | return tunnel; | 2425 | return tunnel; |
2400 | } | 2426 | } |
@@ -2402,6 +2428,7 @@ out: | |||
2402 | static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs) | 2428 | static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs) |
2403 | { | 2429 | { |
2404 | struct pppol2tp_seq_data *pd = SEQ_START_TOKEN; | 2430 | struct pppol2tp_seq_data *pd = SEQ_START_TOKEN; |
2431 | struct pppol2tp_net *pn; | ||
2405 | loff_t pos = *offs; | 2432 | loff_t pos = *offs; |
2406 | 2433 | ||
2407 | if (!pos) | 2434 | if (!pos) |
@@ -2409,14 +2436,15 @@ static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs) | |||
2409 | 2436 | ||
2410 | BUG_ON(m->private == NULL); | 2437 | BUG_ON(m->private == NULL); |
2411 | pd = m->private; | 2438 | pd = m->private; |
2439 | pn = pppol2tp_pernet(seq_file_net(m)); | ||
2412 | 2440 | ||
2413 | if (pd->tunnel == NULL) { | 2441 | if (pd->tunnel == NULL) { |
2414 | if (!list_empty(&pppol2tp_tunnel_list)) | 2442 | if (!list_empty(&pn->pppol2tp_tunnel_list)) |
2415 | pd->tunnel = list_entry(pppol2tp_tunnel_list.next, struct pppol2tp_tunnel, list); | 2443 | pd->tunnel = list_entry(pn->pppol2tp_tunnel_list.next, struct pppol2tp_tunnel, list); |
2416 | } else { | 2444 | } else { |
2417 | pd->session = next_session(pd->tunnel, pd->session); | 2445 | pd->session = next_session(pd->tunnel, pd->session); |
2418 | if (pd->session == NULL) { | 2446 | if (pd->session == NULL) { |
2419 | pd->tunnel = next_tunnel(pd->tunnel); | 2447 | pd->tunnel = next_tunnel(pn, pd->tunnel); |
2420 | } | 2448 | } |
2421 | } | 2449 | } |
2422 | 2450 | ||
@@ -2517,7 +2545,7 @@ out: | |||
2517 | return 0; | 2545 | return 0; |
2518 | } | 2546 | } |
2519 | 2547 | ||
2520 | static struct seq_operations pppol2tp_seq_ops = { | 2548 | static const struct seq_operations pppol2tp_seq_ops = { |
2521 | .start = pppol2tp_seq_start, | 2549 | .start = pppol2tp_seq_start, |
2522 | .next = pppol2tp_seq_next, | 2550 | .next = pppol2tp_seq_next, |
2523 | .stop = pppol2tp_seq_stop, | 2551 | .stop = pppol2tp_seq_stop, |
@@ -2530,51 +2558,18 @@ static struct seq_operations pppol2tp_seq_ops = { | |||
2530 | */ | 2558 | */ |
2531 | static int pppol2tp_proc_open(struct inode *inode, struct file *file) | 2559 | static int pppol2tp_proc_open(struct inode *inode, struct file *file) |
2532 | { | 2560 | { |
2533 | struct seq_file *m; | 2561 | return seq_open_net(inode, file, &pppol2tp_seq_ops, |
2534 | struct pppol2tp_seq_data *pd; | 2562 | sizeof(struct pppol2tp_seq_data)); |
2535 | int ret = 0; | ||
2536 | |||
2537 | ret = seq_open(file, &pppol2tp_seq_ops); | ||
2538 | if (ret < 0) | ||
2539 | goto out; | ||
2540 | |||
2541 | m = file->private_data; | ||
2542 | |||
2543 | /* Allocate and fill our proc_data for access later */ | ||
2544 | ret = -ENOMEM; | ||
2545 | m->private = kzalloc(sizeof(struct pppol2tp_seq_data), GFP_KERNEL); | ||
2546 | if (m->private == NULL) | ||
2547 | goto out; | ||
2548 | |||
2549 | pd = m->private; | ||
2550 | ret = 0; | ||
2551 | |||
2552 | out: | ||
2553 | return ret; | ||
2554 | } | ||
2555 | |||
2556 | /* Called when /proc file access completes. | ||
2557 | */ | ||
2558 | static int pppol2tp_proc_release(struct inode *inode, struct file *file) | ||
2559 | { | ||
2560 | struct seq_file *m = (struct seq_file *)file->private_data; | ||
2561 | |||
2562 | kfree(m->private); | ||
2563 | m->private = NULL; | ||
2564 | |||
2565 | return seq_release(inode, file); | ||
2566 | } | 2563 | } |
2567 | 2564 | ||
2568 | static struct file_operations pppol2tp_proc_fops = { | 2565 | static const struct file_operations pppol2tp_proc_fops = { |
2569 | .owner = THIS_MODULE, | 2566 | .owner = THIS_MODULE, |
2570 | .open = pppol2tp_proc_open, | 2567 | .open = pppol2tp_proc_open, |
2571 | .read = seq_read, | 2568 | .read = seq_read, |
2572 | .llseek = seq_lseek, | 2569 | .llseek = seq_lseek, |
2573 | .release = pppol2tp_proc_release, | 2570 | .release = seq_release_net, |
2574 | }; | 2571 | }; |
2575 | 2572 | ||
2576 | static struct proc_dir_entry *pppol2tp_proc; | ||
2577 | |||
2578 | #endif /* CONFIG_PROC_FS */ | 2573 | #endif /* CONFIG_PROC_FS */ |
2579 | 2574 | ||
2580 | /***************************************************************************** | 2575 | /***************************************************************************** |
@@ -2606,6 +2601,57 @@ static struct pppox_proto pppol2tp_proto = { | |||
2606 | .ioctl = pppol2tp_ioctl | 2601 | .ioctl = pppol2tp_ioctl |
2607 | }; | 2602 | }; |
2608 | 2603 | ||
2604 | static __net_init int pppol2tp_init_net(struct net *net) | ||
2605 | { | ||
2606 | struct pppol2tp_net *pn; | ||
2607 | struct proc_dir_entry *pde; | ||
2608 | int err; | ||
2609 | |||
2610 | pn = kzalloc(sizeof(*pn), GFP_KERNEL); | ||
2611 | if (!pn) | ||
2612 | return -ENOMEM; | ||
2613 | |||
2614 | INIT_LIST_HEAD(&pn->pppol2tp_tunnel_list); | ||
2615 | rwlock_init(&pn->pppol2tp_tunnel_list_lock); | ||
2616 | |||
2617 | err = net_assign_generic(net, pppol2tp_net_id, pn); | ||
2618 | if (err) | ||
2619 | goto out; | ||
2620 | |||
2621 | pde = proc_net_fops_create(net, "pppol2tp", S_IRUGO, &pppol2tp_proc_fops); | ||
2622 | #ifdef CONFIG_PROC_FS | ||
2623 | if (!pde) { | ||
2624 | err = -ENOMEM; | ||
2625 | goto out; | ||
2626 | } | ||
2627 | #endif | ||
2628 | |||
2629 | return 0; | ||
2630 | |||
2631 | out: | ||
2632 | kfree(pn); | ||
2633 | return err; | ||
2634 | } | ||
2635 | |||
2636 | static __net_exit void pppol2tp_exit_net(struct net *net) | ||
2637 | { | ||
2638 | struct pppoe_net *pn; | ||
2639 | |||
2640 | proc_net_remove(net, "pppol2tp"); | ||
2641 | pn = net_generic(net, pppol2tp_net_id); | ||
2642 | /* | ||
2643 | * if someone has cached our net then | ||
2644 | * further net_generic call will return NULL | ||
2645 | */ | ||
2646 | net_assign_generic(net, pppol2tp_net_id, NULL); | ||
2647 | kfree(pn); | ||
2648 | } | ||
2649 | |||
2650 | static __net_initdata struct pernet_operations pppol2tp_net_ops = { | ||
2651 | .init = pppol2tp_init_net, | ||
2652 | .exit = pppol2tp_exit_net, | ||
2653 | }; | ||
2654 | |||
2609 | static int __init pppol2tp_init(void) | 2655 | static int __init pppol2tp_init(void) |
2610 | { | 2656 | { |
2611 | int err; | 2657 | int err; |
@@ -2617,23 +2663,17 @@ static int __init pppol2tp_init(void) | |||
2617 | if (err) | 2663 | if (err) |
2618 | goto out_unregister_pppol2tp_proto; | 2664 | goto out_unregister_pppol2tp_proto; |
2619 | 2665 | ||
2620 | #ifdef CONFIG_PROC_FS | 2666 | err = register_pernet_gen_device(&pppol2tp_net_id, &pppol2tp_net_ops); |
2621 | pppol2tp_proc = proc_net_fops_create(&init_net, "pppol2tp", 0, | 2667 | if (err) |
2622 | &pppol2tp_proc_fops); | ||
2623 | if (!pppol2tp_proc) { | ||
2624 | err = -ENOMEM; | ||
2625 | goto out_unregister_pppox_proto; | 2668 | goto out_unregister_pppox_proto; |
2626 | } | 2669 | |
2627 | #endif /* CONFIG_PROC_FS */ | ||
2628 | printk(KERN_INFO "PPPoL2TP kernel driver, %s\n", | 2670 | printk(KERN_INFO "PPPoL2TP kernel driver, %s\n", |
2629 | PPPOL2TP_DRV_VERSION); | 2671 | PPPOL2TP_DRV_VERSION); |
2630 | 2672 | ||
2631 | out: | 2673 | out: |
2632 | return err; | 2674 | return err; |
2633 | #ifdef CONFIG_PROC_FS | ||
2634 | out_unregister_pppox_proto: | 2675 | out_unregister_pppox_proto: |
2635 | unregister_pppox_proto(PX_PROTO_OL2TP); | 2676 | unregister_pppox_proto(PX_PROTO_OL2TP); |
2636 | #endif | ||
2637 | out_unregister_pppol2tp_proto: | 2677 | out_unregister_pppol2tp_proto: |
2638 | proto_unregister(&pppol2tp_sk_proto); | 2678 | proto_unregister(&pppol2tp_sk_proto); |
2639 | goto out; | 2679 | goto out; |
@@ -2642,10 +2682,6 @@ out_unregister_pppol2tp_proto: | |||
2642 | static void __exit pppol2tp_exit(void) | 2682 | static void __exit pppol2tp_exit(void) |
2643 | { | 2683 | { |
2644 | unregister_pppox_proto(PX_PROTO_OL2TP); | 2684 | unregister_pppox_proto(PX_PROTO_OL2TP); |
2645 | |||
2646 | #ifdef CONFIG_PROC_FS | ||
2647 | remove_proc_entry("pppol2tp", init_net.proc_net); | ||
2648 | #endif | ||
2649 | proto_unregister(&pppol2tp_sk_proto); | 2685 | proto_unregister(&pppol2tp_sk_proto); |
2650 | } | 2686 | } |
2651 | 2687 | ||
diff --git a/drivers/net/pppox.c b/drivers/net/pppox.c index 03aecc97fb45..4f6d33fbc673 100644 --- a/drivers/net/pppox.c +++ b/drivers/net/pppox.c | |||
@@ -108,9 +108,6 @@ static int pppox_create(struct net *net, struct socket *sock, int protocol) | |||
108 | { | 108 | { |
109 | int rc = -EPROTOTYPE; | 109 | int rc = -EPROTOTYPE; |
110 | 110 | ||
111 | if (net != &init_net) | ||
112 | return -EAFNOSUPPORT; | ||
113 | |||
114 | if (protocol < 0 || protocol > PX_MAX_PROTO) | 111 | if (protocol < 0 || protocol > PX_MAX_PROTO) |
115 | goto out; | 112 | goto out; |
116 | 113 | ||
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c index 4b564eda5bd9..30900b30d532 100644 --- a/drivers/net/ps3_gelic_net.c +++ b/drivers/net/ps3_gelic_net.c | |||
@@ -745,7 +745,7 @@ static inline struct sk_buff *gelic_put_vlan_tag(struct sk_buff *skb, | |||
745 | /* Move the mac addresses to the top of buffer */ | 745 | /* Move the mac addresses to the top of buffer */ |
746 | memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN); | 746 | memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN); |
747 | 747 | ||
748 | veth->h_vlan_proto = __constant_htons(ETH_P_8021Q); | 748 | veth->h_vlan_proto = cpu_to_be16(ETH_P_8021Q); |
749 | veth->h_vlan_TCI = htons(tag); | 749 | veth->h_vlan_TCI = htons(tag); |
750 | 750 | ||
751 | return skb; | 751 | return skb; |
@@ -1403,6 +1403,19 @@ void gelic_net_tx_timeout(struct net_device *netdev) | |||
1403 | atomic_dec(&card->tx_timeout_task_counter); | 1403 | atomic_dec(&card->tx_timeout_task_counter); |
1404 | } | 1404 | } |
1405 | 1405 | ||
1406 | static const struct net_device_ops gelic_netdevice_ops = { | ||
1407 | .ndo_open = gelic_net_open, | ||
1408 | .ndo_stop = gelic_net_stop, | ||
1409 | .ndo_start_xmit = gelic_net_xmit, | ||
1410 | .ndo_set_multicast_list = gelic_net_set_multi, | ||
1411 | .ndo_change_mtu = gelic_net_change_mtu, | ||
1412 | .ndo_tx_timeout = gelic_net_tx_timeout, | ||
1413 | .ndo_validate_addr = eth_validate_addr, | ||
1414 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1415 | .ndo_poll_controller = gelic_net_poll_controller, | ||
1416 | #endif | ||
1417 | }; | ||
1418 | |||
1406 | /** | 1419 | /** |
1407 | * gelic_ether_setup_netdev_ops - initialization of net_device operations | 1420 | * gelic_ether_setup_netdev_ops - initialization of net_device operations |
1408 | * @netdev: net_device structure | 1421 | * @netdev: net_device structure |
@@ -1412,21 +1425,12 @@ void gelic_net_tx_timeout(struct net_device *netdev) | |||
1412 | static void gelic_ether_setup_netdev_ops(struct net_device *netdev, | 1425 | static void gelic_ether_setup_netdev_ops(struct net_device *netdev, |
1413 | struct napi_struct *napi) | 1426 | struct napi_struct *napi) |
1414 | { | 1427 | { |
1415 | netdev->open = &gelic_net_open; | ||
1416 | netdev->stop = &gelic_net_stop; | ||
1417 | netdev->hard_start_xmit = &gelic_net_xmit; | ||
1418 | netdev->set_multicast_list = &gelic_net_set_multi; | ||
1419 | netdev->change_mtu = &gelic_net_change_mtu; | ||
1420 | /* tx watchdog */ | ||
1421 | netdev->tx_timeout = &gelic_net_tx_timeout; | ||
1422 | netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; | 1428 | netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; |
1423 | /* NAPI */ | 1429 | /* NAPI */ |
1424 | netif_napi_add(netdev, napi, | 1430 | netif_napi_add(netdev, napi, |
1425 | gelic_net_poll, GELIC_NET_NAPI_WEIGHT); | 1431 | gelic_net_poll, GELIC_NET_NAPI_WEIGHT); |
1426 | netdev->ethtool_ops = &gelic_ether_ethtool_ops; | 1432 | netdev->ethtool_ops = &gelic_ether_ethtool_ops; |
1427 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1433 | netdev->netdev_ops = &gelic_netdevice_ops; |
1428 | netdev->poll_controller = gelic_net_poll_controller; | ||
1429 | #endif | ||
1430 | } | 1434 | } |
1431 | 1435 | ||
1432 | /** | 1436 | /** |
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c index 335da4831ab3..a5ac2bd58b5b 100644 --- a/drivers/net/ps3_gelic_wireless.c +++ b/drivers/net/ps3_gelic_wireless.c | |||
@@ -2697,6 +2697,19 @@ static int gelic_wl_stop(struct net_device *netdev) | |||
2697 | 2697 | ||
2698 | /* -- */ | 2698 | /* -- */ |
2699 | 2699 | ||
2700 | static const struct net_device_ops gelic_wl_netdevice_ops = { | ||
2701 | .ndo_open = gelic_wl_open, | ||
2702 | .ndo_stop = gelic_wl_stop, | ||
2703 | .ndo_start_xmit = gelic_net_xmit, | ||
2704 | .ndo_set_multicast_list = gelic_net_set_multi, | ||
2705 | .ndo_change_mtu = gelic_net_change_mtu, | ||
2706 | .ndo_tx_timeout = gelic_net_tx_timeout, | ||
2707 | .ndo_validate_addr = eth_validate_addr, | ||
2708 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
2709 | .ndo_poll_controller = gelic_net_poll_controller, | ||
2710 | #endif | ||
2711 | }; | ||
2712 | |||
2700 | static struct ethtool_ops gelic_wl_ethtool_ops = { | 2713 | static struct ethtool_ops gelic_wl_ethtool_ops = { |
2701 | .get_drvinfo = gelic_net_get_drvinfo, | 2714 | .get_drvinfo = gelic_net_get_drvinfo, |
2702 | .get_link = gelic_wl_get_link, | 2715 | .get_link = gelic_wl_get_link, |
@@ -2711,21 +2724,12 @@ static void gelic_wl_setup_netdev_ops(struct net_device *netdev) | |||
2711 | struct gelic_wl_info *wl; | 2724 | struct gelic_wl_info *wl; |
2712 | wl = port_wl(netdev_priv(netdev)); | 2725 | wl = port_wl(netdev_priv(netdev)); |
2713 | BUG_ON(!wl); | 2726 | BUG_ON(!wl); |
2714 | netdev->open = &gelic_wl_open; | ||
2715 | netdev->stop = &gelic_wl_stop; | ||
2716 | netdev->hard_start_xmit = &gelic_net_xmit; | ||
2717 | netdev->set_multicast_list = &gelic_net_set_multi; | ||
2718 | netdev->change_mtu = &gelic_net_change_mtu; | ||
2719 | netdev->wireless_data = &wl->wireless_data; | ||
2720 | netdev->wireless_handlers = &gelic_wl_wext_handler_def; | ||
2721 | /* tx watchdog */ | ||
2722 | netdev->tx_timeout = &gelic_net_tx_timeout; | ||
2723 | netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; | 2727 | netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; |
2724 | 2728 | ||
2725 | netdev->ethtool_ops = &gelic_wl_ethtool_ops; | 2729 | netdev->ethtool_ops = &gelic_wl_ethtool_ops; |
2726 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2730 | netdev->netdev_ops = &gelic_wl_netdevice_ops; |
2727 | netdev->poll_controller = gelic_net_poll_controller; | 2731 | netdev->wireless_data = &wl->wireless_data; |
2728 | #endif | 2732 | netdev->wireless_handlers = &gelic_wl_wext_handler_def; |
2729 | } | 2733 | } |
2730 | 2734 | ||
2731 | /* | 2735 | /* |
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c index 189ec29ac7a4..8b2823c8dccf 100644 --- a/drivers/net/qla3xxx.c +++ b/drivers/net/qla3xxx.c | |||
@@ -2292,7 +2292,7 @@ static int ql_poll(struct napi_struct *napi, int budget) | |||
2292 | 2292 | ||
2293 | if (tx_cleaned + rx_cleaned != budget) { | 2293 | if (tx_cleaned + rx_cleaned != budget) { |
2294 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | 2294 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); |
2295 | __netif_rx_complete(napi); | 2295 | __napi_complete(napi); |
2296 | ql_update_small_bufq_prod_index(qdev); | 2296 | ql_update_small_bufq_prod_index(qdev); |
2297 | ql_update_lrg_bufq_prod_index(qdev); | 2297 | ql_update_lrg_bufq_prod_index(qdev); |
2298 | writel(qdev->rsp_consumer_index, | 2298 | writel(qdev->rsp_consumer_index, |
@@ -2351,8 +2351,8 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id) | |||
2351 | spin_unlock(&qdev->adapter_lock); | 2351 | spin_unlock(&qdev->adapter_lock); |
2352 | } else if (value & ISP_IMR_DISABLE_CMPL_INT) { | 2352 | } else if (value & ISP_IMR_DISABLE_CMPL_INT) { |
2353 | ql_disable_interrupts(qdev); | 2353 | ql_disable_interrupts(qdev); |
2354 | if (likely(netif_rx_schedule_prep(&qdev->napi))) { | 2354 | if (likely(napi_schedule_prep(&qdev->napi))) { |
2355 | __netif_rx_schedule(&qdev->napi); | 2355 | __napi_schedule(&qdev->napi); |
2356 | } | 2356 | } |
2357 | } else { | 2357 | } else { |
2358 | return IRQ_NONE; | 2358 | return IRQ_NONE; |
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index 3d1d7b6e55aa..04bf2122264a 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c | |||
@@ -1446,6 +1446,7 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev, | |||
1446 | qdev->stats.rx_packets++; | 1446 | qdev->stats.rx_packets++; |
1447 | qdev->stats.rx_bytes += skb->len; | 1447 | qdev->stats.rx_bytes += skb->len; |
1448 | skb->protocol = eth_type_trans(skb, ndev); | 1448 | skb->protocol = eth_type_trans(skb, ndev); |
1449 | skb_record_rx_queue(skb, rx_ring - &qdev->rx_ring[0]); | ||
1449 | if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) { | 1450 | if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) { |
1450 | QPRINTK(qdev, RX_STATUS, DEBUG, | 1451 | QPRINTK(qdev, RX_STATUS, DEBUG, |
1451 | "Passing a VLAN packet upstream.\n"); | 1452 | "Passing a VLAN packet upstream.\n"); |
@@ -1652,7 +1653,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget) | |||
1652 | rx_ring->cq_id); | 1653 | rx_ring->cq_id); |
1653 | 1654 | ||
1654 | if (work_done < budget) { | 1655 | if (work_done < budget) { |
1655 | __netif_rx_complete(napi); | 1656 | __napi_complete(napi); |
1656 | ql_enable_completion_interrupt(qdev, rx_ring->irq); | 1657 | ql_enable_completion_interrupt(qdev, rx_ring->irq); |
1657 | } | 1658 | } |
1658 | return work_done; | 1659 | return work_done; |
@@ -1737,7 +1738,7 @@ static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id) | |||
1737 | static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id) | 1738 | static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id) |
1738 | { | 1739 | { |
1739 | struct rx_ring *rx_ring = dev_id; | 1740 | struct rx_ring *rx_ring = dev_id; |
1740 | netif_rx_schedule(&rx_ring->napi); | 1741 | napi_schedule(&rx_ring->napi); |
1741 | return IRQ_HANDLED; | 1742 | return IRQ_HANDLED; |
1742 | } | 1743 | } |
1743 | 1744 | ||
@@ -1823,7 +1824,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id) | |||
1823 | &rx_ring->rx_work, | 1824 | &rx_ring->rx_work, |
1824 | 0); | 1825 | 0); |
1825 | else | 1826 | else |
1826 | netif_rx_schedule(&rx_ring->napi); | 1827 | napi_schedule(&rx_ring->napi); |
1827 | work_done++; | 1828 | work_done++; |
1828 | } | 1829 | } |
1829 | } | 1830 | } |
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c index b2dcdb5ed8bd..3c27a7bfea49 100644 --- a/drivers/net/r6040.c +++ b/drivers/net/r6040.c | |||
@@ -676,7 +676,7 @@ static int r6040_poll(struct napi_struct *napi, int budget) | |||
676 | work_done = r6040_rx(dev, budget); | 676 | work_done = r6040_rx(dev, budget); |
677 | 677 | ||
678 | if (work_done < budget) { | 678 | if (work_done < budget) { |
679 | netif_rx_complete(napi); | 679 | napi_complete(napi); |
680 | /* Enable RX interrupt */ | 680 | /* Enable RX interrupt */ |
681 | iowrite16(ioread16(ioaddr + MIER) | RX_INTS, ioaddr + MIER); | 681 | iowrite16(ioread16(ioaddr + MIER) | RX_INTS, ioaddr + MIER); |
682 | } | 682 | } |
@@ -713,7 +713,7 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id) | |||
713 | 713 | ||
714 | /* Mask off RX interrupt */ | 714 | /* Mask off RX interrupt */ |
715 | misr &= ~RX_INTS; | 715 | misr &= ~RX_INTS; |
716 | netif_rx_schedule(&lp->napi); | 716 | napi_schedule(&lp->napi); |
717 | } | 717 | } |
718 | 718 | ||
719 | /* TX interrupt request */ | 719 | /* TX interrupt request */ |
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 2c73ca606b35..1c4a980253fe 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -3581,8 +3581,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) | |||
3581 | RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event); | 3581 | RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event); |
3582 | tp->intr_mask = ~tp->napi_event; | 3582 | tp->intr_mask = ~tp->napi_event; |
3583 | 3583 | ||
3584 | if (likely(netif_rx_schedule_prep(&tp->napi))) | 3584 | if (likely(napi_schedule_prep(&tp->napi))) |
3585 | __netif_rx_schedule(&tp->napi); | 3585 | __napi_schedule(&tp->napi); |
3586 | else if (netif_msg_intr(tp)) { | 3586 | else if (netif_msg_intr(tp)) { |
3587 | printk(KERN_INFO "%s: interrupt %04x in poll\n", | 3587 | printk(KERN_INFO "%s: interrupt %04x in poll\n", |
3588 | dev->name, status); | 3588 | dev->name, status); |
@@ -3603,7 +3603,7 @@ static int rtl8169_poll(struct napi_struct *napi, int budget) | |||
3603 | rtl8169_tx_interrupt(dev, tp, ioaddr); | 3603 | rtl8169_tx_interrupt(dev, tp, ioaddr); |
3604 | 3604 | ||
3605 | if (work_done < budget) { | 3605 | if (work_done < budget) { |
3606 | netif_rx_complete(napi); | 3606 | napi_complete(napi); |
3607 | tp->intr_mask = 0xffff; | 3607 | tp->intr_mask = 0xffff; |
3608 | /* | 3608 | /* |
3609 | * 20040426: the barrier is not strictly required but the | 3609 | * 20040426: the barrier is not strictly required but the |
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index f5c57c059bca..e0a353f4ec92 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c | |||
@@ -2852,7 +2852,7 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget) | |||
2852 | s2io_chk_rx_buffers(nic, ring); | 2852 | s2io_chk_rx_buffers(nic, ring); |
2853 | 2853 | ||
2854 | if (pkts_processed < budget_org) { | 2854 | if (pkts_processed < budget_org) { |
2855 | netif_rx_complete(napi); | 2855 | napi_complete(napi); |
2856 | /*Re Enable MSI-Rx Vector*/ | 2856 | /*Re Enable MSI-Rx Vector*/ |
2857 | addr = (u8 __iomem *)&bar0->xmsi_mask_reg; | 2857 | addr = (u8 __iomem *)&bar0->xmsi_mask_reg; |
2858 | addr += 7 - ring->ring_no; | 2858 | addr += 7 - ring->ring_no; |
@@ -2889,7 +2889,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget) | |||
2889 | break; | 2889 | break; |
2890 | } | 2890 | } |
2891 | if (pkts_processed < budget_org) { | 2891 | if (pkts_processed < budget_org) { |
2892 | netif_rx_complete(napi); | 2892 | napi_complete(napi); |
2893 | /* Re enable the Rx interrupts for the ring */ | 2893 | /* Re enable the Rx interrupts for the ring */ |
2894 | writeq(0, &bar0->rx_traffic_mask); | 2894 | writeq(0, &bar0->rx_traffic_mask); |
2895 | readl(&bar0->rx_traffic_mask); | 2895 | readl(&bar0->rx_traffic_mask); |
@@ -4342,7 +4342,7 @@ static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id) | |||
4342 | val8 = (ring->ring_no == 0) ? 0x7f : 0xff; | 4342 | val8 = (ring->ring_no == 0) ? 0x7f : 0xff; |
4343 | writeb(val8, addr); | 4343 | writeb(val8, addr); |
4344 | val8 = readb(addr); | 4344 | val8 = readb(addr); |
4345 | netif_rx_schedule(&ring->napi); | 4345 | napi_schedule(&ring->napi); |
4346 | } else { | 4346 | } else { |
4347 | rx_intr_handler(ring, 0); | 4347 | rx_intr_handler(ring, 0); |
4348 | s2io_chk_rx_buffers(sp, ring); | 4348 | s2io_chk_rx_buffers(sp, ring); |
@@ -4789,7 +4789,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id) | |||
4789 | 4789 | ||
4790 | if (config->napi) { | 4790 | if (config->napi) { |
4791 | if (reason & GEN_INTR_RXTRAFFIC) { | 4791 | if (reason & GEN_INTR_RXTRAFFIC) { |
4792 | netif_rx_schedule(&sp->napi); | 4792 | napi_schedule(&sp->napi); |
4793 | writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask); | 4793 | writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask); |
4794 | writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); | 4794 | writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); |
4795 | readl(&bar0->rx_traffic_int); | 4795 | readl(&bar0->rx_traffic_int); |
@@ -7542,6 +7542,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) | |||
7542 | 7542 | ||
7543 | sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize; | 7543 | sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize; |
7544 | send_up: | 7544 | send_up: |
7545 | skb_record_rx_queue(skb, ring_no); | ||
7545 | queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2)); | 7546 | queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2)); |
7546 | aggregate: | 7547 | aggregate: |
7547 | sp->mac_control.rings[ring_no].rx_bufs_left -= 1; | 7548 | sp->mac_control.rings[ring_no].rx_bufs_left -= 1; |
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c index 31e38fae017f..88dd2e09832f 100644 --- a/drivers/net/sb1250-mac.c +++ b/drivers/net/sb1250-mac.c | |||
@@ -2039,9 +2039,9 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance) | |||
2039 | sbdma_tx_process(sc,&(sc->sbm_txdma), 0); | 2039 | sbdma_tx_process(sc,&(sc->sbm_txdma), 0); |
2040 | 2040 | ||
2041 | if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) { | 2041 | if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) { |
2042 | if (netif_rx_schedule_prep(&sc->napi)) { | 2042 | if (napi_schedule_prep(&sc->napi)) { |
2043 | __raw_writeq(0, sc->sbm_imr); | 2043 | __raw_writeq(0, sc->sbm_imr); |
2044 | __netif_rx_schedule(&sc->napi); | 2044 | __napi_schedule(&sc->napi); |
2045 | /* Depend on the exit from poll to reenable intr */ | 2045 | /* Depend on the exit from poll to reenable intr */ |
2046 | } | 2046 | } |
2047 | else { | 2047 | else { |
@@ -2478,7 +2478,7 @@ static int sbmac_mii_probe(struct net_device *dev) | |||
2478 | return -ENXIO; | 2478 | return -ENXIO; |
2479 | } | 2479 | } |
2480 | 2480 | ||
2481 | phy_dev = phy_connect(dev, phy_dev->dev.bus_id, &sbmac_mii_poll, 0, | 2481 | phy_dev = phy_connect(dev, dev_name(&phy_dev->dev), &sbmac_mii_poll, 0, |
2482 | PHY_INTERFACE_MODE_GMII); | 2482 | PHY_INTERFACE_MODE_GMII); |
2483 | if (IS_ERR(phy_dev)) { | 2483 | if (IS_ERR(phy_dev)) { |
2484 | printk(KERN_ERR "%s: could not attach to PHY\n", dev->name); | 2484 | printk(KERN_ERR "%s: could not attach to PHY\n", dev->name); |
@@ -2500,7 +2500,7 @@ static int sbmac_mii_probe(struct net_device *dev) | |||
2500 | 2500 | ||
2501 | pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", | 2501 | pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", |
2502 | dev->name, phy_dev->drv->name, | 2502 | dev->name, phy_dev->drv->name, |
2503 | phy_dev->dev.bus_id, phy_dev->irq); | 2503 | dev_name(&phy_dev->dev), phy_dev->irq); |
2504 | 2504 | ||
2505 | sc->phy_dev = phy_dev; | 2505 | sc->phy_dev = phy_dev; |
2506 | 2506 | ||
@@ -2667,7 +2667,7 @@ static int sbmac_poll(struct napi_struct *napi, int budget) | |||
2667 | sbdma_tx_process(sc, &(sc->sbm_txdma), 1); | 2667 | sbdma_tx_process(sc, &(sc->sbm_txdma), 1); |
2668 | 2668 | ||
2669 | if (work_done < budget) { | 2669 | if (work_done < budget) { |
2670 | netif_rx_complete(napi); | 2670 | napi_complete(napi); |
2671 | 2671 | ||
2672 | #ifdef CONFIG_SBMAC_COALESCE | 2672 | #ifdef CONFIG_SBMAC_COALESCE |
2673 | __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | | 2673 | __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | |
@@ -2697,7 +2697,7 @@ static int __init sbmac_probe(struct platform_device *pldev) | |||
2697 | sbm_base = ioremap_nocache(res->start, res->end - res->start + 1); | 2697 | sbm_base = ioremap_nocache(res->start, res->end - res->start + 1); |
2698 | if (!sbm_base) { | 2698 | if (!sbm_base) { |
2699 | printk(KERN_ERR "%s: unable to map device registers\n", | 2699 | printk(KERN_ERR "%s: unable to map device registers\n", |
2700 | pldev->dev.bus_id); | 2700 | dev_name(&pldev->dev)); |
2701 | err = -ENOMEM; | 2701 | err = -ENOMEM; |
2702 | goto out_out; | 2702 | goto out_out; |
2703 | } | 2703 | } |
@@ -2708,7 +2708,7 @@ static int __init sbmac_probe(struct platform_device *pldev) | |||
2708 | * If we find a zero, skip this MAC. | 2708 | * If we find a zero, skip this MAC. |
2709 | */ | 2709 | */ |
2710 | sbmac_orig_hwaddr = __raw_readq(sbm_base + R_MAC_ETHERNET_ADDR); | 2710 | sbmac_orig_hwaddr = __raw_readq(sbm_base + R_MAC_ETHERNET_ADDR); |
2711 | pr_debug("%s: %sconfiguring MAC at 0x%08Lx\n", pldev->dev.bus_id, | 2711 | pr_debug("%s: %sconfiguring MAC at 0x%08Lx\n", dev_name(&pldev->dev), |
2712 | sbmac_orig_hwaddr ? "" : "not ", (long long)res->start); | 2712 | sbmac_orig_hwaddr ? "" : "not ", (long long)res->start); |
2713 | if (sbmac_orig_hwaddr == 0) { | 2713 | if (sbmac_orig_hwaddr == 0) { |
2714 | err = 0; | 2714 | err = 0; |
@@ -2721,7 +2721,7 @@ static int __init sbmac_probe(struct platform_device *pldev) | |||
2721 | dev = alloc_etherdev(sizeof(struct sbmac_softc)); | 2721 | dev = alloc_etherdev(sizeof(struct sbmac_softc)); |
2722 | if (!dev) { | 2722 | if (!dev) { |
2723 | printk(KERN_ERR "%s: unable to allocate etherdev\n", | 2723 | printk(KERN_ERR "%s: unable to allocate etherdev\n", |
2724 | pldev->dev.bus_id); | 2724 | dev_name(&pldev->dev)); |
2725 | err = -ENOMEM; | 2725 | err = -ENOMEM; |
2726 | goto out_unmap; | 2726 | goto out_unmap; |
2727 | } | 2727 | } |
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c index 8b75bef4a841..c13cbf099b88 100644 --- a/drivers/net/sc92031.c +++ b/drivers/net/sc92031.c | |||
@@ -13,6 +13,9 @@ | |||
13 | * Both are almost identical and seem to be based on pci-skeleton.c | 13 | * Both are almost identical and seem to be based on pci-skeleton.c |
14 | * | 14 | * |
15 | * Rewritten for 2.6 by Cesar Eduardo Barros | 15 | * Rewritten for 2.6 by Cesar Eduardo Barros |
16 | * | ||
17 | * A datasheet for this chip can be found at | ||
18 | * http://www.silan.com.cn/english/products/pdf/SC92031AY.pdf | ||
16 | */ | 19 | */ |
17 | 20 | ||
18 | /* Note about set_mac_address: I don't know how to change the hardware | 21 | /* Note about set_mac_address: I don't know how to change the hardware |
@@ -31,13 +34,7 @@ | |||
31 | 34 | ||
32 | #include <asm/irq.h> | 35 | #include <asm/irq.h> |
33 | 36 | ||
34 | #define PCI_VENDOR_ID_SILAN 0x1904 | ||
35 | #define PCI_DEVICE_ID_SILAN_SC92031 0x2031 | ||
36 | #define PCI_DEVICE_ID_SILAN_8139D 0x8139 | ||
37 | |||
38 | #define SC92031_NAME "sc92031" | 37 | #define SC92031_NAME "sc92031" |
39 | #define SC92031_DESCRIPTION "Silan SC92031 PCI Fast Ethernet Adapter driver" | ||
40 | #define SC92031_VERSION "2.0c" | ||
41 | 38 | ||
42 | /* BAR 0 is MMIO, BAR 1 is PIO */ | 39 | /* BAR 0 is MMIO, BAR 1 is PIO */ |
43 | #ifndef SC92031_USE_BAR | 40 | #ifndef SC92031_USE_BAR |
@@ -1264,7 +1261,6 @@ static void sc92031_ethtool_get_drvinfo(struct net_device *dev, | |||
1264 | struct pci_dev *pdev = priv->pdev; | 1261 | struct pci_dev *pdev = priv->pdev; |
1265 | 1262 | ||
1266 | strcpy(drvinfo->driver, SC92031_NAME); | 1263 | strcpy(drvinfo->driver, SC92031_NAME); |
1267 | strcpy(drvinfo->version, SC92031_VERSION); | ||
1268 | strcpy(drvinfo->bus_info, pci_name(pdev)); | 1264 | strcpy(drvinfo->bus_info, pci_name(pdev)); |
1269 | } | 1265 | } |
1270 | 1266 | ||
@@ -1423,6 +1419,7 @@ static int __devinit sc92031_probe(struct pci_dev *pdev, | |||
1423 | struct net_device *dev; | 1419 | struct net_device *dev; |
1424 | struct sc92031_priv *priv; | 1420 | struct sc92031_priv *priv; |
1425 | u32 mac0, mac1; | 1421 | u32 mac0, mac1; |
1422 | unsigned long base_addr; | ||
1426 | 1423 | ||
1427 | err = pci_enable_device(pdev); | 1424 | err = pci_enable_device(pdev); |
1428 | if (unlikely(err < 0)) | 1425 | if (unlikely(err < 0)) |
@@ -1497,6 +1494,14 @@ static int __devinit sc92031_probe(struct pci_dev *pdev, | |||
1497 | if (err < 0) | 1494 | if (err < 0) |
1498 | goto out_register_netdev; | 1495 | goto out_register_netdev; |
1499 | 1496 | ||
1497 | #if SC92031_USE_BAR == 0 | ||
1498 | base_addr = dev->mem_start; | ||
1499 | #elif SC92031_USE_BAR == 1 | ||
1500 | base_addr = dev->base_addr; | ||
1501 | #endif | ||
1502 | printk(KERN_INFO "%s: SC92031 at 0x%lx, %pM, IRQ %d\n", dev->name, | ||
1503 | base_addr, dev->dev_addr, dev->irq); | ||
1504 | |||
1500 | return 0; | 1505 | return 0; |
1501 | 1506 | ||
1502 | out_register_netdev: | 1507 | out_register_netdev: |
@@ -1586,8 +1591,8 @@ out: | |||
1586 | } | 1591 | } |
1587 | 1592 | ||
1588 | static struct pci_device_id sc92031_pci_device_id_table[] __devinitdata = { | 1593 | static struct pci_device_id sc92031_pci_device_id_table[] __devinitdata = { |
1589 | { PCI_DEVICE(PCI_VENDOR_ID_SILAN, PCI_DEVICE_ID_SILAN_SC92031) }, | 1594 | { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) }, |
1590 | { PCI_DEVICE(PCI_VENDOR_ID_SILAN, PCI_DEVICE_ID_SILAN_8139D) }, | 1595 | { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) }, |
1591 | { 0, } | 1596 | { 0, } |
1592 | }; | 1597 | }; |
1593 | MODULE_DEVICE_TABLE(pci, sc92031_pci_device_id_table); | 1598 | MODULE_DEVICE_TABLE(pci, sc92031_pci_device_id_table); |
@@ -1603,7 +1608,6 @@ static struct pci_driver sc92031_pci_driver = { | |||
1603 | 1608 | ||
1604 | static int __init sc92031_init(void) | 1609 | static int __init sc92031_init(void) |
1605 | { | 1610 | { |
1606 | printk(KERN_INFO SC92031_DESCRIPTION " " SC92031_VERSION "\n"); | ||
1607 | return pci_register_driver(&sc92031_pci_driver); | 1611 | return pci_register_driver(&sc92031_pci_driver); |
1608 | } | 1612 | } |
1609 | 1613 | ||
@@ -1617,5 +1621,4 @@ module_exit(sc92031_exit); | |||
1617 | 1621 | ||
1618 | MODULE_LICENSE("GPL"); | 1622 | MODULE_LICENSE("GPL"); |
1619 | MODULE_AUTHOR("Cesar Eduardo Barros <cesarb@cesarb.net>"); | 1623 | MODULE_AUTHOR("Cesar Eduardo Barros <cesarb@cesarb.net>"); |
1620 | MODULE_DESCRIPTION(SC92031_DESCRIPTION); | 1624 | MODULE_DESCRIPTION("Silan SC92031 PCI Fast Ethernet Adapter driver"); |
1621 | MODULE_VERSION(SC92031_VERSION); | ||
diff --git a/drivers/net/sfc/Kconfig b/drivers/net/sfc/Kconfig index c535408ad6be..12a82966b577 100644 --- a/drivers/net/sfc/Kconfig +++ b/drivers/net/sfc/Kconfig | |||
@@ -2,7 +2,6 @@ config SFC | |||
2 | tristate "Solarflare Solarstorm SFC4000 support" | 2 | tristate "Solarflare Solarstorm SFC4000 support" |
3 | depends on PCI && INET | 3 | depends on PCI && INET |
4 | select MII | 4 | select MII |
5 | select INET_LRO | ||
6 | select CRC32 | 5 | select CRC32 |
7 | select I2C | 6 | select I2C |
8 | select I2C_ALGOBIT | 7 | select I2C_ALGOBIT |
diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h index d95c21828014..d54d84c267b9 100644 --- a/drivers/net/sfc/bitfield.h +++ b/drivers/net/sfc/bitfield.h | |||
@@ -543,7 +543,7 @@ typedef union efx_oword { | |||
543 | 543 | ||
544 | /* Static initialiser */ | 544 | /* Static initialiser */ |
545 | #define EFX_OWORD32(a, b, c, d) \ | 545 | #define EFX_OWORD32(a, b, c, d) \ |
546 | { .u32 = { __constant_cpu_to_le32(a), __constant_cpu_to_le32(b), \ | 546 | { .u32 = { cpu_to_le32(a), cpu_to_le32(b), \ |
547 | __constant_cpu_to_le32(c), __constant_cpu_to_le32(d) } } | 547 | cpu_to_le32(c), cpu_to_le32(d) } } |
548 | 548 | ||
549 | #endif /* EFX_BITFIELD_H */ | 549 | #endif /* EFX_BITFIELD_H */ |
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index ab0e09bf154d..75836599e43d 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c | |||
@@ -182,7 +182,6 @@ static int efx_process_channel(struct efx_channel *channel, int rx_quota) | |||
182 | channel->rx_pkt = NULL; | 182 | channel->rx_pkt = NULL; |
183 | } | 183 | } |
184 | 184 | ||
185 | efx_flush_lro(channel); | ||
186 | efx_rx_strategy(channel); | 185 | efx_rx_strategy(channel); |
187 | 186 | ||
188 | efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]); | 187 | efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]); |
@@ -225,11 +224,11 @@ static int efx_poll(struct napi_struct *napi, int budget) | |||
225 | 224 | ||
226 | if (rx_packets < budget) { | 225 | if (rx_packets < budget) { |
227 | /* There is no race here; although napi_disable() will | 226 | /* There is no race here; although napi_disable() will |
228 | * only wait for netif_rx_complete(), this isn't a problem | 227 | * only wait for napi_complete(), this isn't a problem |
229 | * since efx_channel_processed() will have no effect if | 228 | * since efx_channel_processed() will have no effect if |
230 | * interrupts have already been disabled. | 229 | * interrupts have already been disabled. |
231 | */ | 230 | */ |
232 | netif_rx_complete(napi); | 231 | napi_complete(napi); |
233 | efx_channel_processed(channel); | 232 | efx_channel_processed(channel); |
234 | } | 233 | } |
235 | 234 | ||
@@ -1269,18 +1268,11 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) | |||
1269 | static int efx_init_napi(struct efx_nic *efx) | 1268 | static int efx_init_napi(struct efx_nic *efx) |
1270 | { | 1269 | { |
1271 | struct efx_channel *channel; | 1270 | struct efx_channel *channel; |
1272 | int rc; | ||
1273 | 1271 | ||
1274 | efx_for_each_channel(channel, efx) { | 1272 | efx_for_each_channel(channel, efx) { |
1275 | channel->napi_dev = efx->net_dev; | 1273 | channel->napi_dev = efx->net_dev; |
1276 | rc = efx_lro_init(&channel->lro_mgr, efx); | ||
1277 | if (rc) | ||
1278 | goto err; | ||
1279 | } | 1274 | } |
1280 | return 0; | 1275 | return 0; |
1281 | err: | ||
1282 | efx_fini_napi(efx); | ||
1283 | return rc; | ||
1284 | } | 1276 | } |
1285 | 1277 | ||
1286 | static void efx_fini_napi(struct efx_nic *efx) | 1278 | static void efx_fini_napi(struct efx_nic *efx) |
@@ -1288,7 +1280,6 @@ static void efx_fini_napi(struct efx_nic *efx) | |||
1288 | struct efx_channel *channel; | 1280 | struct efx_channel *channel; |
1289 | 1281 | ||
1290 | efx_for_each_channel(channel, efx) { | 1282 | efx_for_each_channel(channel, efx) { |
1291 | efx_lro_fini(&channel->lro_mgr); | ||
1292 | channel->napi_dev = NULL; | 1283 | channel->napi_dev = NULL; |
1293 | } | 1284 | } |
1294 | } | 1285 | } |
@@ -2120,7 +2111,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, | |||
2120 | net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG | | 2111 | net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG | |
2121 | NETIF_F_HIGHDMA | NETIF_F_TSO); | 2112 | NETIF_F_HIGHDMA | NETIF_F_TSO); |
2122 | if (lro) | 2113 | if (lro) |
2123 | net_dev->features |= NETIF_F_LRO; | 2114 | net_dev->features |= NETIF_F_GRO; |
2124 | /* Mask for features that also apply to VLAN devices */ | 2115 | /* Mask for features that also apply to VLAN devices */ |
2125 | net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG | | 2116 | net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG | |
2126 | NETIF_F_HIGHDMA | NETIF_F_TSO); | 2117 | NETIF_F_HIGHDMA | NETIF_F_TSO); |
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h index 55d0f131b0e9..8bde1d2a21db 100644 --- a/drivers/net/sfc/efx.h +++ b/drivers/net/sfc/efx.h | |||
@@ -80,7 +80,7 @@ static inline void efx_schedule_channel(struct efx_channel *channel) | |||
80 | channel->channel, raw_smp_processor_id()); | 80 | channel->channel, raw_smp_processor_id()); |
81 | channel->work_pending = true; | 81 | channel->work_pending = true; |
82 | 82 | ||
83 | netif_rx_schedule(&channel->napi_str); | 83 | napi_schedule(&channel->napi_str); |
84 | } | 84 | } |
85 | 85 | ||
86 | #endif /* EFX_EFX_H */ | 86 | #endif /* EFX_EFX_H */ |
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h index e019ad1fb9a0..19930ff9df7b 100644 --- a/drivers/net/sfc/net_driver.h +++ b/drivers/net/sfc/net_driver.h | |||
@@ -25,15 +25,11 @@ | |||
25 | #include <linux/device.h> | 25 | #include <linux/device.h> |
26 | #include <linux/highmem.h> | 26 | #include <linux/highmem.h> |
27 | #include <linux/workqueue.h> | 27 | #include <linux/workqueue.h> |
28 | #include <linux/inet_lro.h> | ||
29 | #include <linux/i2c.h> | 28 | #include <linux/i2c.h> |
30 | 29 | ||
31 | #include "enum.h" | 30 | #include "enum.h" |
32 | #include "bitfield.h" | 31 | #include "bitfield.h" |
33 | 32 | ||
34 | #define EFX_MAX_LRO_DESCRIPTORS 8 | ||
35 | #define EFX_MAX_LRO_AGGR MAX_SKB_FRAGS | ||
36 | |||
37 | /************************************************************************** | 33 | /************************************************************************** |
38 | * | 34 | * |
39 | * Build definitions | 35 | * Build definitions |
@@ -340,13 +336,10 @@ enum efx_rx_alloc_method { | |||
340 | * @eventq_read_ptr: Event queue read pointer | 336 | * @eventq_read_ptr: Event queue read pointer |
341 | * @last_eventq_read_ptr: Last event queue read pointer value. | 337 | * @last_eventq_read_ptr: Last event queue read pointer value. |
342 | * @eventq_magic: Event queue magic value for driver-generated test events | 338 | * @eventq_magic: Event queue magic value for driver-generated test events |
343 | * @lro_mgr: LRO state | ||
344 | * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors | 339 | * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors |
345 | * and diagnostic counters | 340 | * and diagnostic counters |
346 | * @rx_alloc_push_pages: RX allocation method currently in use for pushing | 341 | * @rx_alloc_push_pages: RX allocation method currently in use for pushing |
347 | * descriptors | 342 | * descriptors |
348 | * @rx_alloc_pop_pages: RX allocation method currently in use for popping | ||
349 | * descriptors | ||
350 | * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors | 343 | * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors |
351 | * @n_rx_ip_frag_err: Count of RX IP fragment errors | 344 | * @n_rx_ip_frag_err: Count of RX IP fragment errors |
352 | * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors | 345 | * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors |
@@ -371,10 +364,8 @@ struct efx_channel { | |||
371 | unsigned int last_eventq_read_ptr; | 364 | unsigned int last_eventq_read_ptr; |
372 | unsigned int eventq_magic; | 365 | unsigned int eventq_magic; |
373 | 366 | ||
374 | struct net_lro_mgr lro_mgr; | ||
375 | int rx_alloc_level; | 367 | int rx_alloc_level; |
376 | int rx_alloc_push_pages; | 368 | int rx_alloc_push_pages; |
377 | int rx_alloc_pop_pages; | ||
378 | 369 | ||
379 | unsigned n_rx_tobe_disc; | 370 | unsigned n_rx_tobe_disc; |
380 | unsigned n_rx_ip_frag_err; | 371 | unsigned n_rx_ip_frag_err; |
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c index b8ba4bbad889..66d7fe3db3e6 100644 --- a/drivers/net/sfc/rx.c +++ b/drivers/net/sfc/rx.c | |||
@@ -99,109 +99,6 @@ static inline unsigned int efx_rx_buf_size(struct efx_nic *efx) | |||
99 | } | 99 | } |
100 | 100 | ||
101 | 101 | ||
102 | /************************************************************************** | ||
103 | * | ||
104 | * Linux generic LRO handling | ||
105 | * | ||
106 | ************************************************************************** | ||
107 | */ | ||
108 | |||
109 | static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr, | ||
110 | void **tcpudp_hdr, u64 *hdr_flags, void *priv) | ||
111 | { | ||
112 | struct efx_channel *channel = priv; | ||
113 | struct iphdr *iph; | ||
114 | struct tcphdr *th; | ||
115 | |||
116 | iph = (struct iphdr *)skb->data; | ||
117 | if (skb->protocol != htons(ETH_P_IP) || iph->protocol != IPPROTO_TCP) | ||
118 | goto fail; | ||
119 | |||
120 | th = (struct tcphdr *)(skb->data + iph->ihl * 4); | ||
121 | |||
122 | *tcpudp_hdr = th; | ||
123 | *ip_hdr = iph; | ||
124 | *hdr_flags = LRO_IPV4 | LRO_TCP; | ||
125 | |||
126 | channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO; | ||
127 | return 0; | ||
128 | fail: | ||
129 | channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; | ||
130 | return -1; | ||
131 | } | ||
132 | |||
133 | static int efx_get_frag_hdr(struct skb_frag_struct *frag, void **mac_hdr, | ||
134 | void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags, | ||
135 | void *priv) | ||
136 | { | ||
137 | struct efx_channel *channel = priv; | ||
138 | struct ethhdr *eh; | ||
139 | struct iphdr *iph; | ||
140 | |||
141 | /* We support EtherII and VLAN encapsulated IPv4 */ | ||
142 | eh = page_address(frag->page) + frag->page_offset; | ||
143 | *mac_hdr = eh; | ||
144 | |||
145 | if (eh->h_proto == htons(ETH_P_IP)) { | ||
146 | iph = (struct iphdr *)(eh + 1); | ||
147 | } else { | ||
148 | struct vlan_ethhdr *veh = (struct vlan_ethhdr *)eh; | ||
149 | if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP)) | ||
150 | goto fail; | ||
151 | |||
152 | iph = (struct iphdr *)(veh + 1); | ||
153 | } | ||
154 | *ip_hdr = iph; | ||
155 | |||
156 | /* We can only do LRO over TCP */ | ||
157 | if (iph->protocol != IPPROTO_TCP) | ||
158 | goto fail; | ||
159 | |||
160 | *hdr_flags = LRO_IPV4 | LRO_TCP; | ||
161 | *tcpudp_hdr = (struct tcphdr *)((u8 *) iph + iph->ihl * 4); | ||
162 | |||
163 | channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO; | ||
164 | return 0; | ||
165 | fail: | ||
166 | channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; | ||
167 | return -1; | ||
168 | } | ||
169 | |||
170 | int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx) | ||
171 | { | ||
172 | size_t s = sizeof(struct net_lro_desc) * EFX_MAX_LRO_DESCRIPTORS; | ||
173 | struct net_lro_desc *lro_arr; | ||
174 | |||
175 | /* Allocate the LRO descriptors structure */ | ||
176 | lro_arr = kzalloc(s, GFP_KERNEL); | ||
177 | if (lro_arr == NULL) | ||
178 | return -ENOMEM; | ||
179 | |||
180 | lro_mgr->lro_arr = lro_arr; | ||
181 | lro_mgr->max_desc = EFX_MAX_LRO_DESCRIPTORS; | ||
182 | lro_mgr->max_aggr = EFX_MAX_LRO_AGGR; | ||
183 | lro_mgr->frag_align_pad = EFX_PAGE_SKB_ALIGN; | ||
184 | |||
185 | lro_mgr->get_skb_header = efx_lro_get_skb_hdr; | ||
186 | lro_mgr->get_frag_header = efx_get_frag_hdr; | ||
187 | lro_mgr->dev = efx->net_dev; | ||
188 | |||
189 | lro_mgr->features = LRO_F_NAPI; | ||
190 | |||
191 | /* We can pass packets up with the checksum intact */ | ||
192 | lro_mgr->ip_summed = CHECKSUM_UNNECESSARY; | ||
193 | |||
194 | lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; | ||
195 | |||
196 | return 0; | ||
197 | } | ||
198 | |||
199 | void efx_lro_fini(struct net_lro_mgr *lro_mgr) | ||
200 | { | ||
201 | kfree(lro_mgr->lro_arr); | ||
202 | lro_mgr->lro_arr = NULL; | ||
203 | } | ||
204 | |||
205 | /** | 102 | /** |
206 | * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation | 103 | * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation |
207 | * | 104 | * |
@@ -549,77 +446,31 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, | |||
549 | static void efx_rx_packet_lro(struct efx_channel *channel, | 446 | static void efx_rx_packet_lro(struct efx_channel *channel, |
550 | struct efx_rx_buffer *rx_buf) | 447 | struct efx_rx_buffer *rx_buf) |
551 | { | 448 | { |
552 | struct net_lro_mgr *lro_mgr = &channel->lro_mgr; | 449 | struct napi_struct *napi = &channel->napi_str; |
553 | void *priv = channel; | ||
554 | 450 | ||
555 | /* Pass the skb/page into the LRO engine */ | 451 | /* Pass the skb/page into the LRO engine */ |
556 | if (rx_buf->page) { | 452 | if (rx_buf->page) { |
557 | struct skb_frag_struct frags; | 453 | struct napi_gro_fraginfo info; |
558 | 454 | ||
559 | frags.page = rx_buf->page; | 455 | info.frags[0].page = rx_buf->page; |
560 | frags.page_offset = efx_rx_buf_offset(rx_buf); | 456 | info.frags[0].page_offset = efx_rx_buf_offset(rx_buf); |
561 | frags.size = rx_buf->len; | 457 | info.frags[0].size = rx_buf->len; |
458 | info.nr_frags = 1; | ||
459 | info.ip_summed = CHECKSUM_UNNECESSARY; | ||
460 | info.len = rx_buf->len; | ||
562 | 461 | ||
563 | lro_receive_frags(lro_mgr, &frags, rx_buf->len, | 462 | napi_gro_frags(napi, &info); |
564 | rx_buf->len, priv, 0); | ||
565 | 463 | ||
566 | EFX_BUG_ON_PARANOID(rx_buf->skb); | 464 | EFX_BUG_ON_PARANOID(rx_buf->skb); |
567 | rx_buf->page = NULL; | 465 | rx_buf->page = NULL; |
568 | } else { | 466 | } else { |
569 | EFX_BUG_ON_PARANOID(!rx_buf->skb); | 467 | EFX_BUG_ON_PARANOID(!rx_buf->skb); |
570 | 468 | ||
571 | lro_receive_skb(lro_mgr, rx_buf->skb, priv); | 469 | napi_gro_receive(napi, rx_buf->skb); |
572 | rx_buf->skb = NULL; | 470 | rx_buf->skb = NULL; |
573 | } | 471 | } |
574 | } | 472 | } |
575 | 473 | ||
576 | /* Allocate and construct an SKB around a struct page.*/ | ||
577 | static struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf, | ||
578 | struct efx_nic *efx, | ||
579 | int hdr_len) | ||
580 | { | ||
581 | struct sk_buff *skb; | ||
582 | |||
583 | /* Allocate an SKB to store the headers */ | ||
584 | skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN); | ||
585 | if (unlikely(skb == NULL)) { | ||
586 | EFX_ERR_RL(efx, "RX out of memory for skb\n"); | ||
587 | return NULL; | ||
588 | } | ||
589 | |||
590 | EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags); | ||
591 | EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len); | ||
592 | |||
593 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
594 | skb_reserve(skb, EFX_PAGE_SKB_ALIGN); | ||
595 | |||
596 | skb->len = rx_buf->len; | ||
597 | skb->truesize = rx_buf->len + sizeof(struct sk_buff); | ||
598 | memcpy(skb->data, rx_buf->data, hdr_len); | ||
599 | skb->tail += hdr_len; | ||
600 | |||
601 | /* Append the remaining page onto the frag list */ | ||
602 | if (unlikely(rx_buf->len > hdr_len)) { | ||
603 | struct skb_frag_struct *frag = skb_shinfo(skb)->frags; | ||
604 | frag->page = rx_buf->page; | ||
605 | frag->page_offset = efx_rx_buf_offset(rx_buf) + hdr_len; | ||
606 | frag->size = skb->len - hdr_len; | ||
607 | skb_shinfo(skb)->nr_frags = 1; | ||
608 | skb->data_len = frag->size; | ||
609 | } else { | ||
610 | __free_pages(rx_buf->page, efx->rx_buffer_order); | ||
611 | skb->data_len = 0; | ||
612 | } | ||
613 | |||
614 | /* Ownership has transferred from the rx_buf to skb */ | ||
615 | rx_buf->page = NULL; | ||
616 | |||
617 | /* Move past the ethernet header */ | ||
618 | skb->protocol = eth_type_trans(skb, efx->net_dev); | ||
619 | |||
620 | return skb; | ||
621 | } | ||
622 | |||
623 | void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, | 474 | void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, |
624 | unsigned int len, bool checksummed, bool discard) | 475 | unsigned int len, bool checksummed, bool discard) |
625 | { | 476 | { |
@@ -687,7 +538,6 @@ void __efx_rx_packet(struct efx_channel *channel, | |||
687 | { | 538 | { |
688 | struct efx_nic *efx = channel->efx; | 539 | struct efx_nic *efx = channel->efx; |
689 | struct sk_buff *skb; | 540 | struct sk_buff *skb; |
690 | bool lro = !!(efx->net_dev->features & NETIF_F_LRO); | ||
691 | 541 | ||
692 | /* If we're in loopback test, then pass the packet directly to the | 542 | /* If we're in loopback test, then pass the packet directly to the |
693 | * loopback layer, and free the rx_buf here | 543 | * loopback layer, and free the rx_buf here |
@@ -709,41 +559,23 @@ void __efx_rx_packet(struct efx_channel *channel, | |||
709 | efx->net_dev); | 559 | efx->net_dev); |
710 | } | 560 | } |
711 | 561 | ||
712 | /* Both our generic-LRO and SFC-SSR support skb and page based | 562 | if (likely(checksummed || rx_buf->page)) { |
713 | * allocation, but neither support switching from one to the | ||
714 | * other on the fly. If we spot that the allocation mode has | ||
715 | * changed, then flush the LRO state. | ||
716 | */ | ||
717 | if (unlikely(channel->rx_alloc_pop_pages != (rx_buf->page != NULL))) { | ||
718 | efx_flush_lro(channel); | ||
719 | channel->rx_alloc_pop_pages = (rx_buf->page != NULL); | ||
720 | } | ||
721 | if (likely(checksummed && lro)) { | ||
722 | efx_rx_packet_lro(channel, rx_buf); | 563 | efx_rx_packet_lro(channel, rx_buf); |
723 | goto done; | 564 | goto done; |
724 | } | 565 | } |
725 | 566 | ||
726 | /* Form an skb if required */ | 567 | /* We now own the SKB */ |
727 | if (rx_buf->page) { | 568 | skb = rx_buf->skb; |
728 | int hdr_len = min(rx_buf->len, EFX_SKB_HEADERS); | 569 | rx_buf->skb = NULL; |
729 | skb = efx_rx_mk_skb(rx_buf, efx, hdr_len); | ||
730 | if (unlikely(skb == NULL)) { | ||
731 | efx_free_rx_buffer(efx, rx_buf); | ||
732 | goto done; | ||
733 | } | ||
734 | } else { | ||
735 | /* We now own the SKB */ | ||
736 | skb = rx_buf->skb; | ||
737 | rx_buf->skb = NULL; | ||
738 | } | ||
739 | 570 | ||
740 | EFX_BUG_ON_PARANOID(rx_buf->page); | 571 | EFX_BUG_ON_PARANOID(rx_buf->page); |
741 | EFX_BUG_ON_PARANOID(rx_buf->skb); | 572 | EFX_BUG_ON_PARANOID(rx_buf->skb); |
742 | EFX_BUG_ON_PARANOID(!skb); | 573 | EFX_BUG_ON_PARANOID(!skb); |
743 | 574 | ||
744 | /* Set the SKB flags */ | 575 | /* Set the SKB flags */ |
745 | if (unlikely(!checksummed || !efx->rx_checksum_enabled)) | 576 | skb->ip_summed = CHECKSUM_NONE; |
746 | skb->ip_summed = CHECKSUM_NONE; | 577 | |
578 | skb_record_rx_queue(skb, channel->channel); | ||
747 | 579 | ||
748 | /* Pass the packet up */ | 580 | /* Pass the packet up */ |
749 | netif_receive_skb(skb); | 581 | netif_receive_skb(skb); |
@@ -760,7 +592,7 @@ void efx_rx_strategy(struct efx_channel *channel) | |||
760 | enum efx_rx_alloc_method method = rx_alloc_method; | 592 | enum efx_rx_alloc_method method = rx_alloc_method; |
761 | 593 | ||
762 | /* Only makes sense to use page based allocation if LRO is enabled */ | 594 | /* Only makes sense to use page based allocation if LRO is enabled */ |
763 | if (!(channel->efx->net_dev->features & NETIF_F_LRO)) { | 595 | if (!(channel->efx->net_dev->features & NETIF_F_GRO)) { |
764 | method = RX_ALLOC_METHOD_SKB; | 596 | method = RX_ALLOC_METHOD_SKB; |
765 | } else if (method == RX_ALLOC_METHOD_AUTO) { | 597 | } else if (method == RX_ALLOC_METHOD_AUTO) { |
766 | /* Constrain the rx_alloc_level */ | 598 | /* Constrain the rx_alloc_level */ |
@@ -865,11 +697,6 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue) | |||
865 | rx_queue->buffer = NULL; | 697 | rx_queue->buffer = NULL; |
866 | } | 698 | } |
867 | 699 | ||
868 | void efx_flush_lro(struct efx_channel *channel) | ||
869 | { | ||
870 | lro_flush_all(&channel->lro_mgr); | ||
871 | } | ||
872 | |||
873 | 700 | ||
874 | module_param(rx_alloc_method, int, 0644); | 701 | module_param(rx_alloc_method, int, 0644); |
875 | MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers"); | 702 | MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers"); |
diff --git a/drivers/net/sfc/rx.h b/drivers/net/sfc/rx.h index 0e88a9ddc1c6..42ee7555a80b 100644 --- a/drivers/net/sfc/rx.h +++ b/drivers/net/sfc/rx.h | |||
@@ -17,9 +17,6 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue); | |||
17 | void efx_init_rx_queue(struct efx_rx_queue *rx_queue); | 17 | void efx_init_rx_queue(struct efx_rx_queue *rx_queue); |
18 | void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); | 18 | void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); |
19 | 19 | ||
20 | int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx); | ||
21 | void efx_lro_fini(struct net_lro_mgr *lro_mgr); | ||
22 | void efx_flush_lro(struct efx_channel *channel); | ||
23 | void efx_rx_strategy(struct efx_channel *channel); | 20 | void efx_rx_strategy(struct efx_channel *channel); |
24 | void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); | 21 | void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); |
25 | void efx_rx_work(struct work_struct *data); | 22 | void efx_rx_work(struct work_struct *data); |
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c index cb25ae5b257a..c0e906831623 100644 --- a/drivers/net/sfc/sfe4001.c +++ b/drivers/net/sfc/sfe4001.c | |||
@@ -24,6 +24,7 @@ | |||
24 | */ | 24 | */ |
25 | 25 | ||
26 | #include <linux/delay.h> | 26 | #include <linux/delay.h> |
27 | #include <linux/rtnetlink.h> | ||
27 | #include "net_driver.h" | 28 | #include "net_driver.h" |
28 | #include "efx.h" | 29 | #include "efx.h" |
29 | #include "phy.h" | 30 | #include "phy.h" |
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c index f0efd246962c..ac9eeab79f20 100644 --- a/drivers/net/sfc/tenxpress.c +++ b/drivers/net/sfc/tenxpress.c | |||
@@ -8,6 +8,7 @@ | |||
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/delay.h> | 10 | #include <linux/delay.h> |
11 | #include <linux/rtnetlink.h> | ||
11 | #include <linux/seq_file.h> | 12 | #include <linux/seq_file.h> |
12 | #include "efx.h" | 13 | #include "efx.h" |
13 | #include "mdio_10g.h" | 14 | #include "mdio_10g.h" |
diff --git a/drivers/net/skge.c b/drivers/net/skge.c index c9dbb06f8c94..952d37ffee51 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c | |||
@@ -3214,7 +3214,7 @@ static int skge_poll(struct napi_struct *napi, int to_do) | |||
3214 | unsigned long flags; | 3214 | unsigned long flags; |
3215 | 3215 | ||
3216 | spin_lock_irqsave(&hw->hw_lock, flags); | 3216 | spin_lock_irqsave(&hw->hw_lock, flags); |
3217 | __netif_rx_complete(napi); | 3217 | __napi_complete(napi); |
3218 | hw->intr_mask |= napimask[skge->port]; | 3218 | hw->intr_mask |= napimask[skge->port]; |
3219 | skge_write32(hw, B0_IMSK, hw->intr_mask); | 3219 | skge_write32(hw, B0_IMSK, hw->intr_mask); |
3220 | skge_read32(hw, B0_IMSK); | 3220 | skge_read32(hw, B0_IMSK); |
@@ -3377,7 +3377,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id) | |||
3377 | if (status & (IS_XA1_F|IS_R1_F)) { | 3377 | if (status & (IS_XA1_F|IS_R1_F)) { |
3378 | struct skge_port *skge = netdev_priv(hw->dev[0]); | 3378 | struct skge_port *skge = netdev_priv(hw->dev[0]); |
3379 | hw->intr_mask &= ~(IS_XA1_F|IS_R1_F); | 3379 | hw->intr_mask &= ~(IS_XA1_F|IS_R1_F); |
3380 | netif_rx_schedule(&skge->napi); | 3380 | napi_schedule(&skge->napi); |
3381 | } | 3381 | } |
3382 | 3382 | ||
3383 | if (status & IS_PA_TO_TX1) | 3383 | if (status & IS_PA_TO_TX1) |
@@ -3397,7 +3397,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id) | |||
3397 | 3397 | ||
3398 | if (status & (IS_XA2_F|IS_R2_F)) { | 3398 | if (status & (IS_XA2_F|IS_R2_F)) { |
3399 | hw->intr_mask &= ~(IS_XA2_F|IS_R2_F); | 3399 | hw->intr_mask &= ~(IS_XA2_F|IS_R2_F); |
3400 | netif_rx_schedule(&skge->napi); | 3400 | napi_schedule(&skge->napi); |
3401 | } | 3401 | } |
3402 | 3402 | ||
3403 | if (status & IS_PA_TO_RX2) { | 3403 | if (status & IS_PA_TO_RX2) { |
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c index 223cde0d43be..293610334a77 100644 --- a/drivers/net/smc911x.c +++ b/drivers/net/smc911x.c | |||
@@ -1545,7 +1545,7 @@ smc911x_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | |||
1545 | { | 1545 | { |
1546 | strncpy(info->driver, CARDNAME, sizeof(info->driver)); | 1546 | strncpy(info->driver, CARDNAME, sizeof(info->driver)); |
1547 | strncpy(info->version, version, sizeof(info->version)); | 1547 | strncpy(info->version, version, sizeof(info->version)); |
1548 | strncpy(info->bus_info, dev->dev.parent->bus_id, sizeof(info->bus_info)); | 1548 | strncpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info)); |
1549 | } | 1549 | } |
1550 | 1550 | ||
1551 | static int smc911x_ethtool_nwayreset(struct net_device *dev) | 1551 | static int smc911x_ethtool_nwayreset(struct net_device *dev) |
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c index b215a8d85e62..fdcbaf8dfa73 100644 --- a/drivers/net/smc91x.c +++ b/drivers/net/smc91x.c | |||
@@ -1614,7 +1614,7 @@ smc_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | |||
1614 | { | 1614 | { |
1615 | strncpy(info->driver, CARDNAME, sizeof(info->driver)); | 1615 | strncpy(info->driver, CARDNAME, sizeof(info->driver)); |
1616 | strncpy(info->version, version, sizeof(info->version)); | 1616 | strncpy(info->version, version, sizeof(info->version)); |
1617 | strncpy(info->bus_info, dev->dev.parent->bus_id, sizeof(info->bus_info)); | 1617 | strncpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info)); |
1618 | } | 1618 | } |
1619 | 1619 | ||
1620 | static int smc_ethtool_nwayreset(struct net_device *dev) | 1620 | static int smc_ethtool_nwayreset(struct net_device *dev) |
@@ -1643,6 +1643,117 @@ static void smc_ethtool_setmsglevel(struct net_device *dev, u32 level) | |||
1643 | lp->msg_enable = level; | 1643 | lp->msg_enable = level; |
1644 | } | 1644 | } |
1645 | 1645 | ||
1646 | static int smc_write_eeprom_word(struct net_device *dev, u16 addr, u16 word) | ||
1647 | { | ||
1648 | u16 ctl; | ||
1649 | struct smc_local *lp = netdev_priv(dev); | ||
1650 | void __iomem *ioaddr = lp->base; | ||
1651 | |||
1652 | spin_lock_irq(&lp->lock); | ||
1653 | /* load word into GP register */ | ||
1654 | SMC_SELECT_BANK(lp, 1); | ||
1655 | SMC_SET_GP(lp, word); | ||
1656 | /* set the address to put the data in EEPROM */ | ||
1657 | SMC_SELECT_BANK(lp, 2); | ||
1658 | SMC_SET_PTR(lp, addr); | ||
1659 | /* tell it to write */ | ||
1660 | SMC_SELECT_BANK(lp, 1); | ||
1661 | ctl = SMC_GET_CTL(lp); | ||
1662 | SMC_SET_CTL(lp, ctl | (CTL_EEPROM_SELECT | CTL_STORE)); | ||
1663 | /* wait for it to finish */ | ||
1664 | do { | ||
1665 | udelay(1); | ||
1666 | } while (SMC_GET_CTL(lp) & CTL_STORE); | ||
1667 | /* clean up */ | ||
1668 | SMC_SET_CTL(lp, ctl); | ||
1669 | SMC_SELECT_BANK(lp, 2); | ||
1670 | spin_unlock_irq(&lp->lock); | ||
1671 | return 0; | ||
1672 | } | ||
1673 | |||
1674 | static int smc_read_eeprom_word(struct net_device *dev, u16 addr, u16 *word) | ||
1675 | { | ||
1676 | u16 ctl; | ||
1677 | struct smc_local *lp = netdev_priv(dev); | ||
1678 | void __iomem *ioaddr = lp->base; | ||
1679 | |||
1680 | spin_lock_irq(&lp->lock); | ||
1681 | /* set the EEPROM address to get the data from */ | ||
1682 | SMC_SELECT_BANK(lp, 2); | ||
1683 | SMC_SET_PTR(lp, addr | PTR_READ); | ||
1684 | /* tell it to load */ | ||
1685 | SMC_SELECT_BANK(lp, 1); | ||
1686 | SMC_SET_GP(lp, 0xffff); /* init to known */ | ||
1687 | ctl = SMC_GET_CTL(lp); | ||
1688 | SMC_SET_CTL(lp, ctl | (CTL_EEPROM_SELECT | CTL_RELOAD)); | ||
1689 | /* wait for it to finish */ | ||
1690 | do { | ||
1691 | udelay(1); | ||
1692 | } while (SMC_GET_CTL(lp) & CTL_RELOAD); | ||
1693 | /* read word from GP register */ | ||
1694 | *word = SMC_GET_GP(lp); | ||
1695 | /* clean up */ | ||
1696 | SMC_SET_CTL(lp, ctl); | ||
1697 | SMC_SELECT_BANK(lp, 2); | ||
1698 | spin_unlock_irq(&lp->lock); | ||
1699 | return 0; | ||
1700 | } | ||
1701 | |||
1702 | static int smc_ethtool_geteeprom_len(struct net_device *dev) | ||
1703 | { | ||
1704 | return 0x23 * 2; | ||
1705 | } | ||
1706 | |||
1707 | static int smc_ethtool_geteeprom(struct net_device *dev, | ||
1708 | struct ethtool_eeprom *eeprom, u8 *data) | ||
1709 | { | ||
1710 | int i; | ||
1711 | int imax; | ||
1712 | |||
1713 | DBG(1, "Reading %d bytes at %d(0x%x)\n", | ||
1714 | eeprom->len, eeprom->offset, eeprom->offset); | ||
1715 | imax = smc_ethtool_geteeprom_len(dev); | ||
1716 | for (i = 0; i < eeprom->len; i += 2) { | ||
1717 | int ret; | ||
1718 | u16 wbuf; | ||
1719 | int offset = i + eeprom->offset; | ||
1720 | if (offset > imax) | ||
1721 | break; | ||
1722 | ret = smc_read_eeprom_word(dev, offset >> 1, &wbuf); | ||
1723 | if (ret != 0) | ||
1724 | return ret; | ||
1725 | DBG(2, "Read 0x%x from 0x%x\n", wbuf, offset >> 1); | ||
1726 | data[i] = (wbuf >> 8) & 0xff; | ||
1727 | data[i+1] = wbuf & 0xff; | ||
1728 | } | ||
1729 | return 0; | ||
1730 | } | ||
1731 | |||
1732 | static int smc_ethtool_seteeprom(struct net_device *dev, | ||
1733 | struct ethtool_eeprom *eeprom, u8 *data) | ||
1734 | { | ||
1735 | int i; | ||
1736 | int imax; | ||
1737 | |||
1738 | DBG(1, "Writing %d bytes to %d(0x%x)\n", | ||
1739 | eeprom->len, eeprom->offset, eeprom->offset); | ||
1740 | imax = smc_ethtool_geteeprom_len(dev); | ||
1741 | for (i = 0; i < eeprom->len; i += 2) { | ||
1742 | int ret; | ||
1743 | u16 wbuf; | ||
1744 | int offset = i + eeprom->offset; | ||
1745 | if (offset > imax) | ||
1746 | break; | ||
1747 | wbuf = (data[i] << 8) | data[i + 1]; | ||
1748 | DBG(2, "Writing 0x%x to 0x%x\n", wbuf, offset >> 1); | ||
1749 | ret = smc_write_eeprom_word(dev, offset >> 1, wbuf); | ||
1750 | if (ret != 0) | ||
1751 | return ret; | ||
1752 | } | ||
1753 | return 0; | ||
1754 | } | ||
1755 | |||
1756 | |||
1646 | static const struct ethtool_ops smc_ethtool_ops = { | 1757 | static const struct ethtool_ops smc_ethtool_ops = { |
1647 | .get_settings = smc_ethtool_getsettings, | 1758 | .get_settings = smc_ethtool_getsettings, |
1648 | .set_settings = smc_ethtool_setsettings, | 1759 | .set_settings = smc_ethtool_setsettings, |
@@ -1652,8 +1763,22 @@ static const struct ethtool_ops smc_ethtool_ops = { | |||
1652 | .set_msglevel = smc_ethtool_setmsglevel, | 1763 | .set_msglevel = smc_ethtool_setmsglevel, |
1653 | .nway_reset = smc_ethtool_nwayreset, | 1764 | .nway_reset = smc_ethtool_nwayreset, |
1654 | .get_link = ethtool_op_get_link, | 1765 | .get_link = ethtool_op_get_link, |
1655 | // .get_eeprom = smc_ethtool_geteeprom, | 1766 | .get_eeprom_len = smc_ethtool_geteeprom_len, |
1656 | // .set_eeprom = smc_ethtool_seteeprom, | 1767 | .get_eeprom = smc_ethtool_geteeprom, |
1768 | .set_eeprom = smc_ethtool_seteeprom, | ||
1769 | }; | ||
1770 | |||
1771 | static const struct net_device_ops smc_netdev_ops = { | ||
1772 | .ndo_open = smc_open, | ||
1773 | .ndo_stop = smc_close, | ||
1774 | .ndo_start_xmit = smc_hard_start_xmit, | ||
1775 | .ndo_tx_timeout = smc_timeout, | ||
1776 | .ndo_set_multicast_list = smc_set_multicast_list, | ||
1777 | .ndo_validate_addr = eth_validate_addr, | ||
1778 | .ndo_set_mac_address = eth_mac_addr, | ||
1779 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1780 | .ndo_poll_controller = smc_poll_controller, | ||
1781 | #endif | ||
1657 | }; | 1782 | }; |
1658 | 1783 | ||
1659 | /* | 1784 | /* |
@@ -1865,16 +1990,9 @@ static int __devinit smc_probe(struct net_device *dev, void __iomem *ioaddr, | |||
1865 | /* Fill in the fields of the device structure with ethernet values. */ | 1990 | /* Fill in the fields of the device structure with ethernet values. */ |
1866 | ether_setup(dev); | 1991 | ether_setup(dev); |
1867 | 1992 | ||
1868 | dev->open = smc_open; | ||
1869 | dev->stop = smc_close; | ||
1870 | dev->hard_start_xmit = smc_hard_start_xmit; | ||
1871 | dev->tx_timeout = smc_timeout; | ||
1872 | dev->watchdog_timeo = msecs_to_jiffies(watchdog); | 1993 | dev->watchdog_timeo = msecs_to_jiffies(watchdog); |
1873 | dev->set_multicast_list = smc_set_multicast_list; | 1994 | dev->netdev_ops = &smc_netdev_ops; |
1874 | dev->ethtool_ops = &smc_ethtool_ops; | 1995 | dev->ethtool_ops = &smc_ethtool_ops; |
1875 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1876 | dev->poll_controller = smc_poll_controller; | ||
1877 | #endif | ||
1878 | 1996 | ||
1879 | tasklet_init(&lp->tx_task, smc_hardware_send_pkt, (unsigned long)dev); | 1997 | tasklet_init(&lp->tx_task, smc_hardware_send_pkt, (unsigned long)dev); |
1880 | INIT_WORK(&lp->phy_configure, smc_phy_configure); | 1998 | INIT_WORK(&lp->phy_configure, smc_phy_configure); |
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index c4ccd121bc9c..ed9ae43523a1 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h | |||
@@ -1141,6 +1141,16 @@ static const char * chip_ids[ 16 ] = { | |||
1141 | 1141 | ||
1142 | #define SMC_GET_MII(lp) SMC_inw(ioaddr, MII_REG(lp)) | 1142 | #define SMC_GET_MII(lp) SMC_inw(ioaddr, MII_REG(lp)) |
1143 | 1143 | ||
1144 | #define SMC_GET_GP(lp) SMC_inw(ioaddr, GP_REG(lp)) | ||
1145 | |||
1146 | #define SMC_SET_GP(lp, x) \ | ||
1147 | do { \ | ||
1148 | if (SMC_MUST_ALIGN_WRITE(lp)) \ | ||
1149 | SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 8, 1)); \ | ||
1150 | else \ | ||
1151 | SMC_outw(x, ioaddr, GP_REG(lp)); \ | ||
1152 | } while (0) | ||
1153 | |||
1144 | #define SMC_SET_MII(lp, x) SMC_outw(x, ioaddr, MII_REG(lp)) | 1154 | #define SMC_SET_MII(lp, x) SMC_outw(x, ioaddr, MII_REG(lp)) |
1145 | 1155 | ||
1146 | #define SMC_GET_MIR(lp) SMC_inw(ioaddr, MIR_REG(lp)) | 1156 | #define SMC_GET_MIR(lp) SMC_inw(ioaddr, MIR_REG(lp)) |
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index 783c1a7b869e..6e175e5555a1 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c | |||
@@ -368,48 +368,53 @@ out: | |||
368 | return reg; | 368 | return reg; |
369 | } | 369 | } |
370 | 370 | ||
371 | /* Autodetects and initialises external phy for SMSC9115 and SMSC9117 flavors. | 371 | /* Switch to external phy. Assumes tx and rx are stopped. */ |
372 | * If something goes wrong, returns -ENODEV to revert back to internal phy. | 372 | static void smsc911x_phy_enable_external(struct smsc911x_data *pdata) |
373 | * Performed at initialisation only, so interrupts are enabled */ | ||
374 | static int smsc911x_phy_initialise_external(struct smsc911x_data *pdata) | ||
375 | { | 373 | { |
376 | unsigned int hwcfg = smsc911x_reg_read(pdata, HW_CFG); | 374 | unsigned int hwcfg = smsc911x_reg_read(pdata, HW_CFG); |
377 | 375 | ||
378 | /* External phy is requested, supported, and detected */ | 376 | /* Disable phy clocks to the MAC */ |
379 | if (hwcfg & HW_CFG_EXT_PHY_DET_) { | 377 | hwcfg &= (~HW_CFG_PHY_CLK_SEL_); |
378 | hwcfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_; | ||
379 | smsc911x_reg_write(pdata, HW_CFG, hwcfg); | ||
380 | udelay(10); /* Enough time for clocks to stop */ | ||
380 | 381 | ||
381 | /* Switch to external phy. Assuming tx and rx are stopped | 382 | /* Switch to external phy */ |
382 | * because smsc911x_phy_initialise is called before | 383 | hwcfg |= HW_CFG_EXT_PHY_EN_; |
383 | * smsc911x_rx_initialise and tx_initialise. */ | 384 | smsc911x_reg_write(pdata, HW_CFG, hwcfg); |
384 | 385 | ||
385 | /* Disable phy clocks to the MAC */ | 386 | /* Enable phy clocks to the MAC */ |
386 | hwcfg &= (~HW_CFG_PHY_CLK_SEL_); | 387 | hwcfg &= (~HW_CFG_PHY_CLK_SEL_); |
387 | hwcfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_; | 388 | hwcfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_; |
388 | smsc911x_reg_write(pdata, HW_CFG, hwcfg); | 389 | smsc911x_reg_write(pdata, HW_CFG, hwcfg); |
389 | udelay(10); /* Enough time for clocks to stop */ | 390 | udelay(10); /* Enough time for clocks to restart */ |
390 | 391 | ||
391 | /* Switch to external phy */ | 392 | hwcfg |= HW_CFG_SMI_SEL_; |
392 | hwcfg |= HW_CFG_EXT_PHY_EN_; | 393 | smsc911x_reg_write(pdata, HW_CFG, hwcfg); |
393 | smsc911x_reg_write(pdata, HW_CFG, hwcfg); | 394 | } |
394 | |||
395 | /* Enable phy clocks to the MAC */ | ||
396 | hwcfg &= (~HW_CFG_PHY_CLK_SEL_); | ||
397 | hwcfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_; | ||
398 | smsc911x_reg_write(pdata, HW_CFG, hwcfg); | ||
399 | udelay(10); /* Enough time for clocks to restart */ | ||
400 | 395 | ||
401 | hwcfg |= HW_CFG_SMI_SEL_; | 396 | /* Autodetects and enables external phy if present on supported chips. |
402 | smsc911x_reg_write(pdata, HW_CFG, hwcfg); | 397 | * autodetection can be overridden by specifying SMSC911X_FORCE_INTERNAL_PHY |
398 | * or SMSC911X_FORCE_EXTERNAL_PHY in the platform_data flags. */ | ||
399 | static void smsc911x_phy_initialise_external(struct smsc911x_data *pdata) | ||
400 | { | ||
401 | unsigned int hwcfg = smsc911x_reg_read(pdata, HW_CFG); | ||
403 | 402 | ||
404 | SMSC_TRACE(HW, "Successfully switched to external PHY"); | 403 | if (pdata->config.flags & SMSC911X_FORCE_INTERNAL_PHY) { |
404 | SMSC_TRACE(HW, "Forcing internal PHY"); | ||
405 | pdata->using_extphy = 0; | ||
406 | } else if (pdata->config.flags & SMSC911X_FORCE_EXTERNAL_PHY) { | ||
407 | SMSC_TRACE(HW, "Forcing external PHY"); | ||
408 | smsc911x_phy_enable_external(pdata); | ||
409 | pdata->using_extphy = 1; | ||
410 | } else if (hwcfg & HW_CFG_EXT_PHY_DET_) { | ||
411 | SMSC_TRACE(HW, "HW_CFG EXT_PHY_DET set, using external PHY"); | ||
412 | smsc911x_phy_enable_external(pdata); | ||
405 | pdata->using_extphy = 1; | 413 | pdata->using_extphy = 1; |
406 | } else { | 414 | } else { |
407 | SMSC_WARNING(HW, "No external PHY detected, " | 415 | SMSC_TRACE(HW, "HW_CFG EXT_PHY_DET clear, using internal PHY"); |
408 | "Using internal PHY instead."); | 416 | pdata->using_extphy = 0; |
409 | /* Use internal phy */ | ||
410 | return -ENODEV; | ||
411 | } | 417 | } |
412 | return 0; | ||
413 | } | 418 | } |
414 | 419 | ||
415 | /* Fetches a tx status out of the status fifo */ | 420 | /* Fetches a tx status out of the status fifo */ |
@@ -769,7 +774,7 @@ static int smsc911x_mii_probe(struct net_device *dev) | |||
769 | return -ENODEV; | 774 | return -ENODEV; |
770 | } | 775 | } |
771 | 776 | ||
772 | phydev = phy_connect(dev, phydev->dev.bus_id, | 777 | phydev = phy_connect(dev, dev_name(&phydev->dev), |
773 | &smsc911x_phy_adjust_link, 0, pdata->config.phy_interface); | 778 | &smsc911x_phy_adjust_link, 0, pdata->config.phy_interface); |
774 | 779 | ||
775 | if (IS_ERR(phydev)) { | 780 | if (IS_ERR(phydev)) { |
@@ -778,7 +783,8 @@ static int smsc911x_mii_probe(struct net_device *dev) | |||
778 | } | 783 | } |
779 | 784 | ||
780 | pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", | 785 | pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", |
781 | dev->name, phydev->drv->name, phydev->dev.bus_id, phydev->irq); | 786 | dev->name, phydev->drv->name, |
787 | dev_name(&phydev->dev), phydev->irq); | ||
782 | 788 | ||
783 | /* mask with MAC supported features */ | 789 | /* mask with MAC supported features */ |
784 | phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | | 790 | phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | |
@@ -824,22 +830,18 @@ static int __devinit smsc911x_mii_init(struct platform_device *pdev, | |||
824 | 830 | ||
825 | pdata->mii_bus->parent = &pdev->dev; | 831 | pdata->mii_bus->parent = &pdev->dev; |
826 | 832 | ||
827 | pdata->using_extphy = 0; | ||
828 | |||
829 | switch (pdata->idrev & 0xFFFF0000) { | 833 | switch (pdata->idrev & 0xFFFF0000) { |
830 | case 0x01170000: | 834 | case 0x01170000: |
831 | case 0x01150000: | 835 | case 0x01150000: |
832 | case 0x117A0000: | 836 | case 0x117A0000: |
833 | case 0x115A0000: | 837 | case 0x115A0000: |
834 | /* External PHY supported, try to autodetect */ | 838 | /* External PHY supported, try to autodetect */ |
835 | if (smsc911x_phy_initialise_external(pdata) < 0) { | 839 | smsc911x_phy_initialise_external(pdata); |
836 | SMSC_TRACE(HW, "No external PHY detected, " | ||
837 | "using internal PHY"); | ||
838 | } | ||
839 | break; | 840 | break; |
840 | default: | 841 | default: |
841 | SMSC_TRACE(HW, "External PHY is not supported, " | 842 | SMSC_TRACE(HW, "External PHY is not supported, " |
842 | "using internal PHY"); | 843 | "using internal PHY"); |
844 | pdata->using_extphy = 0; | ||
843 | break; | 845 | break; |
844 | } | 846 | } |
845 | 847 | ||
@@ -984,7 +986,7 @@ static int smsc911x_poll(struct napi_struct *napi, int budget) | |||
984 | /* We processed all packets available. Tell NAPI it can | 986 | /* We processed all packets available. Tell NAPI it can |
985 | * stop polling then re-enable rx interrupts */ | 987 | * stop polling then re-enable rx interrupts */ |
986 | smsc911x_reg_write(pdata, INT_STS, INT_STS_RSFL_); | 988 | smsc911x_reg_write(pdata, INT_STS, INT_STS_RSFL_); |
987 | netif_rx_complete(napi); | 989 | napi_complete(napi); |
988 | temp = smsc911x_reg_read(pdata, INT_EN); | 990 | temp = smsc911x_reg_read(pdata, INT_EN); |
989 | temp |= INT_EN_RSFL_EN_; | 991 | temp |= INT_EN_RSFL_EN_; |
990 | smsc911x_reg_write(pdata, INT_EN, temp); | 992 | smsc911x_reg_write(pdata, INT_EN, temp); |
@@ -1246,7 +1248,7 @@ static int smsc911x_open(struct net_device *dev) | |||
1246 | napi_enable(&pdata->napi); | 1248 | napi_enable(&pdata->napi); |
1247 | 1249 | ||
1248 | temp = smsc911x_reg_read(pdata, INT_EN); | 1250 | temp = smsc911x_reg_read(pdata, INT_EN); |
1249 | temp |= (INT_EN_TDFA_EN_ | INT_EN_RSFL_EN_); | 1251 | temp |= (INT_EN_TDFA_EN_ | INT_EN_RSFL_EN_ | INT_EN_RXSTOP_INT_EN_); |
1250 | smsc911x_reg_write(pdata, INT_EN, temp); | 1252 | smsc911x_reg_write(pdata, INT_EN, temp); |
1251 | 1253 | ||
1252 | spin_lock_irq(&pdata->mac_lock); | 1254 | spin_lock_irq(&pdata->mac_lock); |
@@ -1418,11 +1420,6 @@ static void smsc911x_set_multicast_list(struct net_device *dev) | |||
1418 | 1420 | ||
1419 | /* Request the hardware to stop, then perform the | 1421 | /* Request the hardware to stop, then perform the |
1420 | * update when we get an RX_STOP interrupt */ | 1422 | * update when we get an RX_STOP interrupt */ |
1421 | smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_); | ||
1422 | temp = smsc911x_reg_read(pdata, INT_EN); | ||
1423 | temp |= INT_EN_RXSTOP_INT_EN_; | ||
1424 | smsc911x_reg_write(pdata, INT_EN, temp); | ||
1425 | |||
1426 | temp = smsc911x_mac_read(pdata, MAC_CR); | 1423 | temp = smsc911x_mac_read(pdata, MAC_CR); |
1427 | temp &= ~(MAC_CR_RXEN_); | 1424 | temp &= ~(MAC_CR_RXEN_); |
1428 | smsc911x_mac_write(pdata, MAC_CR, temp); | 1425 | smsc911x_mac_write(pdata, MAC_CR, temp); |
@@ -1461,11 +1458,9 @@ static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id) | |||
1461 | /* Called when there is a multicast update scheduled and | 1458 | /* Called when there is a multicast update scheduled and |
1462 | * it is now safe to complete the update */ | 1459 | * it is now safe to complete the update */ |
1463 | SMSC_TRACE(INTR, "RX Stop interrupt"); | 1460 | SMSC_TRACE(INTR, "RX Stop interrupt"); |
1464 | temp = smsc911x_reg_read(pdata, INT_EN); | ||
1465 | temp &= (~INT_EN_RXSTOP_INT_EN_); | ||
1466 | smsc911x_reg_write(pdata, INT_EN, temp); | ||
1467 | smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_); | 1461 | smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_); |
1468 | smsc911x_rx_multicast_update_workaround(pdata); | 1462 | if (pdata->multicast_update_pending) |
1463 | smsc911x_rx_multicast_update_workaround(pdata); | ||
1469 | serviced = IRQ_HANDLED; | 1464 | serviced = IRQ_HANDLED; |
1470 | } | 1465 | } |
1471 | 1466 | ||
@@ -1485,16 +1480,16 @@ static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id) | |||
1485 | } | 1480 | } |
1486 | 1481 | ||
1487 | if (likely(intsts & inten & INT_STS_RSFL_)) { | 1482 | if (likely(intsts & inten & INT_STS_RSFL_)) { |
1488 | if (likely(netif_rx_schedule_prep(&pdata->napi))) { | 1483 | if (likely(napi_schedule_prep(&pdata->napi))) { |
1489 | /* Disable Rx interrupts */ | 1484 | /* Disable Rx interrupts */ |
1490 | temp = smsc911x_reg_read(pdata, INT_EN); | 1485 | temp = smsc911x_reg_read(pdata, INT_EN); |
1491 | temp &= (~INT_EN_RSFL_EN_); | 1486 | temp &= (~INT_EN_RSFL_EN_); |
1492 | smsc911x_reg_write(pdata, INT_EN, temp); | 1487 | smsc911x_reg_write(pdata, INT_EN, temp); |
1493 | /* Schedule a NAPI poll */ | 1488 | /* Schedule a NAPI poll */ |
1494 | __netif_rx_schedule(&pdata->napi); | 1489 | __napi_schedule(&pdata->napi); |
1495 | } else { | 1490 | } else { |
1496 | SMSC_WARNING(RX_ERR, | 1491 | SMSC_WARNING(RX_ERR, |
1497 | "netif_rx_schedule_prep failed"); | 1492 | "napi_schedule_prep failed"); |
1498 | } | 1493 | } |
1499 | serviced = IRQ_HANDLED; | 1494 | serviced = IRQ_HANDLED; |
1500 | } | 1495 | } |
@@ -1545,7 +1540,7 @@ static void smsc911x_ethtool_getdrvinfo(struct net_device *dev, | |||
1545 | { | 1540 | { |
1546 | strlcpy(info->driver, SMSC_CHIPNAME, sizeof(info->driver)); | 1541 | strlcpy(info->driver, SMSC_CHIPNAME, sizeof(info->driver)); |
1547 | strlcpy(info->version, SMSC_DRV_VERSION, sizeof(info->version)); | 1542 | strlcpy(info->version, SMSC_DRV_VERSION, sizeof(info->version)); |
1548 | strlcpy(info->bus_info, dev->dev.parent->bus_id, | 1543 | strlcpy(info->bus_info, dev_name(dev->dev.parent), |
1549 | sizeof(info->bus_info)); | 1544 | sizeof(info->bus_info)); |
1550 | } | 1545 | } |
1551 | 1546 | ||
@@ -1747,6 +1742,21 @@ static const struct net_device_ops smsc911x_netdev_ops = { | |||
1747 | #endif | 1742 | #endif |
1748 | }; | 1743 | }; |
1749 | 1744 | ||
1745 | /* copies the current mac address from hardware to dev->dev_addr */ | ||
1746 | static void __devinit smsc911x_read_mac_address(struct net_device *dev) | ||
1747 | { | ||
1748 | struct smsc911x_data *pdata = netdev_priv(dev); | ||
1749 | u32 mac_high16 = smsc911x_mac_read(pdata, ADDRH); | ||
1750 | u32 mac_low32 = smsc911x_mac_read(pdata, ADDRL); | ||
1751 | |||
1752 | dev->dev_addr[0] = (u8)(mac_low32); | ||
1753 | dev->dev_addr[1] = (u8)(mac_low32 >> 8); | ||
1754 | dev->dev_addr[2] = (u8)(mac_low32 >> 16); | ||
1755 | dev->dev_addr[3] = (u8)(mac_low32 >> 24); | ||
1756 | dev->dev_addr[4] = (u8)(mac_high16); | ||
1757 | dev->dev_addr[5] = (u8)(mac_high16 >> 8); | ||
1758 | } | ||
1759 | |||
1750 | /* Initializing private device structures, only called from probe */ | 1760 | /* Initializing private device structures, only called from probe */ |
1751 | static int __devinit smsc911x_init(struct net_device *dev) | 1761 | static int __devinit smsc911x_init(struct net_device *dev) |
1752 | { | 1762 | { |
@@ -1834,6 +1844,12 @@ static int __devinit smsc911x_init(struct net_device *dev) | |||
1834 | SMSC_WARNING(PROBE, | 1844 | SMSC_WARNING(PROBE, |
1835 | "This driver is not intended for this chip revision"); | 1845 | "This driver is not intended for this chip revision"); |
1836 | 1846 | ||
1847 | /* workaround for platforms without an eeprom, where the mac address | ||
1848 | * is stored elsewhere and set by the bootloader. This saves the | ||
1849 | * mac address before resetting the device */ | ||
1850 | if (pdata->config.flags & SMSC911X_SAVE_MAC_ADDRESS) | ||
1851 | smsc911x_read_mac_address(dev); | ||
1852 | |||
1837 | /* Reset the LAN911x */ | 1853 | /* Reset the LAN911x */ |
1838 | if (smsc911x_soft_reset(pdata)) | 1854 | if (smsc911x_soft_reset(pdata)) |
1839 | return -ENODEV; | 1855 | return -ENODEV; |
@@ -1892,9 +1908,9 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) | |||
1892 | struct net_device *dev; | 1908 | struct net_device *dev; |
1893 | struct smsc911x_data *pdata; | 1909 | struct smsc911x_data *pdata; |
1894 | struct smsc911x_platform_config *config = pdev->dev.platform_data; | 1910 | struct smsc911x_platform_config *config = pdev->dev.platform_data; |
1895 | struct resource *res; | 1911 | struct resource *res, *irq_res; |
1896 | unsigned int intcfg = 0; | 1912 | unsigned int intcfg = 0; |
1897 | int res_size; | 1913 | int res_size, irq_flags; |
1898 | int retval; | 1914 | int retval; |
1899 | DECLARE_MAC_BUF(mac); | 1915 | DECLARE_MAC_BUF(mac); |
1900 | 1916 | ||
@@ -1919,6 +1935,14 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) | |||
1919 | } | 1935 | } |
1920 | res_size = res->end - res->start; | 1936 | res_size = res->end - res->start; |
1921 | 1937 | ||
1938 | irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
1939 | if (!irq_res) { | ||
1940 | pr_warning("%s: Could not allocate irq resource.\n", | ||
1941 | SMSC_CHIPNAME); | ||
1942 | retval = -ENODEV; | ||
1943 | goto out_0; | ||
1944 | } | ||
1945 | |||
1922 | if (!request_mem_region(res->start, res_size, SMSC_CHIPNAME)) { | 1946 | if (!request_mem_region(res->start, res_size, SMSC_CHIPNAME)) { |
1923 | retval = -EBUSY; | 1947 | retval = -EBUSY; |
1924 | goto out_0; | 1948 | goto out_0; |
@@ -1935,7 +1959,8 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) | |||
1935 | 1959 | ||
1936 | pdata = netdev_priv(dev); | 1960 | pdata = netdev_priv(dev); |
1937 | 1961 | ||
1938 | dev->irq = platform_get_irq(pdev, 0); | 1962 | dev->irq = irq_res->start; |
1963 | irq_flags = irq_res->flags & IRQF_TRIGGER_MASK; | ||
1939 | pdata->ioaddr = ioremap_nocache(res->start, res_size); | 1964 | pdata->ioaddr = ioremap_nocache(res->start, res_size); |
1940 | 1965 | ||
1941 | /* copy config parameters across to pdata */ | 1966 | /* copy config parameters across to pdata */ |
@@ -1968,8 +1993,8 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) | |||
1968 | smsc911x_reg_write(pdata, INT_EN, 0); | 1993 | smsc911x_reg_write(pdata, INT_EN, 0); |
1969 | smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF); | 1994 | smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF); |
1970 | 1995 | ||
1971 | retval = request_irq(dev->irq, smsc911x_irqhandler, IRQF_DISABLED, | 1996 | retval = request_irq(dev->irq, smsc911x_irqhandler, |
1972 | dev->name, dev); | 1997 | irq_flags | IRQF_SHARED, dev->name, dev); |
1973 | if (retval) { | 1998 | if (retval) { |
1974 | SMSC_WARNING(PROBE, | 1999 | SMSC_WARNING(PROBE, |
1975 | "Unable to claim requested irq: %d", dev->irq); | 2000 | "Unable to claim requested irq: %d", dev->irq); |
@@ -2005,14 +2030,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) | |||
2005 | } else { | 2030 | } else { |
2006 | /* Try reading mac address from device. if EEPROM is present | 2031 | /* Try reading mac address from device. if EEPROM is present |
2007 | * it will already have been set */ | 2032 | * it will already have been set */ |
2008 | u32 mac_high16 = smsc911x_mac_read(pdata, ADDRH); | 2033 | smsc911x_read_mac_address(dev); |
2009 | u32 mac_low32 = smsc911x_mac_read(pdata, ADDRL); | ||
2010 | dev->dev_addr[0] = (u8)(mac_low32); | ||
2011 | dev->dev_addr[1] = (u8)(mac_low32 >> 8); | ||
2012 | dev->dev_addr[2] = (u8)(mac_low32 >> 16); | ||
2013 | dev->dev_addr[3] = (u8)(mac_low32 >> 24); | ||
2014 | dev->dev_addr[4] = (u8)(mac_high16); | ||
2015 | dev->dev_addr[5] = (u8)(mac_high16 >> 8); | ||
2016 | 2034 | ||
2017 | if (is_valid_ether_addr(dev->dev_addr)) { | 2035 | if (is_valid_ether_addr(dev->dev_addr)) { |
2018 | /* eeprom values are valid so use them */ | 2036 | /* eeprom values are valid so use them */ |
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c index a1e4b3895b33..da8b977a5357 100644 --- a/drivers/net/smsc9420.c +++ b/drivers/net/smsc9420.c | |||
@@ -666,7 +666,7 @@ static irqreturn_t smsc9420_isr(int irq, void *dev_id) | |||
666 | smsc9420_pci_flush_write(pd); | 666 | smsc9420_pci_flush_write(pd); |
667 | 667 | ||
668 | ints_to_clear |= (DMAC_STS_RX_ | DMAC_STS_NIS_); | 668 | ints_to_clear |= (DMAC_STS_RX_ | DMAC_STS_NIS_); |
669 | netif_rx_schedule(&pd->napi); | 669 | napi_schedule(&pd->napi); |
670 | } | 670 | } |
671 | 671 | ||
672 | if (ints_to_clear) | 672 | if (ints_to_clear) |
@@ -889,7 +889,7 @@ static int smsc9420_rx_poll(struct napi_struct *napi, int budget) | |||
889 | smsc9420_pci_flush_write(pd); | 889 | smsc9420_pci_flush_write(pd); |
890 | 890 | ||
891 | if (work_done < budget) { | 891 | if (work_done < budget) { |
892 | netif_rx_complete(&pd->napi); | 892 | napi_complete(&pd->napi); |
893 | 893 | ||
894 | /* re-enable RX DMA interrupts */ | 894 | /* re-enable RX DMA interrupts */ |
895 | dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); | 895 | dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); |
@@ -1156,7 +1156,7 @@ static int smsc9420_mii_probe(struct net_device *dev) | |||
1156 | smsc_info(PROBE, "PHY addr %d, phy_id 0x%08X", phydev->addr, | 1156 | smsc_info(PROBE, "PHY addr %d, phy_id 0x%08X", phydev->addr, |
1157 | phydev->phy_id); | 1157 | phydev->phy_id); |
1158 | 1158 | ||
1159 | phydev = phy_connect(dev, phydev->dev.bus_id, | 1159 | phydev = phy_connect(dev, dev_name(&phydev->dev), |
1160 | &smsc9420_phy_adjust_link, 0, PHY_INTERFACE_MODE_MII); | 1160 | &smsc9420_phy_adjust_link, 0, PHY_INTERFACE_MODE_MII); |
1161 | 1161 | ||
1162 | if (IS_ERR(phydev)) { | 1162 | if (IS_ERR(phydev)) { |
@@ -1165,7 +1165,7 @@ static int smsc9420_mii_probe(struct net_device *dev) | |||
1165 | } | 1165 | } |
1166 | 1166 | ||
1167 | pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", | 1167 | pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", |
1168 | dev->name, phydev->drv->name, phydev->dev.bus_id, phydev->irq); | 1168 | dev->name, phydev->drv->name, dev_name(&phydev->dev), phydev->irq); |
1169 | 1169 | ||
1170 | /* mask with MAC supported features */ | 1170 | /* mask with MAC supported features */ |
1171 | phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | | 1171 | phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | |
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index 88d2c67788df..7f6b4a4052ee 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c | |||
@@ -1301,7 +1301,7 @@ static int spider_net_poll(struct napi_struct *napi, int budget) | |||
1301 | /* if all packets are in the stack, enable interrupts and return 0 */ | 1301 | /* if all packets are in the stack, enable interrupts and return 0 */ |
1302 | /* if not, return 1 */ | 1302 | /* if not, return 1 */ |
1303 | if (packets_done < budget) { | 1303 | if (packets_done < budget) { |
1304 | netif_rx_complete(napi); | 1304 | napi_complete(napi); |
1305 | spider_net_rx_irq_on(card); | 1305 | spider_net_rx_irq_on(card); |
1306 | card->ignore_rx_ramfull = 0; | 1306 | card->ignore_rx_ramfull = 0; |
1307 | } | 1307 | } |
@@ -1528,7 +1528,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg, | |||
1528 | spider_net_refill_rx_chain(card); | 1528 | spider_net_refill_rx_chain(card); |
1529 | spider_net_enable_rxdmac(card); | 1529 | spider_net_enable_rxdmac(card); |
1530 | card->num_rx_ints ++; | 1530 | card->num_rx_ints ++; |
1531 | netif_rx_schedule(&card->napi); | 1531 | napi_schedule(&card->napi); |
1532 | } | 1532 | } |
1533 | show_error = 0; | 1533 | show_error = 0; |
1534 | break; | 1534 | break; |
@@ -1548,7 +1548,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg, | |||
1548 | spider_net_refill_rx_chain(card); | 1548 | spider_net_refill_rx_chain(card); |
1549 | spider_net_enable_rxdmac(card); | 1549 | spider_net_enable_rxdmac(card); |
1550 | card->num_rx_ints ++; | 1550 | card->num_rx_ints ++; |
1551 | netif_rx_schedule(&card->napi); | 1551 | napi_schedule(&card->napi); |
1552 | show_error = 0; | 1552 | show_error = 0; |
1553 | break; | 1553 | break; |
1554 | 1554 | ||
@@ -1562,7 +1562,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg, | |||
1562 | spider_net_refill_rx_chain(card); | 1562 | spider_net_refill_rx_chain(card); |
1563 | spider_net_enable_rxdmac(card); | 1563 | spider_net_enable_rxdmac(card); |
1564 | card->num_rx_ints ++; | 1564 | card->num_rx_ints ++; |
1565 | netif_rx_schedule(&card->napi); | 1565 | napi_schedule(&card->napi); |
1566 | show_error = 0; | 1566 | show_error = 0; |
1567 | break; | 1567 | break; |
1568 | 1568 | ||
@@ -1656,11 +1656,11 @@ spider_net_interrupt(int irq, void *ptr) | |||
1656 | 1656 | ||
1657 | if (status_reg & SPIDER_NET_RXINT ) { | 1657 | if (status_reg & SPIDER_NET_RXINT ) { |
1658 | spider_net_rx_irq_off(card); | 1658 | spider_net_rx_irq_off(card); |
1659 | netif_rx_schedule(&card->napi); | 1659 | napi_schedule(&card->napi); |
1660 | card->num_rx_ints ++; | 1660 | card->num_rx_ints ++; |
1661 | } | 1661 | } |
1662 | if (status_reg & SPIDER_NET_TXINT) | 1662 | if (status_reg & SPIDER_NET_TXINT) |
1663 | netif_rx_schedule(&card->napi); | 1663 | napi_schedule(&card->napi); |
1664 | 1664 | ||
1665 | if (status_reg & SPIDER_NET_LINKINT) | 1665 | if (status_reg & SPIDER_NET_LINKINT) |
1666 | spider_net_link_reset(netdev); | 1666 | spider_net_link_reset(netdev); |
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c index da3a76b18eff..98fe79515bab 100644 --- a/drivers/net/starfire.c +++ b/drivers/net/starfire.c | |||
@@ -1342,8 +1342,8 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) | |||
1342 | if (intr_status & (IntrRxDone | IntrRxEmpty)) { | 1342 | if (intr_status & (IntrRxDone | IntrRxEmpty)) { |
1343 | u32 enable; | 1343 | u32 enable; |
1344 | 1344 | ||
1345 | if (likely(netif_rx_schedule_prep(&np->napi))) { | 1345 | if (likely(napi_schedule_prep(&np->napi))) { |
1346 | __netif_rx_schedule(&np->napi); | 1346 | __napi_schedule(&np->napi); |
1347 | enable = readl(ioaddr + IntrEnable); | 1347 | enable = readl(ioaddr + IntrEnable); |
1348 | enable &= ~(IntrRxDone | IntrRxEmpty); | 1348 | enable &= ~(IntrRxDone | IntrRxEmpty); |
1349 | writel(enable, ioaddr + IntrEnable); | 1349 | writel(enable, ioaddr + IntrEnable); |
@@ -1587,7 +1587,7 @@ static int netdev_poll(struct napi_struct *napi, int budget) | |||
1587 | intr_status = readl(ioaddr + IntrStatus); | 1587 | intr_status = readl(ioaddr + IntrStatus); |
1588 | } while (intr_status & (IntrRxDone | IntrRxEmpty)); | 1588 | } while (intr_status & (IntrRxDone | IntrRxEmpty)); |
1589 | 1589 | ||
1590 | netif_rx_complete(napi); | 1590 | napi_complete(napi); |
1591 | intr_status = readl(ioaddr + IntrEnable); | 1591 | intr_status = readl(ioaddr + IntrEnable); |
1592 | intr_status |= IntrRxDone | IntrRxEmpty; | 1592 | intr_status |= IntrRxDone | IntrRxEmpty; |
1593 | writel(intr_status, ioaddr + IntrEnable); | 1593 | writel(intr_status, ioaddr + IntrEnable); |
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c index b17efa9cc530..fc1e7f1d024b 100644 --- a/drivers/net/sungem.c +++ b/drivers/net/sungem.c | |||
@@ -921,7 +921,7 @@ static int gem_poll(struct napi_struct *napi, int budget) | |||
921 | gp->status = readl(gp->regs + GREG_STAT); | 921 | gp->status = readl(gp->regs + GREG_STAT); |
922 | } while (gp->status & GREG_STAT_NAPI); | 922 | } while (gp->status & GREG_STAT_NAPI); |
923 | 923 | ||
924 | __netif_rx_complete(napi); | 924 | __napi_complete(napi); |
925 | gem_enable_ints(gp); | 925 | gem_enable_ints(gp); |
926 | 926 | ||
927 | spin_unlock_irqrestore(&gp->lock, flags); | 927 | spin_unlock_irqrestore(&gp->lock, flags); |
@@ -944,7 +944,7 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id) | |||
944 | 944 | ||
945 | spin_lock_irqsave(&gp->lock, flags); | 945 | spin_lock_irqsave(&gp->lock, flags); |
946 | 946 | ||
947 | if (netif_rx_schedule_prep(&gp->napi)) { | 947 | if (napi_schedule_prep(&gp->napi)) { |
948 | u32 gem_status = readl(gp->regs + GREG_STAT); | 948 | u32 gem_status = readl(gp->regs + GREG_STAT); |
949 | 949 | ||
950 | if (gem_status == 0) { | 950 | if (gem_status == 0) { |
@@ -954,7 +954,7 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id) | |||
954 | } | 954 | } |
955 | gp->status = gem_status; | 955 | gp->status = gem_status; |
956 | gem_disable_ints(gp); | 956 | gem_disable_ints(gp); |
957 | __netif_rx_schedule(&gp->napi); | 957 | __napi_schedule(&gp->napi); |
958 | } | 958 | } |
959 | 959 | ||
960 | spin_unlock_irqrestore(&gp->lock, flags); | 960 | spin_unlock_irqrestore(&gp->lock, flags); |
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c index bcd0e60cbda9..b52a1c088f37 100644 --- a/drivers/net/tc35815.c +++ b/drivers/net/tc35815.c | |||
@@ -725,7 +725,7 @@ static int tc_mii_probe(struct net_device *dev) | |||
725 | } | 725 | } |
726 | 726 | ||
727 | /* attach the mac to the phy */ | 727 | /* attach the mac to the phy */ |
728 | phydev = phy_connect(dev, phydev->dev.bus_id, | 728 | phydev = phy_connect(dev, dev_name(&phydev->dev), |
729 | &tc_handle_link_change, 0, | 729 | &tc_handle_link_change, 0, |
730 | lp->chiptype == TC35815_TX4939 ? | 730 | lp->chiptype == TC35815_TX4939 ? |
731 | PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII); | 731 | PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII); |
@@ -735,7 +735,7 @@ static int tc_mii_probe(struct net_device *dev) | |||
735 | } | 735 | } |
736 | printk(KERN_INFO "%s: attached PHY driver [%s] " | 736 | printk(KERN_INFO "%s: attached PHY driver [%s] " |
737 | "(mii_bus:phy_addr=%s, id=%x)\n", | 737 | "(mii_bus:phy_addr=%s, id=%x)\n", |
738 | dev->name, phydev->drv->name, phydev->dev.bus_id, | 738 | dev->name, phydev->drv->name, dev_name(&phydev->dev), |
739 | phydev->phy_id); | 739 | phydev->phy_id); |
740 | 740 | ||
741 | /* mask with MAC supported features */ | 741 | /* mask with MAC supported features */ |
@@ -1609,8 +1609,8 @@ static irqreturn_t tc35815_interrupt(int irq, void *dev_id) | |||
1609 | if (!(dmactl & DMA_IntMask)) { | 1609 | if (!(dmactl & DMA_IntMask)) { |
1610 | /* disable interrupts */ | 1610 | /* disable interrupts */ |
1611 | tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl); | 1611 | tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl); |
1612 | if (netif_rx_schedule_prep(&lp->napi)) | 1612 | if (napi_schedule_prep(&lp->napi)) |
1613 | __netif_rx_schedule(&lp->napi); | 1613 | __napi_schedule(&lp->napi); |
1614 | else { | 1614 | else { |
1615 | printk(KERN_ERR "%s: interrupt taken in poll\n", | 1615 | printk(KERN_ERR "%s: interrupt taken in poll\n", |
1616 | dev->name); | 1616 | dev->name); |
@@ -1919,7 +1919,7 @@ static int tc35815_poll(struct napi_struct *napi, int budget) | |||
1919 | spin_unlock(&lp->lock); | 1919 | spin_unlock(&lp->lock); |
1920 | 1920 | ||
1921 | if (received < budget) { | 1921 | if (received < budget) { |
1922 | netif_rx_complete(napi); | 1922 | napi_complete(napi); |
1923 | /* enable interrupts */ | 1923 | /* enable interrupts */ |
1924 | tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl); | 1924 | tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl); |
1925 | } | 1925 | } |
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c index a7a4dc4d6313..be9f38f8f0bf 100644 --- a/drivers/net/tehuti.c +++ b/drivers/net/tehuti.c | |||
@@ -265,8 +265,8 @@ static irqreturn_t bdx_isr_napi(int irq, void *dev) | |||
265 | bdx_isr_extra(priv, isr); | 265 | bdx_isr_extra(priv, isr); |
266 | 266 | ||
267 | if (isr & (IR_RX_DESC_0 | IR_TX_FREE_0)) { | 267 | if (isr & (IR_RX_DESC_0 | IR_TX_FREE_0)) { |
268 | if (likely(netif_rx_schedule_prep(&priv->napi))) { | 268 | if (likely(napi_schedule_prep(&priv->napi))) { |
269 | __netif_rx_schedule(&priv->napi); | 269 | __napi_schedule(&priv->napi); |
270 | RET(IRQ_HANDLED); | 270 | RET(IRQ_HANDLED); |
271 | } else { | 271 | } else { |
272 | /* NOTE: we get here if intr has slipped into window | 272 | /* NOTE: we get here if intr has slipped into window |
@@ -302,7 +302,7 @@ static int bdx_poll(struct napi_struct *napi, int budget) | |||
302 | * device lock and allow waiting tasks (eg rmmod) to advance) */ | 302 | * device lock and allow waiting tasks (eg rmmod) to advance) */ |
303 | priv->napi_stop = 0; | 303 | priv->napi_stop = 0; |
304 | 304 | ||
305 | netif_rx_complete(napi); | 305 | napi_complete(napi); |
306 | bdx_enable_interrupts(priv); | 306 | bdx_enable_interrupts(priv); |
307 | } | 307 | } |
308 | return work_done; | 308 | return work_done; |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 8b3f84685387..5b3d60568d55 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -860,7 +860,7 @@ static int tg3_bmcr_reset(struct tg3 *tp) | |||
860 | 860 | ||
861 | static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg) | 861 | static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg) |
862 | { | 862 | { |
863 | struct tg3 *tp = (struct tg3 *)bp->priv; | 863 | struct tg3 *tp = bp->priv; |
864 | u32 val; | 864 | u32 val; |
865 | 865 | ||
866 | if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED) | 866 | if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED) |
@@ -874,7 +874,7 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg) | |||
874 | 874 | ||
875 | static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val) | 875 | static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val) |
876 | { | 876 | { |
877 | struct tg3 *tp = (struct tg3 *)bp->priv; | 877 | struct tg3 *tp = bp->priv; |
878 | 878 | ||
879 | if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED) | 879 | if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED) |
880 | return -EAGAIN; | 880 | return -EAGAIN; |
@@ -4460,7 +4460,7 @@ static int tg3_poll(struct napi_struct *napi, int budget) | |||
4460 | sblk->status &= ~SD_STATUS_UPDATED; | 4460 | sblk->status &= ~SD_STATUS_UPDATED; |
4461 | 4461 | ||
4462 | if (likely(!tg3_has_work(tp))) { | 4462 | if (likely(!tg3_has_work(tp))) { |
4463 | netif_rx_complete(napi); | 4463 | napi_complete(napi); |
4464 | tg3_restart_ints(tp); | 4464 | tg3_restart_ints(tp); |
4465 | break; | 4465 | break; |
4466 | } | 4466 | } |
@@ -4470,7 +4470,7 @@ static int tg3_poll(struct napi_struct *napi, int budget) | |||
4470 | 4470 | ||
4471 | tx_recovery: | 4471 | tx_recovery: |
4472 | /* work_done is guaranteed to be less than budget. */ | 4472 | /* work_done is guaranteed to be less than budget. */ |
4473 | netif_rx_complete(napi); | 4473 | napi_complete(napi); |
4474 | schedule_work(&tp->reset_task); | 4474 | schedule_work(&tp->reset_task); |
4475 | return work_done; | 4475 | return work_done; |
4476 | } | 4476 | } |
@@ -4519,7 +4519,7 @@ static irqreturn_t tg3_msi_1shot(int irq, void *dev_id) | |||
4519 | prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); | 4519 | prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); |
4520 | 4520 | ||
4521 | if (likely(!tg3_irq_sync(tp))) | 4521 | if (likely(!tg3_irq_sync(tp))) |
4522 | netif_rx_schedule(&tp->napi); | 4522 | napi_schedule(&tp->napi); |
4523 | 4523 | ||
4524 | return IRQ_HANDLED; | 4524 | return IRQ_HANDLED; |
4525 | } | 4525 | } |
@@ -4544,7 +4544,7 @@ static irqreturn_t tg3_msi(int irq, void *dev_id) | |||
4544 | */ | 4544 | */ |
4545 | tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); | 4545 | tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); |
4546 | if (likely(!tg3_irq_sync(tp))) | 4546 | if (likely(!tg3_irq_sync(tp))) |
4547 | netif_rx_schedule(&tp->napi); | 4547 | napi_schedule(&tp->napi); |
4548 | 4548 | ||
4549 | return IRQ_RETVAL(1); | 4549 | return IRQ_RETVAL(1); |
4550 | } | 4550 | } |
@@ -4586,7 +4586,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id) | |||
4586 | sblk->status &= ~SD_STATUS_UPDATED; | 4586 | sblk->status &= ~SD_STATUS_UPDATED; |
4587 | if (likely(tg3_has_work(tp))) { | 4587 | if (likely(tg3_has_work(tp))) { |
4588 | prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); | 4588 | prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); |
4589 | netif_rx_schedule(&tp->napi); | 4589 | napi_schedule(&tp->napi); |
4590 | } else { | 4590 | } else { |
4591 | /* No work, shared interrupt perhaps? re-enable | 4591 | /* No work, shared interrupt perhaps? re-enable |
4592 | * interrupts, and flush that PCI write | 4592 | * interrupts, and flush that PCI write |
@@ -4632,7 +4632,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) | |||
4632 | tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); | 4632 | tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); |
4633 | if (tg3_irq_sync(tp)) | 4633 | if (tg3_irq_sync(tp)) |
4634 | goto out; | 4634 | goto out; |
4635 | if (netif_rx_schedule_prep(&tp->napi)) { | 4635 | if (napi_schedule_prep(&tp->napi)) { |
4636 | prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); | 4636 | prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); |
4637 | /* Update last_tag to mark that this status has been | 4637 | /* Update last_tag to mark that this status has been |
4638 | * seen. Because interrupt may be shared, we may be | 4638 | * seen. Because interrupt may be shared, we may be |
@@ -4640,7 +4640,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) | |||
4640 | * if tg3_poll() is not scheduled. | 4640 | * if tg3_poll() is not scheduled. |
4641 | */ | 4641 | */ |
4642 | tp->last_tag = sblk->status_tag; | 4642 | tp->last_tag = sblk->status_tag; |
4643 | __netif_rx_schedule(&tp->napi); | 4643 | __napi_schedule(&tp->napi); |
4644 | } | 4644 | } |
4645 | out: | 4645 | out: |
4646 | return IRQ_RETVAL(handled); | 4646 | return IRQ_RETVAL(handled); |
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c index 43853e3b210e..4a65fc2dd928 100644 --- a/drivers/net/tokenring/3c359.c +++ b/drivers/net/tokenring/3c359.c | |||
@@ -274,6 +274,15 @@ static void xl_ee_write(struct net_device *dev, int ee_addr, u16 ee_value) | |||
274 | 274 | ||
275 | return ; | 275 | return ; |
276 | } | 276 | } |
277 | |||
278 | static const struct net_device_ops xl_netdev_ops = { | ||
279 | .ndo_open = xl_open, | ||
280 | .ndo_stop = xl_close, | ||
281 | .ndo_start_xmit = xl_xmit, | ||
282 | .ndo_change_mtu = xl_change_mtu, | ||
283 | .ndo_set_multicast_list = xl_set_rx_mode, | ||
284 | .ndo_set_mac_address = xl_set_mac_address, | ||
285 | }; | ||
277 | 286 | ||
278 | static int __devinit xl_probe(struct pci_dev *pdev, | 287 | static int __devinit xl_probe(struct pci_dev *pdev, |
279 | const struct pci_device_id *ent) | 288 | const struct pci_device_id *ent) |
@@ -337,13 +346,7 @@ static int __devinit xl_probe(struct pci_dev *pdev, | |||
337 | return i ; | 346 | return i ; |
338 | } | 347 | } |
339 | 348 | ||
340 | dev->open=&xl_open; | 349 | dev->netdev_ops = &xl_netdev_ops; |
341 | dev->hard_start_xmit=&xl_xmit; | ||
342 | dev->change_mtu=&xl_change_mtu; | ||
343 | dev->stop=&xl_close; | ||
344 | dev->do_ioctl=NULL; | ||
345 | dev->set_multicast_list=&xl_set_rx_mode; | ||
346 | dev->set_mac_address=&xl_set_mac_address ; | ||
347 | SET_NETDEV_DEV(dev, &pdev->dev); | 350 | SET_NETDEV_DEV(dev, &pdev->dev); |
348 | 351 | ||
349 | pci_set_drvdata(pdev,dev) ; | 352 | pci_set_drvdata(pdev,dev) ; |
diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c index b566d6d79ecd..b9db1b5a58a3 100644 --- a/drivers/net/tokenring/abyss.c +++ b/drivers/net/tokenring/abyss.c | |||
@@ -92,6 +92,8 @@ static void abyss_sifwritew(struct net_device *dev, unsigned short val, unsigned | |||
92 | outw(val, dev->base_addr + reg); | 92 | outw(val, dev->base_addr + reg); |
93 | } | 93 | } |
94 | 94 | ||
95 | static struct net_device_ops abyss_netdev_ops; | ||
96 | |||
95 | static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_id *ent) | 97 | static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_id *ent) |
96 | { | 98 | { |
97 | static int versionprinted; | 99 | static int versionprinted; |
@@ -157,8 +159,7 @@ static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_ | |||
157 | 159 | ||
158 | memcpy(tp->ProductID, "Madge PCI 16/4 Mk2", PROD_ID_SIZE + 1); | 160 | memcpy(tp->ProductID, "Madge PCI 16/4 Mk2", PROD_ID_SIZE + 1); |
159 | 161 | ||
160 | dev->open = abyss_open; | 162 | dev->netdev_ops = &abyss_netdev_ops; |
161 | dev->stop = abyss_close; | ||
162 | 163 | ||
163 | pci_set_drvdata(pdev, dev); | 164 | pci_set_drvdata(pdev, dev); |
164 | SET_NETDEV_DEV(dev, &pdev->dev); | 165 | SET_NETDEV_DEV(dev, &pdev->dev); |
@@ -450,6 +451,11 @@ static struct pci_driver abyss_driver = { | |||
450 | 451 | ||
451 | static int __init abyss_init (void) | 452 | static int __init abyss_init (void) |
452 | { | 453 | { |
454 | abyss_netdev_ops = tms380tr_netdev_ops; | ||
455 | |||
456 | abyss_netdev_ops.ndo_open = abyss_open; | ||
457 | abyss_netdev_ops.ndo_stop = abyss_close; | ||
458 | |||
453 | return pci_register_driver(&abyss_driver); | 459 | return pci_register_driver(&abyss_driver); |
454 | } | 460 | } |
455 | 461 | ||
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c index fa7bce6e0c6d..9d896116cf76 100644 --- a/drivers/net/tokenring/ibmtr.c +++ b/drivers/net/tokenring/ibmtr.c | |||
@@ -200,7 +200,6 @@ static void tr_rx(struct net_device *dev); | |||
200 | static void ibmtr_reset_timer(struct timer_list*tmr,struct net_device *dev); | 200 | static void ibmtr_reset_timer(struct timer_list*tmr,struct net_device *dev); |
201 | static void tok_rerun(unsigned long dev_addr); | 201 | static void tok_rerun(unsigned long dev_addr); |
202 | static void ibmtr_readlog(struct net_device *dev); | 202 | static void ibmtr_readlog(struct net_device *dev); |
203 | static struct net_device_stats *tok_get_stats(struct net_device *dev); | ||
204 | static int ibmtr_change_mtu(struct net_device *dev, int mtu); | 203 | static int ibmtr_change_mtu(struct net_device *dev, int mtu); |
205 | static void find_turbo_adapters(int *iolist); | 204 | static void find_turbo_adapters(int *iolist); |
206 | 205 | ||
@@ -816,18 +815,21 @@ static unsigned char __devinit get_sram_size(struct tok_info *adapt_info) | |||
816 | 815 | ||
817 | /*****************************************************************************/ | 816 | /*****************************************************************************/ |
818 | 817 | ||
818 | static const struct net_device_ops trdev_netdev_ops = { | ||
819 | .ndo_open = tok_open, | ||
820 | .ndo_stop = tok_close, | ||
821 | .ndo_start_xmit = tok_send_packet, | ||
822 | .ndo_set_multicast_list = tok_set_multicast_list, | ||
823 | .ndo_change_mtu = ibmtr_change_mtu, | ||
824 | }; | ||
825 | |||
819 | static int __devinit trdev_init(struct net_device *dev) | 826 | static int __devinit trdev_init(struct net_device *dev) |
820 | { | 827 | { |
821 | struct tok_info *ti = netdev_priv(dev); | 828 | struct tok_info *ti = netdev_priv(dev); |
822 | 829 | ||
823 | SET_PAGE(ti->srb_page); | 830 | SET_PAGE(ti->srb_page); |
824 | ti->open_failure = NO ; | 831 | ti->open_failure = NO ; |
825 | dev->open = tok_open; | 832 | dev->netdev_ops = &trdev_netdev_ops; |
826 | dev->stop = tok_close; | ||
827 | dev->hard_start_xmit = tok_send_packet; | ||
828 | dev->get_stats = tok_get_stats; | ||
829 | dev->set_multicast_list = tok_set_multicast_list; | ||
830 | dev->change_mtu = ibmtr_change_mtu; | ||
831 | 833 | ||
832 | return 0; | 834 | return 0; |
833 | } | 835 | } |
@@ -1460,7 +1462,7 @@ static irqreturn_t tok_interrupt(int irq, void *dev_id) | |||
1460 | "%02X\n", | 1462 | "%02X\n", |
1461 | (int)retcode, (int)readb(ti->ssb + 6)); | 1463 | (int)retcode, (int)readb(ti->ssb + 6)); |
1462 | else | 1464 | else |
1463 | ti->tr_stats.tx_packets++; | 1465 | dev->stats.tx_packets++; |
1464 | break; | 1466 | break; |
1465 | case XMIT_XID_CMD: | 1467 | case XMIT_XID_CMD: |
1466 | DPRINTK("xmit xid ret_code: %02X\n", | 1468 | DPRINTK("xmit xid ret_code: %02X\n", |
@@ -1646,7 +1648,7 @@ static void tr_tx(struct net_device *dev) | |||
1646 | break; | 1648 | break; |
1647 | } | 1649 | } |
1648 | writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); | 1650 | writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); |
1649 | ti->tr_stats.tx_bytes += ti->current_skb->len; | 1651 | dev->stats.tx_bytes += ti->current_skb->len; |
1650 | dev_kfree_skb_irq(ti->current_skb); | 1652 | dev_kfree_skb_irq(ti->current_skb); |
1651 | ti->current_skb = NULL; | 1653 | ti->current_skb = NULL; |
1652 | netif_wake_queue(dev); | 1654 | netif_wake_queue(dev); |
@@ -1722,7 +1724,7 @@ static void tr_rx(struct net_device *dev) | |||
1722 | if (readb(llc + offsetof(struct trllc, llc)) != UI_CMD) { | 1724 | if (readb(llc + offsetof(struct trllc, llc)) != UI_CMD) { |
1723 | SET_PAGE(ti->asb_page); | 1725 | SET_PAGE(ti->asb_page); |
1724 | writeb(DATA_LOST, ti->asb + RETCODE_OFST); | 1726 | writeb(DATA_LOST, ti->asb + RETCODE_OFST); |
1725 | ti->tr_stats.rx_dropped++; | 1727 | dev->stats.rx_dropped++; |
1726 | writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); | 1728 | writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); |
1727 | return; | 1729 | return; |
1728 | } | 1730 | } |
@@ -1757,7 +1759,7 @@ static void tr_rx(struct net_device *dev) | |||
1757 | 1759 | ||
1758 | if (!(skb = dev_alloc_skb(skb_size))) { | 1760 | if (!(skb = dev_alloc_skb(skb_size))) { |
1759 | DPRINTK("out of memory. frame dropped.\n"); | 1761 | DPRINTK("out of memory. frame dropped.\n"); |
1760 | ti->tr_stats.rx_dropped++; | 1762 | dev->stats.rx_dropped++; |
1761 | SET_PAGE(ti->asb_page); | 1763 | SET_PAGE(ti->asb_page); |
1762 | writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code)); | 1764 | writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code)); |
1763 | writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); | 1765 | writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); |
@@ -1813,8 +1815,8 @@ static void tr_rx(struct net_device *dev) | |||
1813 | 1815 | ||
1814 | writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); | 1816 | writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); |
1815 | 1817 | ||
1816 | ti->tr_stats.rx_bytes += skb->len; | 1818 | dev->stats.rx_bytes += skb->len; |
1817 | ti->tr_stats.rx_packets++; | 1819 | dev->stats.rx_packets++; |
1818 | 1820 | ||
1819 | skb->protocol = tr_type_trans(skb, dev); | 1821 | skb->protocol = tr_type_trans(skb, dev); |
1820 | if (IPv4_p) { | 1822 | if (IPv4_p) { |
@@ -1876,21 +1878,6 @@ static void ibmtr_readlog(struct net_device *dev) | |||
1876 | 1878 | ||
1877 | /*****************************************************************************/ | 1879 | /*****************************************************************************/ |
1878 | 1880 | ||
1879 | /* tok_get_stats(): Basically a scaffold routine which will return | ||
1880 | the address of the tr_statistics structure associated with | ||
1881 | this device -- the tr.... structure is an ethnet look-alike | ||
1882 | so at least for this iteration may suffice. */ | ||
1883 | |||
1884 | static struct net_device_stats *tok_get_stats(struct net_device *dev) | ||
1885 | { | ||
1886 | |||
1887 | struct tok_info *toki; | ||
1888 | toki = netdev_priv(dev); | ||
1889 | return (struct net_device_stats *) &toki->tr_stats; | ||
1890 | } | ||
1891 | |||
1892 | /*****************************************************************************/ | ||
1893 | |||
1894 | static int ibmtr_change_mtu(struct net_device *dev, int mtu) | 1881 | static int ibmtr_change_mtu(struct net_device *dev, int mtu) |
1895 | { | 1882 | { |
1896 | struct tok_info *ti = netdev_priv(dev); | 1883 | struct tok_info *ti = netdev_priv(dev); |
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c index 239c75217b12..0b2b7925da22 100644 --- a/drivers/net/tokenring/lanstreamer.c +++ b/drivers/net/tokenring/lanstreamer.c | |||
@@ -207,7 +207,6 @@ static int streamer_xmit(struct sk_buff *skb, struct net_device *dev); | |||
207 | static int streamer_close(struct net_device *dev); | 207 | static int streamer_close(struct net_device *dev); |
208 | static void streamer_set_rx_mode(struct net_device *dev); | 208 | static void streamer_set_rx_mode(struct net_device *dev); |
209 | static irqreturn_t streamer_interrupt(int irq, void *dev_id); | 209 | static irqreturn_t streamer_interrupt(int irq, void *dev_id); |
210 | static struct net_device_stats *streamer_get_stats(struct net_device *dev); | ||
211 | static int streamer_set_mac_address(struct net_device *dev, void *addr); | 210 | static int streamer_set_mac_address(struct net_device *dev, void *addr); |
212 | static void streamer_arb_cmd(struct net_device *dev); | 211 | static void streamer_arb_cmd(struct net_device *dev); |
213 | static int streamer_change_mtu(struct net_device *dev, int mtu); | 212 | static int streamer_change_mtu(struct net_device *dev, int mtu); |
@@ -222,6 +221,18 @@ struct streamer_private *dev_streamer=NULL; | |||
222 | #endif | 221 | #endif |
223 | #endif | 222 | #endif |
224 | 223 | ||
224 | static const struct net_device_ops streamer_netdev_ops = { | ||
225 | .ndo_open = streamer_open, | ||
226 | .ndo_stop = streamer_close, | ||
227 | .ndo_start_xmit = streamer_xmit, | ||
228 | .ndo_change_mtu = streamer_change_mtu, | ||
229 | #if STREAMER_IOCTL | ||
230 | .ndo_do_ioctl = streamer_ioctl, | ||
231 | #endif | ||
232 | .ndo_set_multicast_list = streamer_set_rx_mode, | ||
233 | .ndo_set_mac_address = streamer_set_mac_address, | ||
234 | }; | ||
235 | |||
225 | static int __devinit streamer_init_one(struct pci_dev *pdev, | 236 | static int __devinit streamer_init_one(struct pci_dev *pdev, |
226 | const struct pci_device_id *ent) | 237 | const struct pci_device_id *ent) |
227 | { | 238 | { |
@@ -321,18 +332,7 @@ static int __devinit streamer_init_one(struct pci_dev *pdev, | |||
321 | init_waitqueue_head(&streamer_priv->srb_wait); | 332 | init_waitqueue_head(&streamer_priv->srb_wait); |
322 | init_waitqueue_head(&streamer_priv->trb_wait); | 333 | init_waitqueue_head(&streamer_priv->trb_wait); |
323 | 334 | ||
324 | dev->open = &streamer_open; | 335 | dev->netdev_ops = &streamer_netdev_ops; |
325 | dev->hard_start_xmit = &streamer_xmit; | ||
326 | dev->change_mtu = &streamer_change_mtu; | ||
327 | dev->stop = &streamer_close; | ||
328 | #if STREAMER_IOCTL | ||
329 | dev->do_ioctl = &streamer_ioctl; | ||
330 | #else | ||
331 | dev->do_ioctl = NULL; | ||
332 | #endif | ||
333 | dev->set_multicast_list = &streamer_set_rx_mode; | ||
334 | dev->get_stats = &streamer_get_stats; | ||
335 | dev->set_mac_address = &streamer_set_mac_address; | ||
336 | dev->irq = pdev->irq; | 336 | dev->irq = pdev->irq; |
337 | dev->base_addr=pio_start; | 337 | dev->base_addr=pio_start; |
338 | SET_NETDEV_DEV(dev, &pdev->dev); | 338 | SET_NETDEV_DEV(dev, &pdev->dev); |
@@ -937,7 +937,7 @@ static void streamer_rx(struct net_device *dev) | |||
937 | if (skb == NULL) | 937 | if (skb == NULL) |
938 | { | 938 | { |
939 | printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n", dev->name); | 939 | printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n", dev->name); |
940 | streamer_priv->streamer_stats.rx_dropped++; | 940 | dev->stats.rx_dropped++; |
941 | } else { /* we allocated an skb OK */ | 941 | } else { /* we allocated an skb OK */ |
942 | if (buffer_cnt == 1) { | 942 | if (buffer_cnt == 1) { |
943 | /* release the DMA mapping */ | 943 | /* release the DMA mapping */ |
@@ -1009,8 +1009,8 @@ static void streamer_rx(struct net_device *dev) | |||
1009 | /* send up to the protocol */ | 1009 | /* send up to the protocol */ |
1010 | netif_rx(skb); | 1010 | netif_rx(skb); |
1011 | } | 1011 | } |
1012 | streamer_priv->streamer_stats.rx_packets++; | 1012 | dev->stats.rx_packets++; |
1013 | streamer_priv->streamer_stats.rx_bytes += length; | 1013 | dev->stats.rx_bytes += length; |
1014 | } /* if skb == null */ | 1014 | } /* if skb == null */ |
1015 | } /* end received without errors */ | 1015 | } /* end received without errors */ |
1016 | 1016 | ||
@@ -1053,8 +1053,8 @@ static irqreturn_t streamer_interrupt(int irq, void *dev_id) | |||
1053 | while(streamer_priv->streamer_tx_ring[(streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1)].status) { | 1053 | while(streamer_priv->streamer_tx_ring[(streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1)].status) { |
1054 | streamer_priv->tx_ring_last_status = (streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1); | 1054 | streamer_priv->tx_ring_last_status = (streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1); |
1055 | streamer_priv->free_tx_ring_entries++; | 1055 | streamer_priv->free_tx_ring_entries++; |
1056 | streamer_priv->streamer_stats.tx_bytes += streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]->len; | 1056 | dev->stats.tx_bytes += streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]->len; |
1057 | streamer_priv->streamer_stats.tx_packets++; | 1057 | dev->stats.tx_packets++; |
1058 | dev_kfree_skb_irq(streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]); | 1058 | dev_kfree_skb_irq(streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]); |
1059 | streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].buffer = 0xdeadbeef; | 1059 | streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].buffer = 0xdeadbeef; |
1060 | streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].status = 0; | 1060 | streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].status = 0; |
@@ -1484,13 +1484,6 @@ static void streamer_srb_bh(struct net_device *dev) | |||
1484 | } /* switch srb[0] */ | 1484 | } /* switch srb[0] */ |
1485 | } | 1485 | } |
1486 | 1486 | ||
1487 | static struct net_device_stats *streamer_get_stats(struct net_device *dev) | ||
1488 | { | ||
1489 | struct streamer_private *streamer_priv; | ||
1490 | streamer_priv = netdev_priv(dev); | ||
1491 | return (struct net_device_stats *) &streamer_priv->streamer_stats; | ||
1492 | } | ||
1493 | |||
1494 | static int streamer_set_mac_address(struct net_device *dev, void *addr) | 1487 | static int streamer_set_mac_address(struct net_device *dev, void *addr) |
1495 | { | 1488 | { |
1496 | struct sockaddr *saddr = addr; | 1489 | struct sockaddr *saddr = addr; |
diff --git a/drivers/net/tokenring/lanstreamer.h b/drivers/net/tokenring/lanstreamer.h index 13ccee6449c1..3c58d6a3fbc9 100644 --- a/drivers/net/tokenring/lanstreamer.h +++ b/drivers/net/tokenring/lanstreamer.h | |||
@@ -299,7 +299,6 @@ struct streamer_private { | |||
299 | int tx_ring_free, tx_ring_last_status, rx_ring_last_received, | 299 | int tx_ring_free, tx_ring_last_status, rx_ring_last_received, |
300 | free_tx_ring_entries; | 300 | free_tx_ring_entries; |
301 | 301 | ||
302 | struct net_device_stats streamer_stats; | ||
303 | __u16 streamer_lan_status; | 302 | __u16 streamer_lan_status; |
304 | __u8 streamer_ring_speed; | 303 | __u8 streamer_ring_speed; |
305 | __u16 pkt_buf_sz; | 304 | __u16 pkt_buf_sz; |
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c index ecb5c7c96910..77dc9da4c0b9 100644 --- a/drivers/net/tokenring/olympic.c +++ b/drivers/net/tokenring/olympic.c | |||
@@ -187,7 +187,6 @@ static int olympic_close(struct net_device *dev); | |||
187 | static void olympic_set_rx_mode(struct net_device *dev); | 187 | static void olympic_set_rx_mode(struct net_device *dev); |
188 | static void olympic_freemem(struct net_device *dev) ; | 188 | static void olympic_freemem(struct net_device *dev) ; |
189 | static irqreturn_t olympic_interrupt(int irq, void *dev_id); | 189 | static irqreturn_t olympic_interrupt(int irq, void *dev_id); |
190 | static struct net_device_stats * olympic_get_stats(struct net_device *dev); | ||
191 | static int olympic_set_mac_address(struct net_device *dev, void *addr) ; | 190 | static int olympic_set_mac_address(struct net_device *dev, void *addr) ; |
192 | static void olympic_arb_cmd(struct net_device *dev); | 191 | static void olympic_arb_cmd(struct net_device *dev); |
193 | static int olympic_change_mtu(struct net_device *dev, int mtu); | 192 | static int olympic_change_mtu(struct net_device *dev, int mtu); |
@@ -195,6 +194,15 @@ static void olympic_srb_bh(struct net_device *dev) ; | |||
195 | static void olympic_asb_bh(struct net_device *dev) ; | 194 | static void olympic_asb_bh(struct net_device *dev) ; |
196 | static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) ; | 195 | static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) ; |
197 | 196 | ||
197 | static const struct net_device_ops olympic_netdev_ops = { | ||
198 | .ndo_open = olympic_open, | ||
199 | .ndo_stop = olympic_close, | ||
200 | .ndo_start_xmit = olympic_xmit, | ||
201 | .ndo_change_mtu = olympic_change_mtu, | ||
202 | .ndo_set_multicast_list = olympic_set_rx_mode, | ||
203 | .ndo_set_mac_address = olympic_set_mac_address, | ||
204 | }; | ||
205 | |||
198 | static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 206 | static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
199 | { | 207 | { |
200 | struct net_device *dev ; | 208 | struct net_device *dev ; |
@@ -253,14 +261,7 @@ static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device | |||
253 | goto op_free_iomap; | 261 | goto op_free_iomap; |
254 | } | 262 | } |
255 | 263 | ||
256 | dev->open=&olympic_open; | 264 | dev->netdev_ops = &olympic_netdev_ops; |
257 | dev->hard_start_xmit=&olympic_xmit; | ||
258 | dev->change_mtu=&olympic_change_mtu; | ||
259 | dev->stop=&olympic_close; | ||
260 | dev->do_ioctl=NULL; | ||
261 | dev->set_multicast_list=&olympic_set_rx_mode; | ||
262 | dev->get_stats=&olympic_get_stats ; | ||
263 | dev->set_mac_address=&olympic_set_mac_address ; | ||
264 | SET_NETDEV_DEV(dev, &pdev->dev); | 265 | SET_NETDEV_DEV(dev, &pdev->dev); |
265 | 266 | ||
266 | pci_set_drvdata(pdev,dev) ; | 267 | pci_set_drvdata(pdev,dev) ; |
@@ -785,7 +786,7 @@ static void olympic_rx(struct net_device *dev) | |||
785 | } | 786 | } |
786 | olympic_priv->rx_ring_last_received += i ; | 787 | olympic_priv->rx_ring_last_received += i ; |
787 | olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ; | 788 | olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ; |
788 | olympic_priv->olympic_stats.rx_errors++; | 789 | dev->stats.rx_errors++; |
789 | } else { | 790 | } else { |
790 | 791 | ||
791 | if (buffer_cnt == 1) { | 792 | if (buffer_cnt == 1) { |
@@ -796,7 +797,7 @@ static void olympic_rx(struct net_device *dev) | |||
796 | 797 | ||
797 | if (skb == NULL) { | 798 | if (skb == NULL) { |
798 | printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n",dev->name) ; | 799 | printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n",dev->name) ; |
799 | olympic_priv->olympic_stats.rx_dropped++ ; | 800 | dev->stats.rx_dropped++; |
800 | /* Update counters even though we don't transfer the frame */ | 801 | /* Update counters even though we don't transfer the frame */ |
801 | olympic_priv->rx_ring_last_received += i ; | 802 | olympic_priv->rx_ring_last_received += i ; |
802 | olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ; | 803 | olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ; |
@@ -862,8 +863,8 @@ static void olympic_rx(struct net_device *dev) | |||
862 | skb->protocol = tr_type_trans(skb,dev); | 863 | skb->protocol = tr_type_trans(skb,dev); |
863 | netif_rx(skb) ; | 864 | netif_rx(skb) ; |
864 | } | 865 | } |
865 | olympic_priv->olympic_stats.rx_packets++ ; | 866 | dev->stats.rx_packets++ ; |
866 | olympic_priv->olympic_stats.rx_bytes += length ; | 867 | dev->stats.rx_bytes += length ; |
867 | } /* if skb == null */ | 868 | } /* if skb == null */ |
868 | } /* If status & 0x3b */ | 869 | } /* If status & 0x3b */ |
869 | 870 | ||
@@ -971,8 +972,8 @@ static irqreturn_t olympic_interrupt(int irq, void *dev_id) | |||
971 | olympic_priv->tx_ring_last_status++; | 972 | olympic_priv->tx_ring_last_status++; |
972 | olympic_priv->tx_ring_last_status &= (OLYMPIC_TX_RING_SIZE-1); | 973 | olympic_priv->tx_ring_last_status &= (OLYMPIC_TX_RING_SIZE-1); |
973 | olympic_priv->free_tx_ring_entries++; | 974 | olympic_priv->free_tx_ring_entries++; |
974 | olympic_priv->olympic_stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len; | 975 | dev->stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len; |
975 | olympic_priv->olympic_stats.tx_packets++ ; | 976 | dev->stats.tx_packets++ ; |
976 | pci_unmap_single(olympic_priv->pdev, | 977 | pci_unmap_single(olympic_priv->pdev, |
977 | le32_to_cpu(olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer), | 978 | le32_to_cpu(olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer), |
978 | olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len,PCI_DMA_TODEVICE); | 979 | olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len,PCI_DMA_TODEVICE); |
@@ -1344,13 +1345,6 @@ static void olympic_srb_bh(struct net_device *dev) | |||
1344 | 1345 | ||
1345 | } | 1346 | } |
1346 | 1347 | ||
1347 | static struct net_device_stats * olympic_get_stats(struct net_device *dev) | ||
1348 | { | ||
1349 | struct olympic_private *olympic_priv ; | ||
1350 | olympic_priv=netdev_priv(dev); | ||
1351 | return (struct net_device_stats *) &olympic_priv->olympic_stats; | ||
1352 | } | ||
1353 | |||
1354 | static int olympic_set_mac_address (struct net_device *dev, void *addr) | 1348 | static int olympic_set_mac_address (struct net_device *dev, void *addr) |
1355 | { | 1349 | { |
1356 | struct sockaddr *saddr = addr ; | 1350 | struct sockaddr *saddr = addr ; |
diff --git a/drivers/net/tokenring/olympic.h b/drivers/net/tokenring/olympic.h index 10fbba08978f..30631bae4c94 100644 --- a/drivers/net/tokenring/olympic.h +++ b/drivers/net/tokenring/olympic.h | |||
@@ -275,7 +275,6 @@ struct olympic_private { | |||
275 | struct sk_buff *tx_ring_skb[OLYMPIC_TX_RING_SIZE], *rx_ring_skb[OLYMPIC_RX_RING_SIZE]; | 275 | struct sk_buff *tx_ring_skb[OLYMPIC_TX_RING_SIZE], *rx_ring_skb[OLYMPIC_RX_RING_SIZE]; |
276 | int tx_ring_free, tx_ring_last_status, rx_ring_last_received,rx_status_last_received, free_tx_ring_entries; | 276 | int tx_ring_free, tx_ring_last_status, rx_ring_last_received,rx_status_last_received, free_tx_ring_entries; |
277 | 277 | ||
278 | struct net_device_stats olympic_stats ; | ||
279 | u16 olympic_lan_status ; | 278 | u16 olympic_lan_status ; |
280 | u8 olympic_ring_speed ; | 279 | u8 olympic_ring_speed ; |
281 | u16 pkt_buf_sz ; | 280 | u16 pkt_buf_sz ; |
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c index 5be34c2fd483..b11bb72dc7ab 100644 --- a/drivers/net/tokenring/tms380tr.c +++ b/drivers/net/tokenring/tms380tr.c | |||
@@ -2330,6 +2330,17 @@ void tmsdev_term(struct net_device *dev) | |||
2330 | DMA_BIDIRECTIONAL); | 2330 | DMA_BIDIRECTIONAL); |
2331 | } | 2331 | } |
2332 | 2332 | ||
2333 | const struct net_device_ops tms380tr_netdev_ops = { | ||
2334 | .ndo_open = tms380tr_open, | ||
2335 | .ndo_stop = tms380tr_close, | ||
2336 | .ndo_start_xmit = tms380tr_send_packet, | ||
2337 | .ndo_tx_timeout = tms380tr_timeout, | ||
2338 | .ndo_get_stats = tms380tr_get_stats, | ||
2339 | .ndo_set_multicast_list = tms380tr_set_multicast_list, | ||
2340 | .ndo_set_mac_address = tms380tr_set_mac_address, | ||
2341 | }; | ||
2342 | EXPORT_SYMBOL(tms380tr_netdev_ops); | ||
2343 | |||
2333 | int tmsdev_init(struct net_device *dev, struct device *pdev) | 2344 | int tmsdev_init(struct net_device *dev, struct device *pdev) |
2334 | { | 2345 | { |
2335 | struct net_local *tms_local; | 2346 | struct net_local *tms_local; |
@@ -2353,16 +2364,8 @@ int tmsdev_init(struct net_device *dev, struct device *pdev) | |||
2353 | return -ENOMEM; | 2364 | return -ENOMEM; |
2354 | } | 2365 | } |
2355 | 2366 | ||
2356 | /* These can be overridden by the card driver if needed */ | 2367 | dev->netdev_ops = &tms380tr_netdev_ops; |
2357 | dev->open = tms380tr_open; | ||
2358 | dev->stop = tms380tr_close; | ||
2359 | dev->do_ioctl = NULL; | ||
2360 | dev->hard_start_xmit = tms380tr_send_packet; | ||
2361 | dev->tx_timeout = tms380tr_timeout; | ||
2362 | dev->watchdog_timeo = HZ; | 2368 | dev->watchdog_timeo = HZ; |
2363 | dev->get_stats = tms380tr_get_stats; | ||
2364 | dev->set_multicast_list = &tms380tr_set_multicast_list; | ||
2365 | dev->set_mac_address = tms380tr_set_mac_address; | ||
2366 | 2369 | ||
2367 | return 0; | 2370 | return 0; |
2368 | } | 2371 | } |
diff --git a/drivers/net/tokenring/tms380tr.h b/drivers/net/tokenring/tms380tr.h index 7af76d708849..60b30ee38dcb 100644 --- a/drivers/net/tokenring/tms380tr.h +++ b/drivers/net/tokenring/tms380tr.h | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
15 | 15 | ||
16 | /* module prototypes */ | 16 | /* module prototypes */ |
17 | extern const struct net_device_ops tms380tr_netdev_ops; | ||
17 | int tms380tr_open(struct net_device *dev); | 18 | int tms380tr_open(struct net_device *dev); |
18 | int tms380tr_close(struct net_device *dev); | 19 | int tms380tr_close(struct net_device *dev); |
19 | irqreturn_t tms380tr_interrupt(int irq, void *dev_id); | 20 | irqreturn_t tms380tr_interrupt(int irq, void *dev_id); |
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c index 5f601773c260..b397e8785d6d 100644 --- a/drivers/net/tokenring/tmspci.c +++ b/drivers/net/tokenring/tmspci.c | |||
@@ -157,8 +157,8 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic | |||
157 | 157 | ||
158 | tp->tmspriv = cardinfo; | 158 | tp->tmspriv = cardinfo; |
159 | 159 | ||
160 | dev->open = tms380tr_open; | 160 | dev->netdev_ops = &tms380tr_netdev_ops; |
161 | dev->stop = tms380tr_close; | 161 | |
162 | pci_set_drvdata(pdev, dev); | 162 | pci_set_drvdata(pdev, dev); |
163 | SET_NETDEV_DEV(dev, &pdev->dev); | 163 | SET_NETDEV_DEV(dev, &pdev->dev); |
164 | 164 | ||
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c index a9fd2b2ccaf6..bb43e7fb2a50 100644 --- a/drivers/net/tsi108_eth.c +++ b/drivers/net/tsi108_eth.c | |||
@@ -888,7 +888,7 @@ static int tsi108_poll(struct napi_struct *napi, int budget) | |||
888 | 888 | ||
889 | if (num_received < budget) { | 889 | if (num_received < budget) { |
890 | data->rxpending = 0; | 890 | data->rxpending = 0; |
891 | netif_rx_complete(napi); | 891 | napi_complete(napi); |
892 | 892 | ||
893 | TSI_WRITE(TSI108_EC_INTMASK, | 893 | TSI_WRITE(TSI108_EC_INTMASK, |
894 | TSI_READ(TSI108_EC_INTMASK) | 894 | TSI_READ(TSI108_EC_INTMASK) |
@@ -915,11 +915,11 @@ static void tsi108_rx_int(struct net_device *dev) | |||
915 | * | 915 | * |
916 | * This can happen if this code races with tsi108_poll(), which masks | 916 | * This can happen if this code races with tsi108_poll(), which masks |
917 | * the interrupts after tsi108_irq_one() read the mask, but before | 917 | * the interrupts after tsi108_irq_one() read the mask, but before |
918 | * netif_rx_schedule is called. It could also happen due to calls | 918 | * napi_schedule is called. It could also happen due to calls |
919 | * from tsi108_check_rxring(). | 919 | * from tsi108_check_rxring(). |
920 | */ | 920 | */ |
921 | 921 | ||
922 | if (netif_rx_schedule_prep(&data->napi)) { | 922 | if (napi_schedule_prep(&data->napi)) { |
923 | /* Mask, rather than ack, the receive interrupts. The ack | 923 | /* Mask, rather than ack, the receive interrupts. The ack |
924 | * will happen in tsi108_poll(). | 924 | * will happen in tsi108_poll(). |
925 | */ | 925 | */ |
@@ -930,7 +930,7 @@ static void tsi108_rx_int(struct net_device *dev) | |||
930 | | TSI108_INT_RXTHRESH | | 930 | | TSI108_INT_RXTHRESH | |
931 | TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | | 931 | TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | |
932 | TSI108_INT_RXWAIT); | 932 | TSI108_INT_RXWAIT); |
933 | __netif_rx_schedule(&data->napi); | 933 | __napi_schedule(&data->napi); |
934 | } else { | 934 | } else { |
935 | if (!netif_running(dev)) { | 935 | if (!netif_running(dev)) { |
936 | /* This can happen if an interrupt occurs while the | 936 | /* This can happen if an interrupt occurs while the |
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c index 6c3428a37c0b..9f946d421088 100644 --- a/drivers/net/tulip/interrupt.c +++ b/drivers/net/tulip/interrupt.c | |||
@@ -103,7 +103,7 @@ void oom_timer(unsigned long data) | |||
103 | { | 103 | { |
104 | struct net_device *dev = (struct net_device *)data; | 104 | struct net_device *dev = (struct net_device *)data; |
105 | struct tulip_private *tp = netdev_priv(dev); | 105 | struct tulip_private *tp = netdev_priv(dev); |
106 | netif_rx_schedule(&tp->napi); | 106 | napi_schedule(&tp->napi); |
107 | } | 107 | } |
108 | 108 | ||
109 | int tulip_poll(struct napi_struct *napi, int budget) | 109 | int tulip_poll(struct napi_struct *napi, int budget) |
@@ -300,7 +300,7 @@ int tulip_poll(struct napi_struct *napi, int budget) | |||
300 | 300 | ||
301 | /* Remove us from polling list and enable RX intr. */ | 301 | /* Remove us from polling list and enable RX intr. */ |
302 | 302 | ||
303 | netif_rx_complete(napi); | 303 | napi_complete(napi); |
304 | iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7); | 304 | iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7); |
305 | 305 | ||
306 | /* The last op happens after poll completion. Which means the following: | 306 | /* The last op happens after poll completion. Which means the following: |
@@ -333,10 +333,10 @@ int tulip_poll(struct napi_struct *napi, int budget) | |||
333 | 333 | ||
334 | /* Think: timer_pending() was an explicit signature of bug. | 334 | /* Think: timer_pending() was an explicit signature of bug. |
335 | * Timer can be pending now but fired and completed | 335 | * Timer can be pending now but fired and completed |
336 | * before we did netif_rx_complete(). See? We would lose it. */ | 336 | * before we did napi_complete(). See? We would lose it. */ |
337 | 337 | ||
338 | /* remove ourselves from the polling list */ | 338 | /* remove ourselves from the polling list */ |
339 | netif_rx_complete(napi); | 339 | napi_complete(napi); |
340 | 340 | ||
341 | return work_done; | 341 | return work_done; |
342 | } | 342 | } |
@@ -519,7 +519,7 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance) | |||
519 | rxd++; | 519 | rxd++; |
520 | /* Mask RX intrs and add the device to poll list. */ | 520 | /* Mask RX intrs and add the device to poll list. */ |
521 | iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7); | 521 | iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7); |
522 | netif_rx_schedule(&tp->napi); | 522 | napi_schedule(&tp->napi); |
523 | 523 | ||
524 | if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass))) | 524 | if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass))) |
525 | break; | 525 | break; |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index d7b81e4fdd56..15d67635bb10 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -63,6 +63,7 @@ | |||
63 | #include <linux/virtio_net.h> | 63 | #include <linux/virtio_net.h> |
64 | #include <net/net_namespace.h> | 64 | #include <net/net_namespace.h> |
65 | #include <net/netns/generic.h> | 65 | #include <net/netns/generic.h> |
66 | #include <net/rtnetlink.h> | ||
66 | 67 | ||
67 | #include <asm/system.h> | 68 | #include <asm/system.h> |
68 | #include <asm/uaccess.h> | 69 | #include <asm/uaccess.h> |
@@ -87,14 +88,19 @@ struct tap_filter { | |||
87 | unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN]; | 88 | unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN]; |
88 | }; | 89 | }; |
89 | 90 | ||
91 | struct tun_file { | ||
92 | atomic_t count; | ||
93 | struct tun_struct *tun; | ||
94 | struct net *net; | ||
95 | wait_queue_head_t read_wait; | ||
96 | }; | ||
97 | |||
90 | struct tun_struct { | 98 | struct tun_struct { |
91 | struct list_head list; | 99 | struct tun_file *tfile; |
92 | unsigned int flags; | 100 | unsigned int flags; |
93 | int attached; | ||
94 | uid_t owner; | 101 | uid_t owner; |
95 | gid_t group; | 102 | gid_t group; |
96 | 103 | ||
97 | wait_queue_head_t read_wait; | ||
98 | struct sk_buff_head readq; | 104 | struct sk_buff_head readq; |
99 | 105 | ||
100 | struct net_device *dev; | 106 | struct net_device *dev; |
@@ -107,6 +113,88 @@ struct tun_struct { | |||
107 | #endif | 113 | #endif |
108 | }; | 114 | }; |
109 | 115 | ||
116 | static int tun_attach(struct tun_struct *tun, struct file *file) | ||
117 | { | ||
118 | struct tun_file *tfile = file->private_data; | ||
119 | const struct cred *cred = current_cred(); | ||
120 | int err; | ||
121 | |||
122 | ASSERT_RTNL(); | ||
123 | |||
124 | /* Check permissions */ | ||
125 | if (((tun->owner != -1 && cred->euid != tun->owner) || | ||
126 | (tun->group != -1 && !in_egroup_p(tun->group))) && | ||
127 | !capable(CAP_NET_ADMIN)) | ||
128 | return -EPERM; | ||
129 | |||
130 | netif_tx_lock_bh(tun->dev); | ||
131 | |||
132 | err = -EINVAL; | ||
133 | if (tfile->tun) | ||
134 | goto out; | ||
135 | |||
136 | err = -EBUSY; | ||
137 | if (tun->tfile) | ||
138 | goto out; | ||
139 | |||
140 | err = 0; | ||
141 | tfile->tun = tun; | ||
142 | tun->tfile = tfile; | ||
143 | dev_hold(tun->dev); | ||
144 | atomic_inc(&tfile->count); | ||
145 | |||
146 | out: | ||
147 | netif_tx_unlock_bh(tun->dev); | ||
148 | return err; | ||
149 | } | ||
150 | |||
151 | static void __tun_detach(struct tun_struct *tun) | ||
152 | { | ||
153 | struct tun_file *tfile = tun->tfile; | ||
154 | |||
155 | /* Detach from net device */ | ||
156 | netif_tx_lock_bh(tun->dev); | ||
157 | tfile->tun = NULL; | ||
158 | tun->tfile = NULL; | ||
159 | netif_tx_unlock_bh(tun->dev); | ||
160 | |||
161 | /* Drop read queue */ | ||
162 | skb_queue_purge(&tun->readq); | ||
163 | |||
164 | /* Drop the extra count on the net device */ | ||
165 | dev_put(tun->dev); | ||
166 | } | ||
167 | |||
168 | static void tun_detach(struct tun_struct *tun) | ||
169 | { | ||
170 | rtnl_lock(); | ||
171 | __tun_detach(tun); | ||
172 | rtnl_unlock(); | ||
173 | } | ||
174 | |||
175 | static struct tun_struct *__tun_get(struct tun_file *tfile) | ||
176 | { | ||
177 | struct tun_struct *tun = NULL; | ||
178 | |||
179 | if (atomic_inc_not_zero(&tfile->count)) | ||
180 | tun = tfile->tun; | ||
181 | |||
182 | return tun; | ||
183 | } | ||
184 | |||
185 | static struct tun_struct *tun_get(struct file *file) | ||
186 | { | ||
187 | return __tun_get(file->private_data); | ||
188 | } | ||
189 | |||
190 | static void tun_put(struct tun_struct *tun) | ||
191 | { | ||
192 | struct tun_file *tfile = tun->tfile; | ||
193 | |||
194 | if (atomic_dec_and_test(&tfile->count)) | ||
195 | tun_detach(tfile->tun); | ||
196 | } | ||
197 | |||
110 | /* TAP filterting */ | 198 | /* TAP filterting */ |
111 | static void addr_hash_set(u32 *mask, const u8 *addr) | 199 | static void addr_hash_set(u32 *mask, const u8 *addr) |
112 | { | 200 | { |
@@ -213,13 +301,23 @@ static int check_filter(struct tap_filter *filter, const struct sk_buff *skb) | |||
213 | 301 | ||
214 | /* Network device part of the driver */ | 302 | /* Network device part of the driver */ |
215 | 303 | ||
216 | static int tun_net_id; | ||
217 | struct tun_net { | ||
218 | struct list_head dev_list; | ||
219 | }; | ||
220 | |||
221 | static const struct ethtool_ops tun_ethtool_ops; | 304 | static const struct ethtool_ops tun_ethtool_ops; |
222 | 305 | ||
306 | /* Net device detach from fd. */ | ||
307 | static void tun_net_uninit(struct net_device *dev) | ||
308 | { | ||
309 | struct tun_struct *tun = netdev_priv(dev); | ||
310 | struct tun_file *tfile = tun->tfile; | ||
311 | |||
312 | /* Inform the methods they need to stop using the dev. | ||
313 | */ | ||
314 | if (tfile) { | ||
315 | wake_up_all(&tfile->read_wait); | ||
316 | if (atomic_dec_and_test(&tfile->count)) | ||
317 | __tun_detach(tun); | ||
318 | } | ||
319 | } | ||
320 | |||
223 | /* Net device open. */ | 321 | /* Net device open. */ |
224 | static int tun_net_open(struct net_device *dev) | 322 | static int tun_net_open(struct net_device *dev) |
225 | { | 323 | { |
@@ -242,7 +340,7 @@ static int tun_net_xmit(struct sk_buff *skb, struct net_device *dev) | |||
242 | DBG(KERN_INFO "%s: tun_net_xmit %d\n", tun->dev->name, skb->len); | 340 | DBG(KERN_INFO "%s: tun_net_xmit %d\n", tun->dev->name, skb->len); |
243 | 341 | ||
244 | /* Drop packet if interface is not attached */ | 342 | /* Drop packet if interface is not attached */ |
245 | if (!tun->attached) | 343 | if (!tun->tfile) |
246 | goto drop; | 344 | goto drop; |
247 | 345 | ||
248 | /* Drop if the filter does not like it. | 346 | /* Drop if the filter does not like it. |
@@ -274,7 +372,7 @@ static int tun_net_xmit(struct sk_buff *skb, struct net_device *dev) | |||
274 | /* Notify and wake up reader process */ | 372 | /* Notify and wake up reader process */ |
275 | if (tun->flags & TUN_FASYNC) | 373 | if (tun->flags & TUN_FASYNC) |
276 | kill_fasync(&tun->fasync, SIGIO, POLL_IN); | 374 | kill_fasync(&tun->fasync, SIGIO, POLL_IN); |
277 | wake_up_interruptible(&tun->read_wait); | 375 | wake_up_interruptible(&tun->tfile->read_wait); |
278 | return 0; | 376 | return 0; |
279 | 377 | ||
280 | drop: | 378 | drop: |
@@ -306,6 +404,7 @@ tun_net_change_mtu(struct net_device *dev, int new_mtu) | |||
306 | } | 404 | } |
307 | 405 | ||
308 | static const struct net_device_ops tun_netdev_ops = { | 406 | static const struct net_device_ops tun_netdev_ops = { |
407 | .ndo_uninit = tun_net_uninit, | ||
309 | .ndo_open = tun_net_open, | 408 | .ndo_open = tun_net_open, |
310 | .ndo_stop = tun_net_close, | 409 | .ndo_stop = tun_net_close, |
311 | .ndo_start_xmit = tun_net_xmit, | 410 | .ndo_start_xmit = tun_net_xmit, |
@@ -313,6 +412,7 @@ static const struct net_device_ops tun_netdev_ops = { | |||
313 | }; | 412 | }; |
314 | 413 | ||
315 | static const struct net_device_ops tap_netdev_ops = { | 414 | static const struct net_device_ops tap_netdev_ops = { |
415 | .ndo_uninit = tun_net_uninit, | ||
316 | .ndo_open = tun_net_open, | 416 | .ndo_open = tun_net_open, |
317 | .ndo_stop = tun_net_close, | 417 | .ndo_stop = tun_net_close, |
318 | .ndo_start_xmit = tun_net_xmit, | 418 | .ndo_start_xmit = tun_net_xmit, |
@@ -359,19 +459,24 @@ static void tun_net_init(struct net_device *dev) | |||
359 | /* Poll */ | 459 | /* Poll */ |
360 | static unsigned int tun_chr_poll(struct file *file, poll_table * wait) | 460 | static unsigned int tun_chr_poll(struct file *file, poll_table * wait) |
361 | { | 461 | { |
362 | struct tun_struct *tun = file->private_data; | 462 | struct tun_file *tfile = file->private_data; |
463 | struct tun_struct *tun = __tun_get(tfile); | ||
363 | unsigned int mask = POLLOUT | POLLWRNORM; | 464 | unsigned int mask = POLLOUT | POLLWRNORM; |
364 | 465 | ||
365 | if (!tun) | 466 | if (!tun) |
366 | return -EBADFD; | 467 | return POLLERR; |
367 | 468 | ||
368 | DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name); | 469 | DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name); |
369 | 470 | ||
370 | poll_wait(file, &tun->read_wait, wait); | 471 | poll_wait(file, &tfile->read_wait, wait); |
371 | 472 | ||
372 | if (!skb_queue_empty(&tun->readq)) | 473 | if (!skb_queue_empty(&tun->readq)) |
373 | mask |= POLLIN | POLLRDNORM; | 474 | mask |= POLLIN | POLLRDNORM; |
374 | 475 | ||
476 | if (tun->dev->reg_state != NETREG_REGISTERED) | ||
477 | mask = POLLERR; | ||
478 | |||
479 | tun_put(tun); | ||
375 | return mask; | 480 | return mask; |
376 | } | 481 | } |
377 | 482 | ||
@@ -438,7 +543,7 @@ static struct sk_buff *tun_alloc_skb(size_t prepad, size_t len, size_t linear, | |||
438 | /* Get packet from user space buffer */ | 543 | /* Get packet from user space buffer */ |
439 | static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, size_t count) | 544 | static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, size_t count) |
440 | { | 545 | { |
441 | struct tun_pi pi = { 0, __constant_htons(ETH_P_IP) }; | 546 | struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; |
442 | struct sk_buff *skb; | 547 | struct sk_buff *skb; |
443 | size_t len = count, align = 0; | 548 | size_t len = count, align = 0; |
444 | struct virtio_net_hdr gso = { 0 }; | 549 | struct virtio_net_hdr gso = { 0 }; |
@@ -556,14 +661,18 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, | |||
556 | static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv, | 661 | static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv, |
557 | unsigned long count, loff_t pos) | 662 | unsigned long count, loff_t pos) |
558 | { | 663 | { |
559 | struct tun_struct *tun = iocb->ki_filp->private_data; | 664 | struct tun_struct *tun = tun_get(iocb->ki_filp); |
665 | ssize_t result; | ||
560 | 666 | ||
561 | if (!tun) | 667 | if (!tun) |
562 | return -EBADFD; | 668 | return -EBADFD; |
563 | 669 | ||
564 | DBG(KERN_INFO "%s: tun_chr_write %ld\n", tun->dev->name, count); | 670 | DBG(KERN_INFO "%s: tun_chr_write %ld\n", tun->dev->name, count); |
565 | 671 | ||
566 | return tun_get_user(tun, (struct iovec *) iv, iov_length(iv, count)); | 672 | result = tun_get_user(tun, (struct iovec *) iv, iov_length(iv, count)); |
673 | |||
674 | tun_put(tun); | ||
675 | return result; | ||
567 | } | 676 | } |
568 | 677 | ||
569 | /* Put packet to the user space buffer */ | 678 | /* Put packet to the user space buffer */ |
@@ -636,7 +745,8 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv, | |||
636 | unsigned long count, loff_t pos) | 745 | unsigned long count, loff_t pos) |
637 | { | 746 | { |
638 | struct file *file = iocb->ki_filp; | 747 | struct file *file = iocb->ki_filp; |
639 | struct tun_struct *tun = file->private_data; | 748 | struct tun_file *tfile = file->private_data; |
749 | struct tun_struct *tun = __tun_get(tfile); | ||
640 | DECLARE_WAITQUEUE(wait, current); | 750 | DECLARE_WAITQUEUE(wait, current); |
641 | struct sk_buff *skb; | 751 | struct sk_buff *skb; |
642 | ssize_t len, ret = 0; | 752 | ssize_t len, ret = 0; |
@@ -647,10 +757,12 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv, | |||
647 | DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name); | 757 | DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name); |
648 | 758 | ||
649 | len = iov_length(iv, count); | 759 | len = iov_length(iv, count); |
650 | if (len < 0) | 760 | if (len < 0) { |
651 | return -EINVAL; | 761 | ret = -EINVAL; |
762 | goto out; | ||
763 | } | ||
652 | 764 | ||
653 | add_wait_queue(&tun->read_wait, &wait); | 765 | add_wait_queue(&tfile->read_wait, &wait); |
654 | while (len) { | 766 | while (len) { |
655 | current->state = TASK_INTERRUPTIBLE; | 767 | current->state = TASK_INTERRUPTIBLE; |
656 | 768 | ||
@@ -664,6 +776,10 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv, | |||
664 | ret = -ERESTARTSYS; | 776 | ret = -ERESTARTSYS; |
665 | break; | 777 | break; |
666 | } | 778 | } |
779 | if (tun->dev->reg_state != NETREG_REGISTERED) { | ||
780 | ret = -EIO; | ||
781 | break; | ||
782 | } | ||
667 | 783 | ||
668 | /* Nothing to read, let's sleep */ | 784 | /* Nothing to read, let's sleep */ |
669 | schedule(); | 785 | schedule(); |
@@ -677,8 +793,10 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv, | |||
677 | } | 793 | } |
678 | 794 | ||
679 | current->state = TASK_RUNNING; | 795 | current->state = TASK_RUNNING; |
680 | remove_wait_queue(&tun->read_wait, &wait); | 796 | remove_wait_queue(&tfile->read_wait, &wait); |
681 | 797 | ||
798 | out: | ||
799 | tun_put(tun); | ||
682 | return ret; | 800 | return ret; |
683 | } | 801 | } |
684 | 802 | ||
@@ -687,54 +805,49 @@ static void tun_setup(struct net_device *dev) | |||
687 | struct tun_struct *tun = netdev_priv(dev); | 805 | struct tun_struct *tun = netdev_priv(dev); |
688 | 806 | ||
689 | skb_queue_head_init(&tun->readq); | 807 | skb_queue_head_init(&tun->readq); |
690 | init_waitqueue_head(&tun->read_wait); | ||
691 | 808 | ||
692 | tun->owner = -1; | 809 | tun->owner = -1; |
693 | tun->group = -1; | 810 | tun->group = -1; |
694 | 811 | ||
695 | dev->ethtool_ops = &tun_ethtool_ops; | 812 | dev->ethtool_ops = &tun_ethtool_ops; |
696 | dev->destructor = free_netdev; | 813 | dev->destructor = free_netdev; |
697 | dev->features |= NETIF_F_NETNS_LOCAL; | ||
698 | } | 814 | } |
699 | 815 | ||
700 | static struct tun_struct *tun_get_by_name(struct tun_net *tn, const char *name) | 816 | /* Trivial set of netlink ops to allow deleting tun or tap |
817 | * device with netlink. | ||
818 | */ | ||
819 | static int tun_validate(struct nlattr *tb[], struct nlattr *data[]) | ||
701 | { | 820 | { |
702 | struct tun_struct *tun; | 821 | return -EINVAL; |
822 | } | ||
703 | 823 | ||
704 | ASSERT_RTNL(); | 824 | static struct rtnl_link_ops tun_link_ops __read_mostly = { |
705 | list_for_each_entry(tun, &tn->dev_list, list) { | 825 | .kind = DRV_NAME, |
706 | if (!strncmp(tun->dev->name, name, IFNAMSIZ)) | 826 | .priv_size = sizeof(struct tun_struct), |
707 | return tun; | 827 | .setup = tun_setup, |
708 | } | 828 | .validate = tun_validate, |
829 | }; | ||
709 | 830 | ||
710 | return NULL; | ||
711 | } | ||
712 | 831 | ||
713 | static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) | 832 | static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) |
714 | { | 833 | { |
715 | struct tun_net *tn; | ||
716 | struct tun_struct *tun; | 834 | struct tun_struct *tun; |
717 | struct net_device *dev; | 835 | struct net_device *dev; |
718 | const struct cred *cred = current_cred(); | ||
719 | int err; | 836 | int err; |
720 | 837 | ||
721 | tn = net_generic(net, tun_net_id); | 838 | dev = __dev_get_by_name(net, ifr->ifr_name); |
722 | tun = tun_get_by_name(tn, ifr->ifr_name); | 839 | if (dev) { |
723 | if (tun) { | 840 | if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops) |
724 | if (tun->attached) | 841 | tun = netdev_priv(dev); |
725 | return -EBUSY; | 842 | else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops) |
726 | 843 | tun = netdev_priv(dev); | |
727 | /* Check permissions */ | 844 | else |
728 | if (((tun->owner != -1 && | 845 | return -EINVAL; |
729 | cred->euid != tun->owner) || | 846 | |
730 | (tun->group != -1 && | 847 | err = tun_attach(tun, file); |
731 | cred->egid != tun->group)) && | 848 | if (err < 0) |
732 | !capable(CAP_NET_ADMIN)) { | 849 | return err; |
733 | return -EPERM; | ||
734 | } | ||
735 | } | 850 | } |
736 | else if (__dev_get_by_name(net, ifr->ifr_name)) | ||
737 | return -EINVAL; | ||
738 | else { | 851 | else { |
739 | char *name; | 852 | char *name; |
740 | unsigned long flags = 0; | 853 | unsigned long flags = 0; |
@@ -765,6 +878,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) | |||
765 | return -ENOMEM; | 878 | return -ENOMEM; |
766 | 879 | ||
767 | dev_net_set(dev, net); | 880 | dev_net_set(dev, net); |
881 | dev->rtnl_link_ops = &tun_link_ops; | ||
768 | 882 | ||
769 | tun = netdev_priv(dev); | 883 | tun = netdev_priv(dev); |
770 | tun->dev = dev; | 884 | tun->dev = dev; |
@@ -783,7 +897,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) | |||
783 | if (err < 0) | 897 | if (err < 0) |
784 | goto err_free_dev; | 898 | goto err_free_dev; |
785 | 899 | ||
786 | list_add(&tun->list, &tn->dev_list); | 900 | err = tun_attach(tun, file); |
901 | if (err < 0) | ||
902 | goto err_free_dev; | ||
787 | } | 903 | } |
788 | 904 | ||
789 | DBG(KERN_INFO "%s: tun_set_iff\n", tun->dev->name); | 905 | DBG(KERN_INFO "%s: tun_set_iff\n", tun->dev->name); |
@@ -803,10 +919,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) | |||
803 | else | 919 | else |
804 | tun->flags &= ~TUN_VNET_HDR; | 920 | tun->flags &= ~TUN_VNET_HDR; |
805 | 921 | ||
806 | file->private_data = tun; | ||
807 | tun->attached = 1; | ||
808 | get_net(dev_net(tun->dev)); | ||
809 | |||
810 | /* Make sure persistent devices do not get stuck in | 922 | /* Make sure persistent devices do not get stuck in |
811 | * xoff state. | 923 | * xoff state. |
812 | */ | 924 | */ |
@@ -824,7 +936,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) | |||
824 | 936 | ||
825 | static int tun_get_iff(struct net *net, struct file *file, struct ifreq *ifr) | 937 | static int tun_get_iff(struct net *net, struct file *file, struct ifreq *ifr) |
826 | { | 938 | { |
827 | struct tun_struct *tun = file->private_data; | 939 | struct tun_struct *tun = tun_get(file); |
828 | 940 | ||
829 | if (!tun) | 941 | if (!tun) |
830 | return -EBADFD; | 942 | return -EBADFD; |
@@ -849,6 +961,7 @@ static int tun_get_iff(struct net *net, struct file *file, struct ifreq *ifr) | |||
849 | if (tun->flags & TUN_VNET_HDR) | 961 | if (tun->flags & TUN_VNET_HDR) |
850 | ifr->ifr_flags |= IFF_VNET_HDR; | 962 | ifr->ifr_flags |= IFF_VNET_HDR; |
851 | 963 | ||
964 | tun_put(tun); | ||
852 | return 0; | 965 | return 0; |
853 | } | 966 | } |
854 | 967 | ||
@@ -895,7 +1008,8 @@ static int set_offload(struct net_device *dev, unsigned long arg) | |||
895 | static int tun_chr_ioctl(struct inode *inode, struct file *file, | 1008 | static int tun_chr_ioctl(struct inode *inode, struct file *file, |
896 | unsigned int cmd, unsigned long arg) | 1009 | unsigned int cmd, unsigned long arg) |
897 | { | 1010 | { |
898 | struct tun_struct *tun = file->private_data; | 1011 | struct tun_file *tfile = file->private_data; |
1012 | struct tun_struct *tun; | ||
899 | void __user* argp = (void __user*)arg; | 1013 | void __user* argp = (void __user*)arg; |
900 | struct ifreq ifr; | 1014 | struct ifreq ifr; |
901 | int ret; | 1015 | int ret; |
@@ -904,13 +1018,23 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, | |||
904 | if (copy_from_user(&ifr, argp, sizeof ifr)) | 1018 | if (copy_from_user(&ifr, argp, sizeof ifr)) |
905 | return -EFAULT; | 1019 | return -EFAULT; |
906 | 1020 | ||
1021 | if (cmd == TUNGETFEATURES) { | ||
1022 | /* Currently this just means: "what IFF flags are valid?". | ||
1023 | * This is needed because we never checked for invalid flags on | ||
1024 | * TUNSETIFF. */ | ||
1025 | return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE | | ||
1026 | IFF_VNET_HDR, | ||
1027 | (unsigned int __user*)argp); | ||
1028 | } | ||
1029 | |||
1030 | tun = __tun_get(tfile); | ||
907 | if (cmd == TUNSETIFF && !tun) { | 1031 | if (cmd == TUNSETIFF && !tun) { |
908 | int err; | 1032 | int err; |
909 | 1033 | ||
910 | ifr.ifr_name[IFNAMSIZ-1] = '\0'; | 1034 | ifr.ifr_name[IFNAMSIZ-1] = '\0'; |
911 | 1035 | ||
912 | rtnl_lock(); | 1036 | rtnl_lock(); |
913 | err = tun_set_iff(current->nsproxy->net_ns, file, &ifr); | 1037 | err = tun_set_iff(tfile->net, file, &ifr); |
914 | rtnl_unlock(); | 1038 | rtnl_unlock(); |
915 | 1039 | ||
916 | if (err) | 1040 | if (err) |
@@ -921,28 +1045,21 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, | |||
921 | return 0; | 1045 | return 0; |
922 | } | 1046 | } |
923 | 1047 | ||
924 | if (cmd == TUNGETFEATURES) { | ||
925 | /* Currently this just means: "what IFF flags are valid?". | ||
926 | * This is needed because we never checked for invalid flags on | ||
927 | * TUNSETIFF. */ | ||
928 | return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE | | ||
929 | IFF_VNET_HDR, | ||
930 | (unsigned int __user*)argp); | ||
931 | } | ||
932 | 1048 | ||
933 | if (!tun) | 1049 | if (!tun) |
934 | return -EBADFD; | 1050 | return -EBADFD; |
935 | 1051 | ||
936 | DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd); | 1052 | DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd); |
937 | 1053 | ||
1054 | ret = 0; | ||
938 | switch (cmd) { | 1055 | switch (cmd) { |
939 | case TUNGETIFF: | 1056 | case TUNGETIFF: |
940 | ret = tun_get_iff(current->nsproxy->net_ns, file, &ifr); | 1057 | ret = tun_get_iff(current->nsproxy->net_ns, file, &ifr); |
941 | if (ret) | 1058 | if (ret) |
942 | return ret; | 1059 | break; |
943 | 1060 | ||
944 | if (copy_to_user(argp, &ifr, sizeof(ifr))) | 1061 | if (copy_to_user(argp, &ifr, sizeof(ifr))) |
945 | return -EFAULT; | 1062 | ret = -EFAULT; |
946 | break; | 1063 | break; |
947 | 1064 | ||
948 | case TUNSETNOCSUM: | 1065 | case TUNSETNOCSUM: |
@@ -994,7 +1111,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, | |||
994 | ret = 0; | 1111 | ret = 0; |
995 | } | 1112 | } |
996 | rtnl_unlock(); | 1113 | rtnl_unlock(); |
997 | return ret; | 1114 | break; |
998 | 1115 | ||
999 | #ifdef TUN_DEBUG | 1116 | #ifdef TUN_DEBUG |
1000 | case TUNSETDEBUG: | 1117 | case TUNSETDEBUG: |
@@ -1005,24 +1122,25 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, | |||
1005 | rtnl_lock(); | 1122 | rtnl_lock(); |
1006 | ret = set_offload(tun->dev, arg); | 1123 | ret = set_offload(tun->dev, arg); |
1007 | rtnl_unlock(); | 1124 | rtnl_unlock(); |
1008 | return ret; | 1125 | break; |
1009 | 1126 | ||
1010 | case TUNSETTXFILTER: | 1127 | case TUNSETTXFILTER: |
1011 | /* Can be set only for TAPs */ | 1128 | /* Can be set only for TAPs */ |
1129 | ret = -EINVAL; | ||
1012 | if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV) | 1130 | if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV) |
1013 | return -EINVAL; | 1131 | break; |
1014 | rtnl_lock(); | 1132 | rtnl_lock(); |
1015 | ret = update_filter(&tun->txflt, (void __user *)arg); | 1133 | ret = update_filter(&tun->txflt, (void __user *)arg); |
1016 | rtnl_unlock(); | 1134 | rtnl_unlock(); |
1017 | return ret; | 1135 | break; |
1018 | 1136 | ||
1019 | case SIOCGIFHWADDR: | 1137 | case SIOCGIFHWADDR: |
1020 | /* Get hw addres */ | 1138 | /* Get hw addres */ |
1021 | memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN); | 1139 | memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN); |
1022 | ifr.ifr_hwaddr.sa_family = tun->dev->type; | 1140 | ifr.ifr_hwaddr.sa_family = tun->dev->type; |
1023 | if (copy_to_user(argp, &ifr, sizeof ifr)) | 1141 | if (copy_to_user(argp, &ifr, sizeof ifr)) |
1024 | return -EFAULT; | 1142 | ret = -EFAULT; |
1025 | return 0; | 1143 | break; |
1026 | 1144 | ||
1027 | case SIOCSIFHWADDR: | 1145 | case SIOCSIFHWADDR: |
1028 | /* Set hw address */ | 1146 | /* Set hw address */ |
@@ -1032,18 +1150,19 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, | |||
1032 | rtnl_lock(); | 1150 | rtnl_lock(); |
1033 | ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); | 1151 | ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); |
1034 | rtnl_unlock(); | 1152 | rtnl_unlock(); |
1035 | return ret; | 1153 | break; |
1036 | |||
1037 | default: | 1154 | default: |
1038 | return -EINVAL; | 1155 | ret = -EINVAL; |
1156 | break; | ||
1039 | }; | 1157 | }; |
1040 | 1158 | ||
1041 | return 0; | 1159 | tun_put(tun); |
1160 | return ret; | ||
1042 | } | 1161 | } |
1043 | 1162 | ||
1044 | static int tun_chr_fasync(int fd, struct file *file, int on) | 1163 | static int tun_chr_fasync(int fd, struct file *file, int on) |
1045 | { | 1164 | { |
1046 | struct tun_struct *tun = file->private_data; | 1165 | struct tun_struct *tun = tun_get(file); |
1047 | int ret; | 1166 | int ret; |
1048 | 1167 | ||
1049 | if (!tun) | 1168 | if (!tun) |
@@ -1065,42 +1184,48 @@ static int tun_chr_fasync(int fd, struct file *file, int on) | |||
1065 | ret = 0; | 1184 | ret = 0; |
1066 | out: | 1185 | out: |
1067 | unlock_kernel(); | 1186 | unlock_kernel(); |
1187 | tun_put(tun); | ||
1068 | return ret; | 1188 | return ret; |
1069 | } | 1189 | } |
1070 | 1190 | ||
1071 | static int tun_chr_open(struct inode *inode, struct file * file) | 1191 | static int tun_chr_open(struct inode *inode, struct file * file) |
1072 | { | 1192 | { |
1193 | struct tun_file *tfile; | ||
1073 | cycle_kernel_lock(); | 1194 | cycle_kernel_lock(); |
1074 | DBG1(KERN_INFO "tunX: tun_chr_open\n"); | 1195 | DBG1(KERN_INFO "tunX: tun_chr_open\n"); |
1075 | file->private_data = NULL; | 1196 | |
1197 | tfile = kmalloc(sizeof(*tfile), GFP_KERNEL); | ||
1198 | if (!tfile) | ||
1199 | return -ENOMEM; | ||
1200 | atomic_set(&tfile->count, 0); | ||
1201 | tfile->tun = NULL; | ||
1202 | tfile->net = get_net(current->nsproxy->net_ns); | ||
1203 | init_waitqueue_head(&tfile->read_wait); | ||
1204 | file->private_data = tfile; | ||
1076 | return 0; | 1205 | return 0; |
1077 | } | 1206 | } |
1078 | 1207 | ||
1079 | static int tun_chr_close(struct inode *inode, struct file *file) | 1208 | static int tun_chr_close(struct inode *inode, struct file *file) |
1080 | { | 1209 | { |
1081 | struct tun_struct *tun = file->private_data; | 1210 | struct tun_file *tfile = file->private_data; |
1211 | struct tun_struct *tun = __tun_get(tfile); | ||
1082 | 1212 | ||
1083 | if (!tun) | ||
1084 | return 0; | ||
1085 | |||
1086 | DBG(KERN_INFO "%s: tun_chr_close\n", tun->dev->name); | ||
1087 | 1213 | ||
1088 | rtnl_lock(); | 1214 | if (tun) { |
1215 | DBG(KERN_INFO "%s: tun_chr_close\n", tun->dev->name); | ||
1089 | 1216 | ||
1090 | /* Detach from net device */ | 1217 | rtnl_lock(); |
1091 | file->private_data = NULL; | 1218 | __tun_detach(tun); |
1092 | tun->attached = 0; | ||
1093 | put_net(dev_net(tun->dev)); | ||
1094 | 1219 | ||
1095 | /* Drop read queue */ | 1220 | /* If desireable, unregister the netdevice. */ |
1096 | skb_queue_purge(&tun->readq); | 1221 | if (!(tun->flags & TUN_PERSIST)) |
1222 | unregister_netdevice(tun->dev); | ||
1097 | 1223 | ||
1098 | if (!(tun->flags & TUN_PERSIST)) { | 1224 | rtnl_unlock(); |
1099 | list_del(&tun->list); | ||
1100 | unregister_netdevice(tun->dev); | ||
1101 | } | 1225 | } |
1102 | 1226 | ||
1103 | rtnl_unlock(); | 1227 | put_net(tfile->net); |
1228 | kfree(tfile); | ||
1104 | 1229 | ||
1105 | return 0; | 1230 | return 0; |
1106 | } | 1231 | } |
@@ -1181,7 +1306,7 @@ static void tun_set_msglevel(struct net_device *dev, u32 value) | |||
1181 | static u32 tun_get_link(struct net_device *dev) | 1306 | static u32 tun_get_link(struct net_device *dev) |
1182 | { | 1307 | { |
1183 | struct tun_struct *tun = netdev_priv(dev); | 1308 | struct tun_struct *tun = netdev_priv(dev); |
1184 | return tun->attached; | 1309 | return !!tun->tfile; |
1185 | } | 1310 | } |
1186 | 1311 | ||
1187 | static u32 tun_get_rx_csum(struct net_device *dev) | 1312 | static u32 tun_get_rx_csum(struct net_device *dev) |
@@ -1210,45 +1335,6 @@ static const struct ethtool_ops tun_ethtool_ops = { | |||
1210 | .set_rx_csum = tun_set_rx_csum | 1335 | .set_rx_csum = tun_set_rx_csum |
1211 | }; | 1336 | }; |
1212 | 1337 | ||
1213 | static int tun_init_net(struct net *net) | ||
1214 | { | ||
1215 | struct tun_net *tn; | ||
1216 | |||
1217 | tn = kmalloc(sizeof(*tn), GFP_KERNEL); | ||
1218 | if (tn == NULL) | ||
1219 | return -ENOMEM; | ||
1220 | |||
1221 | INIT_LIST_HEAD(&tn->dev_list); | ||
1222 | |||
1223 | if (net_assign_generic(net, tun_net_id, tn)) { | ||
1224 | kfree(tn); | ||
1225 | return -ENOMEM; | ||
1226 | } | ||
1227 | |||
1228 | return 0; | ||
1229 | } | ||
1230 | |||
1231 | static void tun_exit_net(struct net *net) | ||
1232 | { | ||
1233 | struct tun_net *tn; | ||
1234 | struct tun_struct *tun, *nxt; | ||
1235 | |||
1236 | tn = net_generic(net, tun_net_id); | ||
1237 | |||
1238 | rtnl_lock(); | ||
1239 | list_for_each_entry_safe(tun, nxt, &tn->dev_list, list) { | ||
1240 | DBG(KERN_INFO "%s cleaned up\n", tun->dev->name); | ||
1241 | unregister_netdevice(tun->dev); | ||
1242 | } | ||
1243 | rtnl_unlock(); | ||
1244 | |||
1245 | kfree(tn); | ||
1246 | } | ||
1247 | |||
1248 | static struct pernet_operations tun_net_ops = { | ||
1249 | .init = tun_init_net, | ||
1250 | .exit = tun_exit_net, | ||
1251 | }; | ||
1252 | 1338 | ||
1253 | static int __init tun_init(void) | 1339 | static int __init tun_init(void) |
1254 | { | 1340 | { |
@@ -1257,10 +1343,10 @@ static int __init tun_init(void) | |||
1257 | printk(KERN_INFO "tun: %s, %s\n", DRV_DESCRIPTION, DRV_VERSION); | 1343 | printk(KERN_INFO "tun: %s, %s\n", DRV_DESCRIPTION, DRV_VERSION); |
1258 | printk(KERN_INFO "tun: %s\n", DRV_COPYRIGHT); | 1344 | printk(KERN_INFO "tun: %s\n", DRV_COPYRIGHT); |
1259 | 1345 | ||
1260 | ret = register_pernet_gen_device(&tun_net_id, &tun_net_ops); | 1346 | ret = rtnl_link_register(&tun_link_ops); |
1261 | if (ret) { | 1347 | if (ret) { |
1262 | printk(KERN_ERR "tun: Can't register pernet ops\n"); | 1348 | printk(KERN_ERR "tun: Can't register link_ops\n"); |
1263 | goto err_pernet; | 1349 | goto err_linkops; |
1264 | } | 1350 | } |
1265 | 1351 | ||
1266 | ret = misc_register(&tun_miscdev); | 1352 | ret = misc_register(&tun_miscdev); |
@@ -1268,18 +1354,17 @@ static int __init tun_init(void) | |||
1268 | printk(KERN_ERR "tun: Can't register misc device %d\n", TUN_MINOR); | 1354 | printk(KERN_ERR "tun: Can't register misc device %d\n", TUN_MINOR); |
1269 | goto err_misc; | 1355 | goto err_misc; |
1270 | } | 1356 | } |
1271 | return 0; | 1357 | return 0; |
1272 | |||
1273 | err_misc: | 1358 | err_misc: |
1274 | unregister_pernet_gen_device(tun_net_id, &tun_net_ops); | 1359 | rtnl_link_unregister(&tun_link_ops); |
1275 | err_pernet: | 1360 | err_linkops: |
1276 | return ret; | 1361 | return ret; |
1277 | } | 1362 | } |
1278 | 1363 | ||
1279 | static void tun_cleanup(void) | 1364 | static void tun_cleanup(void) |
1280 | { | 1365 | { |
1281 | misc_deregister(&tun_miscdev); | 1366 | misc_deregister(&tun_miscdev); |
1282 | unregister_pernet_gen_device(tun_net_id, &tun_net_ops); | 1367 | rtnl_link_unregister(&tun_link_ops); |
1283 | } | 1368 | } |
1284 | 1369 | ||
1285 | module_init(tun_init); | 1370 | module_init(tun_init); |
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c index 3af9a9516ccb..a8e5651f3165 100644 --- a/drivers/net/typhoon.c +++ b/drivers/net/typhoon.c | |||
@@ -1783,7 +1783,7 @@ typhoon_poll(struct napi_struct *napi, int budget) | |||
1783 | } | 1783 | } |
1784 | 1784 | ||
1785 | if (work_done < budget) { | 1785 | if (work_done < budget) { |
1786 | netif_rx_complete(napi); | 1786 | napi_complete(napi); |
1787 | iowrite32(TYPHOON_INTR_NONE, | 1787 | iowrite32(TYPHOON_INTR_NONE, |
1788 | tp->ioaddr + TYPHOON_REG_INTR_MASK); | 1788 | tp->ioaddr + TYPHOON_REG_INTR_MASK); |
1789 | typhoon_post_pci_writes(tp->ioaddr); | 1789 | typhoon_post_pci_writes(tp->ioaddr); |
@@ -1806,10 +1806,10 @@ typhoon_interrupt(int irq, void *dev_instance) | |||
1806 | 1806 | ||
1807 | iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS); | 1807 | iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS); |
1808 | 1808 | ||
1809 | if (netif_rx_schedule_prep(&tp->napi)) { | 1809 | if (napi_schedule_prep(&tp->napi)) { |
1810 | iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK); | 1810 | iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK); |
1811 | typhoon_post_pci_writes(ioaddr); | 1811 | typhoon_post_pci_writes(ioaddr); |
1812 | __netif_rx_schedule(&tp->napi); | 1812 | __napi_schedule(&tp->napi); |
1813 | } else { | 1813 | } else { |
1814 | printk(KERN_ERR "%s: Error, poll already scheduled\n", | 1814 | printk(KERN_ERR "%s: Error, poll already scheduled\n", |
1815 | dev->name); | 1815 | dev->name); |
@@ -1944,7 +1944,7 @@ typhoon_start_runtime(struct typhoon *tp) | |||
1944 | goto error_out; | 1944 | goto error_out; |
1945 | 1945 | ||
1946 | INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE); | 1946 | INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE); |
1947 | xp_cmd.parm1 = __constant_cpu_to_le16(ETH_P_8021Q); | 1947 | xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q); |
1948 | err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); | 1948 | err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); |
1949 | if(err < 0) | 1949 | if(err < 0) |
1950 | goto error_out; | 1950 | goto error_out; |
diff --git a/drivers/net/typhoon.h b/drivers/net/typhoon.h index dd7022ca7354..673fd5125914 100644 --- a/drivers/net/typhoon.h +++ b/drivers/net/typhoon.h | |||
@@ -174,18 +174,18 @@ struct tx_desc { | |||
174 | u64 tx_addr; /* opaque for hardware, for TX_DESC */ | 174 | u64 tx_addr; /* opaque for hardware, for TX_DESC */ |
175 | }; | 175 | }; |
176 | __le32 processFlags; | 176 | __le32 processFlags; |
177 | #define TYPHOON_TX_PF_NO_CRC __constant_cpu_to_le32(0x00000001) | 177 | #define TYPHOON_TX_PF_NO_CRC cpu_to_le32(0x00000001) |
178 | #define TYPHOON_TX_PF_IP_CHKSUM __constant_cpu_to_le32(0x00000002) | 178 | #define TYPHOON_TX_PF_IP_CHKSUM cpu_to_le32(0x00000002) |
179 | #define TYPHOON_TX_PF_TCP_CHKSUM __constant_cpu_to_le32(0x00000004) | 179 | #define TYPHOON_TX_PF_TCP_CHKSUM cpu_to_le32(0x00000004) |
180 | #define TYPHOON_TX_PF_TCP_SEGMENT __constant_cpu_to_le32(0x00000008) | 180 | #define TYPHOON_TX_PF_TCP_SEGMENT cpu_to_le32(0x00000008) |
181 | #define TYPHOON_TX_PF_INSERT_VLAN __constant_cpu_to_le32(0x00000010) | 181 | #define TYPHOON_TX_PF_INSERT_VLAN cpu_to_le32(0x00000010) |
182 | #define TYPHOON_TX_PF_IPSEC __constant_cpu_to_le32(0x00000020) | 182 | #define TYPHOON_TX_PF_IPSEC cpu_to_le32(0x00000020) |
183 | #define TYPHOON_TX_PF_VLAN_PRIORITY __constant_cpu_to_le32(0x00000040) | 183 | #define TYPHOON_TX_PF_VLAN_PRIORITY cpu_to_le32(0x00000040) |
184 | #define TYPHOON_TX_PF_UDP_CHKSUM __constant_cpu_to_le32(0x00000080) | 184 | #define TYPHOON_TX_PF_UDP_CHKSUM cpu_to_le32(0x00000080) |
185 | #define TYPHOON_TX_PF_PAD_FRAME __constant_cpu_to_le32(0x00000100) | 185 | #define TYPHOON_TX_PF_PAD_FRAME cpu_to_le32(0x00000100) |
186 | #define TYPHOON_TX_PF_RESERVED __constant_cpu_to_le32(0x00000e00) | 186 | #define TYPHOON_TX_PF_RESERVED cpu_to_le32(0x00000e00) |
187 | #define TYPHOON_TX_PF_VLAN_MASK __constant_cpu_to_le32(0x0ffff000) | 187 | #define TYPHOON_TX_PF_VLAN_MASK cpu_to_le32(0x0ffff000) |
188 | #define TYPHOON_TX_PF_INTERNAL __constant_cpu_to_le32(0xf0000000) | 188 | #define TYPHOON_TX_PF_INTERNAL cpu_to_le32(0xf0000000) |
189 | #define TYPHOON_TX_PF_VLAN_TAG_SHIFT 12 | 189 | #define TYPHOON_TX_PF_VLAN_TAG_SHIFT 12 |
190 | } __attribute__ ((packed)); | 190 | } __attribute__ ((packed)); |
191 | 191 | ||
@@ -203,8 +203,8 @@ struct tcpopt_desc { | |||
203 | u8 flags; | 203 | u8 flags; |
204 | u8 numDesc; | 204 | u8 numDesc; |
205 | __le16 mss_flags; | 205 | __le16 mss_flags; |
206 | #define TYPHOON_TSO_FIRST __constant_cpu_to_le16(0x1000) | 206 | #define TYPHOON_TSO_FIRST cpu_to_le16(0x1000) |
207 | #define TYPHOON_TSO_LAST __constant_cpu_to_le16(0x2000) | 207 | #define TYPHOON_TSO_LAST cpu_to_le16(0x2000) |
208 | __le32 respAddrLo; | 208 | __le32 respAddrLo; |
209 | __le32 bytesTx; | 209 | __le32 bytesTx; |
210 | __le32 status; | 210 | __le32 status; |
@@ -222,8 +222,8 @@ struct ipsec_desc { | |||
222 | u8 flags; | 222 | u8 flags; |
223 | u8 numDesc; | 223 | u8 numDesc; |
224 | __le16 ipsecFlags; | 224 | __le16 ipsecFlags; |
225 | #define TYPHOON_IPSEC_GEN_IV __constant_cpu_to_le16(0x0000) | 225 | #define TYPHOON_IPSEC_GEN_IV cpu_to_le16(0x0000) |
226 | #define TYPHOON_IPSEC_USE_IV __constant_cpu_to_le16(0x0001) | 226 | #define TYPHOON_IPSEC_USE_IV cpu_to_le16(0x0001) |
227 | __le32 sa1; | 227 | __le32 sa1; |
228 | __le32 sa2; | 228 | __le32 sa2; |
229 | __le32 reserved; | 229 | __le32 reserved; |
@@ -248,41 +248,41 @@ struct rx_desc { | |||
248 | u32 addr; /* opaque, comes from virtAddr */ | 248 | u32 addr; /* opaque, comes from virtAddr */ |
249 | u32 addrHi; /* opaque, comes from virtAddrHi */ | 249 | u32 addrHi; /* opaque, comes from virtAddrHi */ |
250 | __le32 rxStatus; | 250 | __le32 rxStatus; |
251 | #define TYPHOON_RX_ERR_INTERNAL __constant_cpu_to_le32(0x00000000) | 251 | #define TYPHOON_RX_ERR_INTERNAL cpu_to_le32(0x00000000) |
252 | #define TYPHOON_RX_ERR_FIFO_UNDERRUN __constant_cpu_to_le32(0x00000001) | 252 | #define TYPHOON_RX_ERR_FIFO_UNDERRUN cpu_to_le32(0x00000001) |
253 | #define TYPHOON_RX_ERR_BAD_SSD __constant_cpu_to_le32(0x00000002) | 253 | #define TYPHOON_RX_ERR_BAD_SSD cpu_to_le32(0x00000002) |
254 | #define TYPHOON_RX_ERR_RUNT __constant_cpu_to_le32(0x00000003) | 254 | #define TYPHOON_RX_ERR_RUNT cpu_to_le32(0x00000003) |
255 | #define TYPHOON_RX_ERR_CRC __constant_cpu_to_le32(0x00000004) | 255 | #define TYPHOON_RX_ERR_CRC cpu_to_le32(0x00000004) |
256 | #define TYPHOON_RX_ERR_OVERSIZE __constant_cpu_to_le32(0x00000005) | 256 | #define TYPHOON_RX_ERR_OVERSIZE cpu_to_le32(0x00000005) |
257 | #define TYPHOON_RX_ERR_ALIGN __constant_cpu_to_le32(0x00000006) | 257 | #define TYPHOON_RX_ERR_ALIGN cpu_to_le32(0x00000006) |
258 | #define TYPHOON_RX_ERR_DRIBBLE __constant_cpu_to_le32(0x00000007) | 258 | #define TYPHOON_RX_ERR_DRIBBLE cpu_to_le32(0x00000007) |
259 | #define TYPHOON_RX_PROTO_MASK __constant_cpu_to_le32(0x00000003) | 259 | #define TYPHOON_RX_PROTO_MASK cpu_to_le32(0x00000003) |
260 | #define TYPHOON_RX_PROTO_UNKNOWN __constant_cpu_to_le32(0x00000000) | 260 | #define TYPHOON_RX_PROTO_UNKNOWN cpu_to_le32(0x00000000) |
261 | #define TYPHOON_RX_PROTO_IP __constant_cpu_to_le32(0x00000001) | 261 | #define TYPHOON_RX_PROTO_IP cpu_to_le32(0x00000001) |
262 | #define TYPHOON_RX_PROTO_IPX __constant_cpu_to_le32(0x00000002) | 262 | #define TYPHOON_RX_PROTO_IPX cpu_to_le32(0x00000002) |
263 | #define TYPHOON_RX_VLAN __constant_cpu_to_le32(0x00000004) | 263 | #define TYPHOON_RX_VLAN cpu_to_le32(0x00000004) |
264 | #define TYPHOON_RX_IP_FRAG __constant_cpu_to_le32(0x00000008) | 264 | #define TYPHOON_RX_IP_FRAG cpu_to_le32(0x00000008) |
265 | #define TYPHOON_RX_IPSEC __constant_cpu_to_le32(0x00000010) | 265 | #define TYPHOON_RX_IPSEC cpu_to_le32(0x00000010) |
266 | #define TYPHOON_RX_IP_CHK_FAIL __constant_cpu_to_le32(0x00000020) | 266 | #define TYPHOON_RX_IP_CHK_FAIL cpu_to_le32(0x00000020) |
267 | #define TYPHOON_RX_TCP_CHK_FAIL __constant_cpu_to_le32(0x00000040) | 267 | #define TYPHOON_RX_TCP_CHK_FAIL cpu_to_le32(0x00000040) |
268 | #define TYPHOON_RX_UDP_CHK_FAIL __constant_cpu_to_le32(0x00000080) | 268 | #define TYPHOON_RX_UDP_CHK_FAIL cpu_to_le32(0x00000080) |
269 | #define TYPHOON_RX_IP_CHK_GOOD __constant_cpu_to_le32(0x00000100) | 269 | #define TYPHOON_RX_IP_CHK_GOOD cpu_to_le32(0x00000100) |
270 | #define TYPHOON_RX_TCP_CHK_GOOD __constant_cpu_to_le32(0x00000200) | 270 | #define TYPHOON_RX_TCP_CHK_GOOD cpu_to_le32(0x00000200) |
271 | #define TYPHOON_RX_UDP_CHK_GOOD __constant_cpu_to_le32(0x00000400) | 271 | #define TYPHOON_RX_UDP_CHK_GOOD cpu_to_le32(0x00000400) |
272 | __le16 filterResults; | 272 | __le16 filterResults; |
273 | #define TYPHOON_RX_FILTER_MASK __constant_cpu_to_le16(0x7fff) | 273 | #define TYPHOON_RX_FILTER_MASK cpu_to_le16(0x7fff) |
274 | #define TYPHOON_RX_FILTERED __constant_cpu_to_le16(0x8000) | 274 | #define TYPHOON_RX_FILTERED cpu_to_le16(0x8000) |
275 | __le16 ipsecResults; | 275 | __le16 ipsecResults; |
276 | #define TYPHOON_RX_OUTER_AH_GOOD __constant_cpu_to_le16(0x0001) | 276 | #define TYPHOON_RX_OUTER_AH_GOOD cpu_to_le16(0x0001) |
277 | #define TYPHOON_RX_OUTER_ESP_GOOD __constant_cpu_to_le16(0x0002) | 277 | #define TYPHOON_RX_OUTER_ESP_GOOD cpu_to_le16(0x0002) |
278 | #define TYPHOON_RX_INNER_AH_GOOD __constant_cpu_to_le16(0x0004) | 278 | #define TYPHOON_RX_INNER_AH_GOOD cpu_to_le16(0x0004) |
279 | #define TYPHOON_RX_INNER_ESP_GOOD __constant_cpu_to_le16(0x0008) | 279 | #define TYPHOON_RX_INNER_ESP_GOOD cpu_to_le16(0x0008) |
280 | #define TYPHOON_RX_OUTER_AH_FAIL __constant_cpu_to_le16(0x0010) | 280 | #define TYPHOON_RX_OUTER_AH_FAIL cpu_to_le16(0x0010) |
281 | #define TYPHOON_RX_OUTER_ESP_FAIL __constant_cpu_to_le16(0x0020) | 281 | #define TYPHOON_RX_OUTER_ESP_FAIL cpu_to_le16(0x0020) |
282 | #define TYPHOON_RX_INNER_AH_FAIL __constant_cpu_to_le16(0x0040) | 282 | #define TYPHOON_RX_INNER_AH_FAIL cpu_to_le16(0x0040) |
283 | #define TYPHOON_RX_INNER_ESP_FAIL __constant_cpu_to_le16(0x0080) | 283 | #define TYPHOON_RX_INNER_ESP_FAIL cpu_to_le16(0x0080) |
284 | #define TYPHOON_RX_UNKNOWN_SA __constant_cpu_to_le16(0x0100) | 284 | #define TYPHOON_RX_UNKNOWN_SA cpu_to_le16(0x0100) |
285 | #define TYPHOON_RX_ESP_FORMAT_ERR __constant_cpu_to_le16(0x0200) | 285 | #define TYPHOON_RX_ESP_FORMAT_ERR cpu_to_le16(0x0200) |
286 | __be32 vlanTag; | 286 | __be32 vlanTag; |
287 | } __attribute__ ((packed)); | 287 | } __attribute__ ((packed)); |
288 | 288 | ||
@@ -318,31 +318,31 @@ struct cmd_desc { | |||
318 | u8 flags; | 318 | u8 flags; |
319 | u8 numDesc; | 319 | u8 numDesc; |
320 | __le16 cmd; | 320 | __le16 cmd; |
321 | #define TYPHOON_CMD_TX_ENABLE __constant_cpu_to_le16(0x0001) | 321 | #define TYPHOON_CMD_TX_ENABLE cpu_to_le16(0x0001) |
322 | #define TYPHOON_CMD_TX_DISABLE __constant_cpu_to_le16(0x0002) | 322 | #define TYPHOON_CMD_TX_DISABLE cpu_to_le16(0x0002) |
323 | #define TYPHOON_CMD_RX_ENABLE __constant_cpu_to_le16(0x0003) | 323 | #define TYPHOON_CMD_RX_ENABLE cpu_to_le16(0x0003) |
324 | #define TYPHOON_CMD_RX_DISABLE __constant_cpu_to_le16(0x0004) | 324 | #define TYPHOON_CMD_RX_DISABLE cpu_to_le16(0x0004) |
325 | #define TYPHOON_CMD_SET_RX_FILTER __constant_cpu_to_le16(0x0005) | 325 | #define TYPHOON_CMD_SET_RX_FILTER cpu_to_le16(0x0005) |
326 | #define TYPHOON_CMD_READ_STATS __constant_cpu_to_le16(0x0007) | 326 | #define TYPHOON_CMD_READ_STATS cpu_to_le16(0x0007) |
327 | #define TYPHOON_CMD_XCVR_SELECT __constant_cpu_to_le16(0x0013) | 327 | #define TYPHOON_CMD_XCVR_SELECT cpu_to_le16(0x0013) |
328 | #define TYPHOON_CMD_SET_MAX_PKT_SIZE __constant_cpu_to_le16(0x001a) | 328 | #define TYPHOON_CMD_SET_MAX_PKT_SIZE cpu_to_le16(0x001a) |
329 | #define TYPHOON_CMD_READ_MEDIA_STATUS __constant_cpu_to_le16(0x001b) | 329 | #define TYPHOON_CMD_READ_MEDIA_STATUS cpu_to_le16(0x001b) |
330 | #define TYPHOON_CMD_GOTO_SLEEP __constant_cpu_to_le16(0x0023) | 330 | #define TYPHOON_CMD_GOTO_SLEEP cpu_to_le16(0x0023) |
331 | #define TYPHOON_CMD_SET_MULTICAST_HASH __constant_cpu_to_le16(0x0025) | 331 | #define TYPHOON_CMD_SET_MULTICAST_HASH cpu_to_le16(0x0025) |
332 | #define TYPHOON_CMD_SET_MAC_ADDRESS __constant_cpu_to_le16(0x0026) | 332 | #define TYPHOON_CMD_SET_MAC_ADDRESS cpu_to_le16(0x0026) |
333 | #define TYPHOON_CMD_READ_MAC_ADDRESS __constant_cpu_to_le16(0x0027) | 333 | #define TYPHOON_CMD_READ_MAC_ADDRESS cpu_to_le16(0x0027) |
334 | #define TYPHOON_CMD_VLAN_TYPE_WRITE __constant_cpu_to_le16(0x002b) | 334 | #define TYPHOON_CMD_VLAN_TYPE_WRITE cpu_to_le16(0x002b) |
335 | #define TYPHOON_CMD_CREATE_SA __constant_cpu_to_le16(0x0034) | 335 | #define TYPHOON_CMD_CREATE_SA cpu_to_le16(0x0034) |
336 | #define TYPHOON_CMD_DELETE_SA __constant_cpu_to_le16(0x0035) | 336 | #define TYPHOON_CMD_DELETE_SA cpu_to_le16(0x0035) |
337 | #define TYPHOON_CMD_READ_VERSIONS __constant_cpu_to_le16(0x0043) | 337 | #define TYPHOON_CMD_READ_VERSIONS cpu_to_le16(0x0043) |
338 | #define TYPHOON_CMD_IRQ_COALESCE_CTRL __constant_cpu_to_le16(0x0045) | 338 | #define TYPHOON_CMD_IRQ_COALESCE_CTRL cpu_to_le16(0x0045) |
339 | #define TYPHOON_CMD_ENABLE_WAKE_EVENTS __constant_cpu_to_le16(0x0049) | 339 | #define TYPHOON_CMD_ENABLE_WAKE_EVENTS cpu_to_le16(0x0049) |
340 | #define TYPHOON_CMD_SET_OFFLOAD_TASKS __constant_cpu_to_le16(0x004f) | 340 | #define TYPHOON_CMD_SET_OFFLOAD_TASKS cpu_to_le16(0x004f) |
341 | #define TYPHOON_CMD_HELLO_RESP __constant_cpu_to_le16(0x0057) | 341 | #define TYPHOON_CMD_HELLO_RESP cpu_to_le16(0x0057) |
342 | #define TYPHOON_CMD_HALT __constant_cpu_to_le16(0x005d) | 342 | #define TYPHOON_CMD_HALT cpu_to_le16(0x005d) |
343 | #define TYPHOON_CMD_READ_IPSEC_INFO __constant_cpu_to_le16(0x005e) | 343 | #define TYPHOON_CMD_READ_IPSEC_INFO cpu_to_le16(0x005e) |
344 | #define TYPHOON_CMD_GET_IPSEC_ENABLE __constant_cpu_to_le16(0x0067) | 344 | #define TYPHOON_CMD_GET_IPSEC_ENABLE cpu_to_le16(0x0067) |
345 | #define TYPHOON_CMD_GET_CMD_LVL __constant_cpu_to_le16(0x0069) | 345 | #define TYPHOON_CMD_GET_CMD_LVL cpu_to_le16(0x0069) |
346 | u16 seqNo; | 346 | u16 seqNo; |
347 | __le16 parm1; | 347 | __le16 parm1; |
348 | __le32 parm2; | 348 | __le32 parm2; |
@@ -380,11 +380,11 @@ struct resp_desc { | |||
380 | 380 | ||
381 | /* TYPHOON_CMD_SET_RX_FILTER filter bits (cmd.parm1) | 381 | /* TYPHOON_CMD_SET_RX_FILTER filter bits (cmd.parm1) |
382 | */ | 382 | */ |
383 | #define TYPHOON_RX_FILTER_DIRECTED __constant_cpu_to_le16(0x0001) | 383 | #define TYPHOON_RX_FILTER_DIRECTED cpu_to_le16(0x0001) |
384 | #define TYPHOON_RX_FILTER_ALL_MCAST __constant_cpu_to_le16(0x0002) | 384 | #define TYPHOON_RX_FILTER_ALL_MCAST cpu_to_le16(0x0002) |
385 | #define TYPHOON_RX_FILTER_BROADCAST __constant_cpu_to_le16(0x0004) | 385 | #define TYPHOON_RX_FILTER_BROADCAST cpu_to_le16(0x0004) |
386 | #define TYPHOON_RX_FILTER_PROMISCOUS __constant_cpu_to_le16(0x0008) | 386 | #define TYPHOON_RX_FILTER_PROMISCOUS cpu_to_le16(0x0008) |
387 | #define TYPHOON_RX_FILTER_MCAST_HASH __constant_cpu_to_le16(0x0010) | 387 | #define TYPHOON_RX_FILTER_MCAST_HASH cpu_to_le16(0x0010) |
388 | 388 | ||
389 | /* TYPHOON_CMD_READ_STATS response format | 389 | /* TYPHOON_CMD_READ_STATS response format |
390 | */ | 390 | */ |
@@ -416,40 +416,40 @@ struct stats_resp { | |||
416 | __le32 rxOverflow; | 416 | __le32 rxOverflow; |
417 | __le32 rxFiltered; | 417 | __le32 rxFiltered; |
418 | __le32 linkStatus; | 418 | __le32 linkStatus; |
419 | #define TYPHOON_LINK_STAT_MASK __constant_cpu_to_le32(0x00000001) | 419 | #define TYPHOON_LINK_STAT_MASK cpu_to_le32(0x00000001) |
420 | #define TYPHOON_LINK_GOOD __constant_cpu_to_le32(0x00000001) | 420 | #define TYPHOON_LINK_GOOD cpu_to_le32(0x00000001) |
421 | #define TYPHOON_LINK_BAD __constant_cpu_to_le32(0x00000000) | 421 | #define TYPHOON_LINK_BAD cpu_to_le32(0x00000000) |
422 | #define TYPHOON_LINK_SPEED_MASK __constant_cpu_to_le32(0x00000002) | 422 | #define TYPHOON_LINK_SPEED_MASK cpu_to_le32(0x00000002) |
423 | #define TYPHOON_LINK_100MBPS __constant_cpu_to_le32(0x00000002) | 423 | #define TYPHOON_LINK_100MBPS cpu_to_le32(0x00000002) |
424 | #define TYPHOON_LINK_10MBPS __constant_cpu_to_le32(0x00000000) | 424 | #define TYPHOON_LINK_10MBPS cpu_to_le32(0x00000000) |
425 | #define TYPHOON_LINK_DUPLEX_MASK __constant_cpu_to_le32(0x00000004) | 425 | #define TYPHOON_LINK_DUPLEX_MASK cpu_to_le32(0x00000004) |
426 | #define TYPHOON_LINK_FULL_DUPLEX __constant_cpu_to_le32(0x00000004) | 426 | #define TYPHOON_LINK_FULL_DUPLEX cpu_to_le32(0x00000004) |
427 | #define TYPHOON_LINK_HALF_DUPLEX __constant_cpu_to_le32(0x00000000) | 427 | #define TYPHOON_LINK_HALF_DUPLEX cpu_to_le32(0x00000000) |
428 | __le32 unused2; | 428 | __le32 unused2; |
429 | __le32 unused3; | 429 | __le32 unused3; |
430 | } __attribute__ ((packed)); | 430 | } __attribute__ ((packed)); |
431 | 431 | ||
432 | /* TYPHOON_CMD_XCVR_SELECT xcvr values (resp.parm1) | 432 | /* TYPHOON_CMD_XCVR_SELECT xcvr values (resp.parm1) |
433 | */ | 433 | */ |
434 | #define TYPHOON_XCVR_10HALF __constant_cpu_to_le16(0x0000) | 434 | #define TYPHOON_XCVR_10HALF cpu_to_le16(0x0000) |
435 | #define TYPHOON_XCVR_10FULL __constant_cpu_to_le16(0x0001) | 435 | #define TYPHOON_XCVR_10FULL cpu_to_le16(0x0001) |
436 | #define TYPHOON_XCVR_100HALF __constant_cpu_to_le16(0x0002) | 436 | #define TYPHOON_XCVR_100HALF cpu_to_le16(0x0002) |
437 | #define TYPHOON_XCVR_100FULL __constant_cpu_to_le16(0x0003) | 437 | #define TYPHOON_XCVR_100FULL cpu_to_le16(0x0003) |
438 | #define TYPHOON_XCVR_AUTONEG __constant_cpu_to_le16(0x0004) | 438 | #define TYPHOON_XCVR_AUTONEG cpu_to_le16(0x0004) |
439 | 439 | ||
440 | /* TYPHOON_CMD_READ_MEDIA_STATUS (resp.parm1) | 440 | /* TYPHOON_CMD_READ_MEDIA_STATUS (resp.parm1) |
441 | */ | 441 | */ |
442 | #define TYPHOON_MEDIA_STAT_CRC_STRIP_DISABLE __constant_cpu_to_le16(0x0004) | 442 | #define TYPHOON_MEDIA_STAT_CRC_STRIP_DISABLE cpu_to_le16(0x0004) |
443 | #define TYPHOON_MEDIA_STAT_COLLISION_DETECT __constant_cpu_to_le16(0x0010) | 443 | #define TYPHOON_MEDIA_STAT_COLLISION_DETECT cpu_to_le16(0x0010) |
444 | #define TYPHOON_MEDIA_STAT_CARRIER_SENSE __constant_cpu_to_le16(0x0020) | 444 | #define TYPHOON_MEDIA_STAT_CARRIER_SENSE cpu_to_le16(0x0020) |
445 | #define TYPHOON_MEDIA_STAT_POLARITY_REV __constant_cpu_to_le16(0x0400) | 445 | #define TYPHOON_MEDIA_STAT_POLARITY_REV cpu_to_le16(0x0400) |
446 | #define TYPHOON_MEDIA_STAT_NO_LINK __constant_cpu_to_le16(0x0800) | 446 | #define TYPHOON_MEDIA_STAT_NO_LINK cpu_to_le16(0x0800) |
447 | 447 | ||
448 | /* TYPHOON_CMD_SET_MULTICAST_HASH enable values (cmd.parm1) | 448 | /* TYPHOON_CMD_SET_MULTICAST_HASH enable values (cmd.parm1) |
449 | */ | 449 | */ |
450 | #define TYPHOON_MCAST_HASH_DISABLE __constant_cpu_to_le16(0x0000) | 450 | #define TYPHOON_MCAST_HASH_DISABLE cpu_to_le16(0x0000) |
451 | #define TYPHOON_MCAST_HASH_ENABLE __constant_cpu_to_le16(0x0001) | 451 | #define TYPHOON_MCAST_HASH_ENABLE cpu_to_le16(0x0001) |
452 | #define TYPHOON_MCAST_HASH_SET __constant_cpu_to_le16(0x0002) | 452 | #define TYPHOON_MCAST_HASH_SET cpu_to_le16(0x0002) |
453 | 453 | ||
454 | /* TYPHOON_CMD_CREATE_SA descriptor and settings | 454 | /* TYPHOON_CMD_CREATE_SA descriptor and settings |
455 | */ | 455 | */ |
@@ -459,9 +459,9 @@ struct sa_descriptor { | |||
459 | u16 cmd; | 459 | u16 cmd; |
460 | u16 seqNo; | 460 | u16 seqNo; |
461 | u16 mode; | 461 | u16 mode; |
462 | #define TYPHOON_SA_MODE_NULL __constant_cpu_to_le16(0x0000) | 462 | #define TYPHOON_SA_MODE_NULL cpu_to_le16(0x0000) |
463 | #define TYPHOON_SA_MODE_AH __constant_cpu_to_le16(0x0001) | 463 | #define TYPHOON_SA_MODE_AH cpu_to_le16(0x0001) |
464 | #define TYPHOON_SA_MODE_ESP __constant_cpu_to_le16(0x0002) | 464 | #define TYPHOON_SA_MODE_ESP cpu_to_le16(0x0002) |
465 | u8 hashFlags; | 465 | u8 hashFlags; |
466 | #define TYPHOON_SA_HASH_ENABLE 0x01 | 466 | #define TYPHOON_SA_HASH_ENABLE 0x01 |
467 | #define TYPHOON_SA_HASH_SHA1 0x02 | 467 | #define TYPHOON_SA_HASH_SHA1 0x02 |
@@ -493,22 +493,22 @@ struct sa_descriptor { | |||
493 | /* TYPHOON_CMD_SET_OFFLOAD_TASKS bits (cmd.parm2 (Tx) & cmd.parm3 (Rx)) | 493 | /* TYPHOON_CMD_SET_OFFLOAD_TASKS bits (cmd.parm2 (Tx) & cmd.parm3 (Rx)) |
494 | * This is all for IPv4. | 494 | * This is all for IPv4. |
495 | */ | 495 | */ |
496 | #define TYPHOON_OFFLOAD_TCP_CHKSUM __constant_cpu_to_le32(0x00000002) | 496 | #define TYPHOON_OFFLOAD_TCP_CHKSUM cpu_to_le32(0x00000002) |
497 | #define TYPHOON_OFFLOAD_UDP_CHKSUM __constant_cpu_to_le32(0x00000004) | 497 | #define TYPHOON_OFFLOAD_UDP_CHKSUM cpu_to_le32(0x00000004) |
498 | #define TYPHOON_OFFLOAD_IP_CHKSUM __constant_cpu_to_le32(0x00000008) | 498 | #define TYPHOON_OFFLOAD_IP_CHKSUM cpu_to_le32(0x00000008) |
499 | #define TYPHOON_OFFLOAD_IPSEC __constant_cpu_to_le32(0x00000010) | 499 | #define TYPHOON_OFFLOAD_IPSEC cpu_to_le32(0x00000010) |
500 | #define TYPHOON_OFFLOAD_BCAST_THROTTLE __constant_cpu_to_le32(0x00000020) | 500 | #define TYPHOON_OFFLOAD_BCAST_THROTTLE cpu_to_le32(0x00000020) |
501 | #define TYPHOON_OFFLOAD_DHCP_PREVENT __constant_cpu_to_le32(0x00000040) | 501 | #define TYPHOON_OFFLOAD_DHCP_PREVENT cpu_to_le32(0x00000040) |
502 | #define TYPHOON_OFFLOAD_VLAN __constant_cpu_to_le32(0x00000080) | 502 | #define TYPHOON_OFFLOAD_VLAN cpu_to_le32(0x00000080) |
503 | #define TYPHOON_OFFLOAD_FILTERING __constant_cpu_to_le32(0x00000100) | 503 | #define TYPHOON_OFFLOAD_FILTERING cpu_to_le32(0x00000100) |
504 | #define TYPHOON_OFFLOAD_TCP_SEGMENT __constant_cpu_to_le32(0x00000200) | 504 | #define TYPHOON_OFFLOAD_TCP_SEGMENT cpu_to_le32(0x00000200) |
505 | 505 | ||
506 | /* TYPHOON_CMD_ENABLE_WAKE_EVENTS bits (cmd.parm1) | 506 | /* TYPHOON_CMD_ENABLE_WAKE_EVENTS bits (cmd.parm1) |
507 | */ | 507 | */ |
508 | #define TYPHOON_WAKE_MAGIC_PKT __constant_cpu_to_le16(0x01) | 508 | #define TYPHOON_WAKE_MAGIC_PKT cpu_to_le16(0x01) |
509 | #define TYPHOON_WAKE_LINK_EVENT __constant_cpu_to_le16(0x02) | 509 | #define TYPHOON_WAKE_LINK_EVENT cpu_to_le16(0x02) |
510 | #define TYPHOON_WAKE_ICMP_ECHO __constant_cpu_to_le16(0x04) | 510 | #define TYPHOON_WAKE_ICMP_ECHO cpu_to_le16(0x04) |
511 | #define TYPHOON_WAKE_ARP __constant_cpu_to_le16(0x08) | 511 | #define TYPHOON_WAKE_ARP cpu_to_le16(0x08) |
512 | 512 | ||
513 | /* These are used to load the firmware image on the NIC | 513 | /* These are used to load the firmware image on the NIC |
514 | */ | 514 | */ |
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index e87986867ba5..4a8d5747204a 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c | |||
@@ -3266,7 +3266,7 @@ static int ucc_geth_poll(struct napi_struct *napi, int budget) | |||
3266 | howmany += ucc_geth_rx(ugeth, i, budget - howmany); | 3266 | howmany += ucc_geth_rx(ugeth, i, budget - howmany); |
3267 | 3267 | ||
3268 | if (howmany < budget) { | 3268 | if (howmany < budget) { |
3269 | netif_rx_complete(napi); | 3269 | napi_complete(napi); |
3270 | setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS); | 3270 | setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS); |
3271 | } | 3271 | } |
3272 | 3272 | ||
@@ -3297,10 +3297,10 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info) | |||
3297 | 3297 | ||
3298 | /* check for receive events that require processing */ | 3298 | /* check for receive events that require processing */ |
3299 | if (ucce & UCCE_RX_EVENTS) { | 3299 | if (ucce & UCCE_RX_EVENTS) { |
3300 | if (netif_rx_schedule_prep(&ugeth->napi)) { | 3300 | if (napi_schedule_prep(&ugeth->napi)) { |
3301 | uccm &= ~UCCE_RX_EVENTS; | 3301 | uccm &= ~UCCE_RX_EVENTS; |
3302 | out_be32(uccf->p_uccm, uccm); | 3302 | out_be32(uccf->p_uccm, uccm); |
3303 | __netif_rx_schedule(&ugeth->napi); | 3303 | __napi_schedule(&ugeth->napi); |
3304 | } | 3304 | } |
3305 | } | 3305 | } |
3306 | 3306 | ||
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 0d0fa91c0251..806cc5da56ce 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c | |||
@@ -934,8 +934,7 @@ static void packetizeRx(struct hso_net *odev, unsigned char *ip_pkt, | |||
934 | if (!odev->rx_buf_missing) { | 934 | if (!odev->rx_buf_missing) { |
935 | /* Packet is complete. Inject into stack. */ | 935 | /* Packet is complete. Inject into stack. */ |
936 | /* We have IP packet here */ | 936 | /* We have IP packet here */ |
937 | odev->skb_rx_buf->protocol = | 937 | odev->skb_rx_buf->protocol = cpu_to_be16(ETH_P_IP); |
938 | __constant_htons(ETH_P_IP); | ||
939 | /* don't check it */ | 938 | /* don't check it */ |
940 | odev->skb_rx_buf->ip_summed = | 939 | odev->skb_rx_buf->ip_summed = |
941 | CHECKSUM_UNNECESSARY; | 940 | CHECKSUM_UNNECESSARY; |
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 5574abe29c73..5b0b9647382c 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c | |||
@@ -55,7 +55,6 @@ struct smsc95xx_priv { | |||
55 | 55 | ||
56 | struct usb_context { | 56 | struct usb_context { |
57 | struct usb_ctrlrequest req; | 57 | struct usb_ctrlrequest req; |
58 | struct completion notify; | ||
59 | struct usbnet *dev; | 58 | struct usbnet *dev; |
60 | }; | 59 | }; |
61 | 60 | ||
@@ -307,7 +306,7 @@ static int smsc95xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length, | |||
307 | return 0; | 306 | return 0; |
308 | } | 307 | } |
309 | 308 | ||
310 | static void smsc95xx_async_cmd_callback(struct urb *urb, struct pt_regs *regs) | 309 | static void smsc95xx_async_cmd_callback(struct urb *urb) |
311 | { | 310 | { |
312 | struct usb_context *usb_context = urb->context; | 311 | struct usb_context *usb_context = urb->context; |
313 | struct usbnet *dev = usb_context->dev; | 312 | struct usbnet *dev = usb_context->dev; |
@@ -316,8 +315,6 @@ static void smsc95xx_async_cmd_callback(struct urb *urb, struct pt_regs *regs) | |||
316 | if (status < 0) | 315 | if (status < 0) |
317 | devwarn(dev, "async callback failed with %d", status); | 316 | devwarn(dev, "async callback failed with %d", status); |
318 | 317 | ||
319 | complete(&usb_context->notify); | ||
320 | |||
321 | kfree(usb_context); | 318 | kfree(usb_context); |
322 | usb_free_urb(urb); | 319 | usb_free_urb(urb); |
323 | } | 320 | } |
@@ -348,11 +345,10 @@ static int smsc95xx_write_reg_async(struct usbnet *dev, u16 index, u32 *data) | |||
348 | usb_context->req.wValue = 00; | 345 | usb_context->req.wValue = 00; |
349 | usb_context->req.wIndex = cpu_to_le16(index); | 346 | usb_context->req.wIndex = cpu_to_le16(index); |
350 | usb_context->req.wLength = cpu_to_le16(size); | 347 | usb_context->req.wLength = cpu_to_le16(size); |
351 | init_completion(&usb_context->notify); | ||
352 | 348 | ||
353 | usb_fill_control_urb(urb, dev->udev, usb_sndctrlpipe(dev->udev, 0), | 349 | usb_fill_control_urb(urb, dev->udev, usb_sndctrlpipe(dev->udev, 0), |
354 | (void *)&usb_context->req, data, size, | 350 | (void *)&usb_context->req, data, size, |
355 | (usb_complete_t)smsc95xx_async_cmd_callback, | 351 | smsc95xx_async_cmd_callback, |
356 | (void *)usb_context); | 352 | (void *)usb_context); |
357 | 353 | ||
358 | status = usb_submit_urb(urb, GFP_ATOMIC); | 354 | status = usb_submit_urb(urb, GFP_ATOMIC); |
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c index 3b8e63254277..4671436ecf0e 100644 --- a/drivers/net/via-rhine.c +++ b/drivers/net/via-rhine.c | |||
@@ -589,7 +589,7 @@ static int rhine_napipoll(struct napi_struct *napi, int budget) | |||
589 | work_done = rhine_rx(dev, budget); | 589 | work_done = rhine_rx(dev, budget); |
590 | 590 | ||
591 | if (work_done < budget) { | 591 | if (work_done < budget) { |
592 | netif_rx_complete(napi); | 592 | napi_complete(napi); |
593 | 593 | ||
594 | iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | | 594 | iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | |
595 | IntrRxDropped | IntrRxNoBuf | IntrTxAborted | | 595 | IntrRxDropped | IntrRxNoBuf | IntrTxAborted | |
@@ -1319,7 +1319,7 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance) | |||
1319 | IntrPCIErr | IntrStatsMax | IntrLinkChange, | 1319 | IntrPCIErr | IntrStatsMax | IntrLinkChange, |
1320 | ioaddr + IntrEnable); | 1320 | ioaddr + IntrEnable); |
1321 | 1321 | ||
1322 | netif_rx_schedule(&rp->napi); | 1322 | napi_schedule(&rp->napi); |
1323 | } | 1323 | } |
1324 | 1324 | ||
1325 | if (intr_status & (IntrTxErrSummary | IntrTxDone)) { | 1325 | if (intr_status & (IntrTxErrSummary | IntrTxDone)) { |
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h index 29a33090d3d4..ea43e1832afb 100644 --- a/drivers/net/via-velocity.h +++ b/drivers/net/via-velocity.h | |||
@@ -183,7 +183,7 @@ struct rdesc1 { | |||
183 | }; | 183 | }; |
184 | 184 | ||
185 | enum { | 185 | enum { |
186 | RX_INTEN = __constant_cpu_to_le16(0x8000) | 186 | RX_INTEN = cpu_to_le16(0x8000) |
187 | }; | 187 | }; |
188 | 188 | ||
189 | struct rx_desc { | 189 | struct rx_desc { |
@@ -210,7 +210,7 @@ struct tdesc1 { | |||
210 | } __attribute__ ((__packed__)); | 210 | } __attribute__ ((__packed__)); |
211 | 211 | ||
212 | enum { | 212 | enum { |
213 | TD_QUEUE = __constant_cpu_to_le16(0x8000) | 213 | TD_QUEUE = cpu_to_le16(0x8000) |
214 | }; | 214 | }; |
215 | 215 | ||
216 | struct td_buf { | 216 | struct td_buf { |
@@ -242,7 +242,7 @@ struct velocity_td_info { | |||
242 | 242 | ||
243 | enum velocity_owner { | 243 | enum velocity_owner { |
244 | OWNED_BY_HOST = 0, | 244 | OWNED_BY_HOST = 0, |
245 | OWNED_BY_NIC = __constant_cpu_to_le16(0x8000) | 245 | OWNED_BY_NIC = cpu_to_le16(0x8000) |
246 | }; | 246 | }; |
247 | 247 | ||
248 | 248 | ||
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index c68808336c8c..fe576e75a538 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -43,6 +43,7 @@ struct virtnet_info | |||
43 | struct virtqueue *rvq, *svq; | 43 | struct virtqueue *rvq, *svq; |
44 | struct net_device *dev; | 44 | struct net_device *dev; |
45 | struct napi_struct napi; | 45 | struct napi_struct napi; |
46 | unsigned int status; | ||
46 | 47 | ||
47 | /* The skb we couldn't send because buffers were full. */ | 48 | /* The skb we couldn't send because buffers were full. */ |
48 | struct sk_buff *last_xmit_skb; | 49 | struct sk_buff *last_xmit_skb; |
@@ -375,9 +376,9 @@ static void skb_recv_done(struct virtqueue *rvq) | |||
375 | { | 376 | { |
376 | struct virtnet_info *vi = rvq->vdev->priv; | 377 | struct virtnet_info *vi = rvq->vdev->priv; |
377 | /* Schedule NAPI, Suppress further interrupts if successful. */ | 378 | /* Schedule NAPI, Suppress further interrupts if successful. */ |
378 | if (netif_rx_schedule_prep(&vi->napi)) { | 379 | if (napi_schedule_prep(&vi->napi)) { |
379 | rvq->vq_ops->disable_cb(rvq); | 380 | rvq->vq_ops->disable_cb(rvq); |
380 | __netif_rx_schedule(&vi->napi); | 381 | __napi_schedule(&vi->napi); |
381 | } | 382 | } |
382 | } | 383 | } |
383 | 384 | ||
@@ -403,11 +404,11 @@ again: | |||
403 | 404 | ||
404 | /* Out of packets? */ | 405 | /* Out of packets? */ |
405 | if (received < budget) { | 406 | if (received < budget) { |
406 | netif_rx_complete(napi); | 407 | napi_complete(napi); |
407 | if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) | 408 | if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) |
408 | && napi_schedule_prep(napi)) { | 409 | && napi_schedule_prep(napi)) { |
409 | vi->rvq->vq_ops->disable_cb(vi->rvq); | 410 | vi->rvq->vq_ops->disable_cb(vi->rvq); |
410 | __netif_rx_schedule(napi); | 411 | __napi_schedule(napi); |
411 | goto again; | 412 | goto again; |
412 | } | 413 | } |
413 | } | 414 | } |
@@ -581,9 +582,9 @@ static int virtnet_open(struct net_device *dev) | |||
581 | * won't get another interrupt, so process any outstanding packets | 582 | * won't get another interrupt, so process any outstanding packets |
582 | * now. virtnet_poll wants re-enable the queue, so we disable here. | 583 | * now. virtnet_poll wants re-enable the queue, so we disable here. |
583 | * We synchronize against interrupts via NAPI_STATE_SCHED */ | 584 | * We synchronize against interrupts via NAPI_STATE_SCHED */ |
584 | if (netif_rx_schedule_prep(&vi->napi)) { | 585 | if (napi_schedule_prep(&vi->napi)) { |
585 | vi->rvq->vq_ops->disable_cb(vi->rvq); | 586 | vi->rvq->vq_ops->disable_cb(vi->rvq); |
586 | __netif_rx_schedule(&vi->napi); | 587 | __napi_schedule(&vi->napi); |
587 | } | 588 | } |
588 | return 0; | 589 | return 0; |
589 | } | 590 | } |
@@ -612,6 +613,7 @@ static struct ethtool_ops virtnet_ethtool_ops = { | |||
612 | .set_tx_csum = virtnet_set_tx_csum, | 613 | .set_tx_csum = virtnet_set_tx_csum, |
613 | .set_sg = ethtool_op_set_sg, | 614 | .set_sg = ethtool_op_set_sg, |
614 | .set_tso = ethtool_op_set_tso, | 615 | .set_tso = ethtool_op_set_tso, |
616 | .get_link = ethtool_op_get_link, | ||
615 | }; | 617 | }; |
616 | 618 | ||
617 | #define MIN_MTU 68 | 619 | #define MIN_MTU 68 |
@@ -637,6 +639,41 @@ static const struct net_device_ops virtnet_netdev = { | |||
637 | #endif | 639 | #endif |
638 | }; | 640 | }; |
639 | 641 | ||
642 | static void virtnet_update_status(struct virtnet_info *vi) | ||
643 | { | ||
644 | u16 v; | ||
645 | |||
646 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) | ||
647 | return; | ||
648 | |||
649 | vi->vdev->config->get(vi->vdev, | ||
650 | offsetof(struct virtio_net_config, status), | ||
651 | &v, sizeof(v)); | ||
652 | |||
653 | /* Ignore unknown (future) status bits */ | ||
654 | v &= VIRTIO_NET_S_LINK_UP; | ||
655 | |||
656 | if (vi->status == v) | ||
657 | return; | ||
658 | |||
659 | vi->status = v; | ||
660 | |||
661 | if (vi->status & VIRTIO_NET_S_LINK_UP) { | ||
662 | netif_carrier_on(vi->dev); | ||
663 | netif_wake_queue(vi->dev); | ||
664 | } else { | ||
665 | netif_carrier_off(vi->dev); | ||
666 | netif_stop_queue(vi->dev); | ||
667 | } | ||
668 | } | ||
669 | |||
670 | static void virtnet_config_changed(struct virtio_device *vdev) | ||
671 | { | ||
672 | struct virtnet_info *vi = vdev->priv; | ||
673 | |||
674 | virtnet_update_status(vi); | ||
675 | } | ||
676 | |||
640 | static int virtnet_probe(struct virtio_device *vdev) | 677 | static int virtnet_probe(struct virtio_device *vdev) |
641 | { | 678 | { |
642 | int err; | 679 | int err; |
@@ -739,6 +776,9 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
739 | goto unregister; | 776 | goto unregister; |
740 | } | 777 | } |
741 | 778 | ||
779 | vi->status = VIRTIO_NET_S_LINK_UP; | ||
780 | virtnet_update_status(vi); | ||
781 | |||
742 | pr_debug("virtnet: registered device %s\n", dev->name); | 782 | pr_debug("virtnet: registered device %s\n", dev->name); |
743 | return 0; | 783 | return 0; |
744 | 784 | ||
@@ -794,7 +834,7 @@ static unsigned int features[] = { | |||
794 | VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, | 834 | VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, |
795 | VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, | 835 | VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, |
796 | VIRTIO_NET_F_GUEST_ECN, /* We don't yet handle UFO input. */ | 836 | VIRTIO_NET_F_GUEST_ECN, /* We don't yet handle UFO input. */ |
797 | VIRTIO_NET_F_MRG_RXBUF, | 837 | VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, |
798 | VIRTIO_F_NOTIFY_ON_EMPTY, | 838 | VIRTIO_F_NOTIFY_ON_EMPTY, |
799 | }; | 839 | }; |
800 | 840 | ||
@@ -806,6 +846,7 @@ static struct virtio_driver virtio_net = { | |||
806 | .id_table = id_table, | 846 | .id_table = id_table, |
807 | .probe = virtnet_probe, | 847 | .probe = virtnet_probe, |
808 | .remove = __devexit_p(virtnet_remove), | 848 | .remove = __devexit_p(virtnet_remove), |
849 | .config_changed = virtnet_config_changed, | ||
809 | }; | 850 | }; |
810 | 851 | ||
811 | static int __init init(void) | 852 | static int __init init(void) |
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c index b46897996f7e..9693b0fd323d 100644 --- a/drivers/net/wan/c101.c +++ b/drivers/net/wan/c101.c | |||
@@ -296,7 +296,13 @@ static void c101_destroy_card(card_t *card) | |||
296 | kfree(card); | 296 | kfree(card); |
297 | } | 297 | } |
298 | 298 | ||
299 | 299 | static const struct net_device_ops c101_ops = { | |
300 | .ndo_open = c101_open, | ||
301 | .ndo_stop = c101_close, | ||
302 | .ndo_change_mtu = hdlc_change_mtu, | ||
303 | .ndo_start_xmit = hdlc_start_xmit, | ||
304 | .ndo_do_ioctl = c101_ioctl, | ||
305 | }; | ||
300 | 306 | ||
301 | static int __init c101_run(unsigned long irq, unsigned long winbase) | 307 | static int __init c101_run(unsigned long irq, unsigned long winbase) |
302 | { | 308 | { |
@@ -367,9 +373,7 @@ static int __init c101_run(unsigned long irq, unsigned long winbase) | |||
367 | dev->mem_start = winbase; | 373 | dev->mem_start = winbase; |
368 | dev->mem_end = winbase + C101_MAPPED_RAM_SIZE - 1; | 374 | dev->mem_end = winbase + C101_MAPPED_RAM_SIZE - 1; |
369 | dev->tx_queue_len = 50; | 375 | dev->tx_queue_len = 50; |
370 | dev->do_ioctl = c101_ioctl; | 376 | dev->netdev_ops = &c101_ops; |
371 | dev->open = c101_open; | ||
372 | dev->stop = c101_close; | ||
373 | hdlc->attach = sca_attach; | 377 | hdlc->attach = sca_attach; |
374 | hdlc->xmit = sca_xmit; | 378 | hdlc->xmit = sca_xmit; |
375 | card->settings.clock_type = CLOCK_EXT; | 379 | card->settings.clock_type = CLOCK_EXT; |
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index d80b72e22dea..0d7ba117ef60 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c | |||
@@ -427,6 +427,15 @@ static void __exit cosa_exit(void) | |||
427 | } | 427 | } |
428 | module_exit(cosa_exit); | 428 | module_exit(cosa_exit); |
429 | 429 | ||
430 | static const struct net_device_ops cosa_ops = { | ||
431 | .ndo_open = cosa_net_open, | ||
432 | .ndo_stop = cosa_net_close, | ||
433 | .ndo_change_mtu = hdlc_change_mtu, | ||
434 | .ndo_start_xmit = hdlc_start_xmit, | ||
435 | .ndo_do_ioctl = cosa_net_ioctl, | ||
436 | .ndo_tx_timeout = cosa_net_timeout, | ||
437 | }; | ||
438 | |||
430 | static int cosa_probe(int base, int irq, int dma) | 439 | static int cosa_probe(int base, int irq, int dma) |
431 | { | 440 | { |
432 | struct cosa_data *cosa = cosa_cards+nr_cards; | 441 | struct cosa_data *cosa = cosa_cards+nr_cards; |
@@ -575,10 +584,7 @@ static int cosa_probe(int base, int irq, int dma) | |||
575 | } | 584 | } |
576 | dev_to_hdlc(chan->netdev)->attach = cosa_net_attach; | 585 | dev_to_hdlc(chan->netdev)->attach = cosa_net_attach; |
577 | dev_to_hdlc(chan->netdev)->xmit = cosa_net_tx; | 586 | dev_to_hdlc(chan->netdev)->xmit = cosa_net_tx; |
578 | chan->netdev->open = cosa_net_open; | 587 | chan->netdev->netdev_ops = &cosa_ops; |
579 | chan->netdev->stop = cosa_net_close; | ||
580 | chan->netdev->do_ioctl = cosa_net_ioctl; | ||
581 | chan->netdev->tx_timeout = cosa_net_timeout; | ||
582 | chan->netdev->watchdog_timeo = TX_TIMEOUT; | 588 | chan->netdev->watchdog_timeo = TX_TIMEOUT; |
583 | chan->netdev->base_addr = chan->cosa->datareg; | 589 | chan->netdev->base_addr = chan->cosa->datareg; |
584 | chan->netdev->irq = chan->cosa->irq; | 590 | chan->netdev->irq = chan->cosa->irq; |
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index 888025db2f02..8face5db8f32 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c | |||
@@ -883,6 +883,15 @@ static inline int dscc4_set_quartz(struct dscc4_dev_priv *dpriv, int hz) | |||
883 | return ret; | 883 | return ret; |
884 | } | 884 | } |
885 | 885 | ||
886 | static const struct net_device_ops dscc4_ops = { | ||
887 | .ndo_open = dscc4_open, | ||
888 | .ndo_stop = dscc4_close, | ||
889 | .ndo_change_mtu = hdlc_change_mtu, | ||
890 | .ndo_start_xmit = hdlc_start_xmit, | ||
891 | .ndo_do_ioctl = dscc4_ioctl, | ||
892 | .ndo_tx_timeout = dscc4_tx_timeout, | ||
893 | }; | ||
894 | |||
886 | static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr) | 895 | static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr) |
887 | { | 896 | { |
888 | struct dscc4_pci_priv *ppriv; | 897 | struct dscc4_pci_priv *ppriv; |
@@ -916,13 +925,8 @@ static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr) | |||
916 | hdlc_device *hdlc = dev_to_hdlc(d); | 925 | hdlc_device *hdlc = dev_to_hdlc(d); |
917 | 926 | ||
918 | d->base_addr = (unsigned long)ioaddr; | 927 | d->base_addr = (unsigned long)ioaddr; |
919 | d->init = NULL; | ||
920 | d->irq = pdev->irq; | 928 | d->irq = pdev->irq; |
921 | d->open = dscc4_open; | 929 | d->netdev_ops = &dscc4_ops; |
922 | d->stop = dscc4_close; | ||
923 | d->set_multicast_list = NULL; | ||
924 | d->do_ioctl = dscc4_ioctl; | ||
925 | d->tx_timeout = dscc4_tx_timeout; | ||
926 | d->watchdog_timeo = TX_TIMEOUT; | 930 | d->watchdog_timeo = TX_TIMEOUT; |
927 | SET_NETDEV_DEV(d, &pdev->dev); | 931 | SET_NETDEV_DEV(d, &pdev->dev); |
928 | 932 | ||
@@ -1048,7 +1052,7 @@ static int dscc4_open(struct net_device *dev) | |||
1048 | struct dscc4_pci_priv *ppriv; | 1052 | struct dscc4_pci_priv *ppriv; |
1049 | int ret = -EAGAIN; | 1053 | int ret = -EAGAIN; |
1050 | 1054 | ||
1051 | if ((dscc4_loopback_check(dpriv) < 0) || !dev->hard_start_xmit) | 1055 | if ((dscc4_loopback_check(dpriv) < 0)) |
1052 | goto err; | 1056 | goto err; |
1053 | 1057 | ||
1054 | if ((ret = hdlc_open(dev))) | 1058 | if ((ret = hdlc_open(dev))) |
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index 48a2c9d28950..00945f7c1e9b 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c | |||
@@ -2424,6 +2424,15 @@ fst_init_card(struct fst_card_info *card) | |||
2424 | type_strings[card->type], card->irq, card->nports); | 2424 | type_strings[card->type], card->irq, card->nports); |
2425 | } | 2425 | } |
2426 | 2426 | ||
2427 | static const struct net_device_ops fst_ops = { | ||
2428 | .ndo_open = fst_open, | ||
2429 | .ndo_stop = fst_close, | ||
2430 | .ndo_change_mtu = hdlc_change_mtu, | ||
2431 | .ndo_start_xmit = hdlc_start_xmit, | ||
2432 | .ndo_do_ioctl = fst_ioctl, | ||
2433 | .ndo_tx_timeout = fst_tx_timeout, | ||
2434 | }; | ||
2435 | |||
2427 | /* | 2436 | /* |
2428 | * Initialise card when detected. | 2437 | * Initialise card when detected. |
2429 | * Returns 0 to indicate success, or errno otherwise. | 2438 | * Returns 0 to indicate success, or errno otherwise. |
@@ -2565,12 +2574,9 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2565 | dev->base_addr = card->pci_conf; | 2574 | dev->base_addr = card->pci_conf; |
2566 | dev->irq = card->irq; | 2575 | dev->irq = card->irq; |
2567 | 2576 | ||
2568 | dev->tx_queue_len = FST_TX_QUEUE_LEN; | 2577 | dev->netdev_ops = &fst_ops; |
2569 | dev->open = fst_open; | 2578 | dev->tx_queue_len = FST_TX_QUEUE_LEN; |
2570 | dev->stop = fst_close; | 2579 | dev->watchdog_timeo = FST_TX_TIMEOUT; |
2571 | dev->do_ioctl = fst_ioctl; | ||
2572 | dev->watchdog_timeo = FST_TX_TIMEOUT; | ||
2573 | dev->tx_timeout = fst_tx_timeout; | ||
2574 | hdlc->attach = fst_attach; | 2580 | hdlc->attach = fst_attach; |
2575 | hdlc->xmit = fst_start_xmit; | 2581 | hdlc->xmit = fst_start_xmit; |
2576 | } | 2582 | } |
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c index 08b3536944fe..497b003d7239 100644 --- a/drivers/net/wan/hd64572.c +++ b/drivers/net/wan/hd64572.c | |||
@@ -341,7 +341,7 @@ static int sca_poll(struct napi_struct *napi, int budget) | |||
341 | received = sca_rx_done(port, budget); | 341 | received = sca_rx_done(port, budget); |
342 | 342 | ||
343 | if (received < budget) { | 343 | if (received < budget) { |
344 | netif_rx_complete(napi); | 344 | napi_complete(napi); |
345 | enable_intr(port); | 345 | enable_intr(port); |
346 | } | 346 | } |
347 | 347 | ||
@@ -359,7 +359,7 @@ static irqreturn_t sca_intr(int irq, void *dev_id) | |||
359 | if (port && (isr0 & (i ? 0x08002200 : 0x00080022))) { | 359 | if (port && (isr0 & (i ? 0x08002200 : 0x00080022))) { |
360 | handled = 1; | 360 | handled = 1; |
361 | disable_intr(port); | 361 | disable_intr(port); |
362 | netif_rx_schedule(&port->napi); | 362 | napi_schedule(&port->napi); |
363 | } | 363 | } |
364 | } | 364 | } |
365 | 365 | ||
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c index 1f2a140c9f7c..5ce437205558 100644 --- a/drivers/net/wan/hdlc.c +++ b/drivers/net/wan/hdlc.c | |||
@@ -44,7 +44,7 @@ static const char* version = "HDLC support module revision 1.22"; | |||
44 | 44 | ||
45 | static struct hdlc_proto *first_proto; | 45 | static struct hdlc_proto *first_proto; |
46 | 46 | ||
47 | static int hdlc_change_mtu(struct net_device *dev, int new_mtu) | 47 | int hdlc_change_mtu(struct net_device *dev, int new_mtu) |
48 | { | 48 | { |
49 | if ((new_mtu < 68) || (new_mtu > HDLC_MAX_MTU)) | 49 | if ((new_mtu < 68) || (new_mtu > HDLC_MAX_MTU)) |
50 | return -EINVAL; | 50 | return -EINVAL; |
@@ -52,15 +52,6 @@ static int hdlc_change_mtu(struct net_device *dev, int new_mtu) | |||
52 | return 0; | 52 | return 0; |
53 | } | 53 | } |
54 | 54 | ||
55 | |||
56 | |||
57 | static struct net_device_stats *hdlc_get_stats(struct net_device *dev) | ||
58 | { | ||
59 | return &dev->stats; | ||
60 | } | ||
61 | |||
62 | |||
63 | |||
64 | static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev, | 55 | static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev, |
65 | struct packet_type *p, struct net_device *orig_dev) | 56 | struct packet_type *p, struct net_device *orig_dev) |
66 | { | 57 | { |
@@ -75,7 +66,15 @@ static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev, | |||
75 | return hdlc->proto->netif_rx(skb); | 66 | return hdlc->proto->netif_rx(skb); |
76 | } | 67 | } |
77 | 68 | ||
69 | int hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
70 | { | ||
71 | hdlc_device *hdlc = dev_to_hdlc(dev); | ||
72 | |||
73 | if (hdlc->proto->xmit) | ||
74 | return hdlc->proto->xmit(skb, dev); | ||
78 | 75 | ||
76 | return hdlc->xmit(skb, dev); /* call hardware driver directly */ | ||
77 | } | ||
79 | 78 | ||
80 | static inline void hdlc_proto_start(struct net_device *dev) | 79 | static inline void hdlc_proto_start(struct net_device *dev) |
81 | { | 80 | { |
@@ -102,11 +101,11 @@ static int hdlc_device_event(struct notifier_block *this, unsigned long event, | |||
102 | hdlc_device *hdlc; | 101 | hdlc_device *hdlc; |
103 | unsigned long flags; | 102 | unsigned long flags; |
104 | int on; | 103 | int on; |
105 | 104 | ||
106 | if (dev_net(dev) != &init_net) | 105 | if (dev_net(dev) != &init_net) |
107 | return NOTIFY_DONE; | 106 | return NOTIFY_DONE; |
108 | 107 | ||
109 | if (dev->get_stats != hdlc_get_stats) | 108 | if (!(dev->priv_flags & IFF_WAN_HDLC)) |
110 | return NOTIFY_DONE; /* not an HDLC device */ | 109 | return NOTIFY_DONE; /* not an HDLC device */ |
111 | 110 | ||
112 | if (event != NETDEV_CHANGE) | 111 | if (event != NETDEV_CHANGE) |
@@ -233,15 +232,13 @@ static void hdlc_setup_dev(struct net_device *dev) | |||
233 | /* Re-init all variables changed by HDLC protocol drivers, | 232 | /* Re-init all variables changed by HDLC protocol drivers, |
234 | * including ether_setup() called from hdlc_raw_eth.c. | 233 | * including ether_setup() called from hdlc_raw_eth.c. |
235 | */ | 234 | */ |
236 | dev->get_stats = hdlc_get_stats; | ||
237 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; | 235 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; |
236 | dev->priv_flags = IFF_WAN_HDLC; | ||
238 | dev->mtu = HDLC_MAX_MTU; | 237 | dev->mtu = HDLC_MAX_MTU; |
239 | dev->type = ARPHRD_RAWHDLC; | 238 | dev->type = ARPHRD_RAWHDLC; |
240 | dev->hard_header_len = 16; | 239 | dev->hard_header_len = 16; |
241 | dev->addr_len = 0; | 240 | dev->addr_len = 0; |
242 | dev->header_ops = &hdlc_null_ops; | 241 | dev->header_ops = &hdlc_null_ops; |
243 | |||
244 | dev->change_mtu = hdlc_change_mtu; | ||
245 | } | 242 | } |
246 | 243 | ||
247 | static void hdlc_setup(struct net_device *dev) | 244 | static void hdlc_setup(struct net_device *dev) |
@@ -339,6 +336,8 @@ MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); | |||
339 | MODULE_DESCRIPTION("HDLC support module"); | 336 | MODULE_DESCRIPTION("HDLC support module"); |
340 | MODULE_LICENSE("GPL v2"); | 337 | MODULE_LICENSE("GPL v2"); |
341 | 338 | ||
339 | EXPORT_SYMBOL(hdlc_change_mtu); | ||
340 | EXPORT_SYMBOL(hdlc_start_xmit); | ||
342 | EXPORT_SYMBOL(hdlc_open); | 341 | EXPORT_SYMBOL(hdlc_open); |
343 | EXPORT_SYMBOL(hdlc_close); | 342 | EXPORT_SYMBOL(hdlc_close); |
344 | EXPORT_SYMBOL(hdlc_ioctl); | 343 | EXPORT_SYMBOL(hdlc_ioctl); |
@@ -350,7 +349,7 @@ EXPORT_SYMBOL(attach_hdlc_protocol); | |||
350 | EXPORT_SYMBOL(detach_hdlc_protocol); | 349 | EXPORT_SYMBOL(detach_hdlc_protocol); |
351 | 350 | ||
352 | static struct packet_type hdlc_packet_type = { | 351 | static struct packet_type hdlc_packet_type = { |
353 | .type = __constant_htons(ETH_P_HDLC), | 352 | .type = cpu_to_be16(ETH_P_HDLC), |
354 | .func = hdlc_rcv, | 353 | .func = hdlc_rcv, |
355 | }; | 354 | }; |
356 | 355 | ||
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index 44e64b15dbd1..cf5fd17ad707 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c | |||
@@ -117,7 +117,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type, | |||
117 | data->type = htonl(type); | 117 | data->type = htonl(type); |
118 | data->par1 = par1; | 118 | data->par1 = par1; |
119 | data->par2 = par2; | 119 | data->par2 = par2; |
120 | data->rel = __constant_htons(0xFFFF); | 120 | data->rel = cpu_to_be16(0xFFFF); |
121 | /* we will need do_div here if 1000 % HZ != 0 */ | 121 | /* we will need do_div here if 1000 % HZ != 0 */ |
122 | data->time = htonl((jiffies - INITIAL_JIFFIES) * (1000 / HZ)); | 122 | data->time = htonl((jiffies - INITIAL_JIFFIES) * (1000 / HZ)); |
123 | 123 | ||
@@ -136,20 +136,20 @@ static __be16 cisco_type_trans(struct sk_buff *skb, struct net_device *dev) | |||
136 | struct hdlc_header *data = (struct hdlc_header*)skb->data; | 136 | struct hdlc_header *data = (struct hdlc_header*)skb->data; |
137 | 137 | ||
138 | if (skb->len < sizeof(struct hdlc_header)) | 138 | if (skb->len < sizeof(struct hdlc_header)) |
139 | return __constant_htons(ETH_P_HDLC); | 139 | return cpu_to_be16(ETH_P_HDLC); |
140 | 140 | ||
141 | if (data->address != CISCO_MULTICAST && | 141 | if (data->address != CISCO_MULTICAST && |
142 | data->address != CISCO_UNICAST) | 142 | data->address != CISCO_UNICAST) |
143 | return __constant_htons(ETH_P_HDLC); | 143 | return cpu_to_be16(ETH_P_HDLC); |
144 | 144 | ||
145 | switch(data->protocol) { | 145 | switch(data->protocol) { |
146 | case __constant_htons(ETH_P_IP): | 146 | case cpu_to_be16(ETH_P_IP): |
147 | case __constant_htons(ETH_P_IPX): | 147 | case cpu_to_be16(ETH_P_IPX): |
148 | case __constant_htons(ETH_P_IPV6): | 148 | case cpu_to_be16(ETH_P_IPV6): |
149 | skb_pull(skb, sizeof(struct hdlc_header)); | 149 | skb_pull(skb, sizeof(struct hdlc_header)); |
150 | return data->protocol; | 150 | return data->protocol; |
151 | default: | 151 | default: |
152 | return __constant_htons(ETH_P_HDLC); | 152 | return cpu_to_be16(ETH_P_HDLC); |
153 | } | 153 | } |
154 | } | 154 | } |
155 | 155 | ||
@@ -194,7 +194,7 @@ static int cisco_rx(struct sk_buff *skb) | |||
194 | case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */ | 194 | case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */ |
195 | in_dev = dev->ip_ptr; | 195 | in_dev = dev->ip_ptr; |
196 | addr = 0; | 196 | addr = 0; |
197 | mask = __constant_htonl(~0); /* is the mask correct? */ | 197 | mask = ~cpu_to_be32(0); /* is the mask correct? */ |
198 | 198 | ||
199 | if (in_dev != NULL) { | 199 | if (in_dev != NULL) { |
200 | struct in_ifaddr **ifap = &in_dev->ifa_list; | 200 | struct in_ifaddr **ifap = &in_dev->ifa_list; |
@@ -382,7 +382,6 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
382 | 382 | ||
383 | memcpy(&state(hdlc)->settings, &new_settings, size); | 383 | memcpy(&state(hdlc)->settings, &new_settings, size); |
384 | spin_lock_init(&state(hdlc)->lock); | 384 | spin_lock_init(&state(hdlc)->lock); |
385 | dev->hard_start_xmit = hdlc->xmit; | ||
386 | dev->header_ops = &cisco_header_ops; | 385 | dev->header_ops = &cisco_header_ops; |
387 | dev->type = ARPHRD_CISCO; | 386 | dev->type = ARPHRD_CISCO; |
388 | netif_dormant_on(dev); | 387 | netif_dormant_on(dev); |
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index f1ddd7c3459c..800530101093 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c | |||
@@ -278,31 +278,31 @@ static int fr_hard_header(struct sk_buff **skb_p, u16 dlci) | |||
278 | struct sk_buff *skb = *skb_p; | 278 | struct sk_buff *skb = *skb_p; |
279 | 279 | ||
280 | switch (skb->protocol) { | 280 | switch (skb->protocol) { |
281 | case __constant_htons(NLPID_CCITT_ANSI_LMI): | 281 | case cpu_to_be16(NLPID_CCITT_ANSI_LMI): |
282 | head_len = 4; | 282 | head_len = 4; |
283 | skb_push(skb, head_len); | 283 | skb_push(skb, head_len); |
284 | skb->data[3] = NLPID_CCITT_ANSI_LMI; | 284 | skb->data[3] = NLPID_CCITT_ANSI_LMI; |
285 | break; | 285 | break; |
286 | 286 | ||
287 | case __constant_htons(NLPID_CISCO_LMI): | 287 | case cpu_to_be16(NLPID_CISCO_LMI): |
288 | head_len = 4; | 288 | head_len = 4; |
289 | skb_push(skb, head_len); | 289 | skb_push(skb, head_len); |
290 | skb->data[3] = NLPID_CISCO_LMI; | 290 | skb->data[3] = NLPID_CISCO_LMI; |
291 | break; | 291 | break; |
292 | 292 | ||
293 | case __constant_htons(ETH_P_IP): | 293 | case cpu_to_be16(ETH_P_IP): |
294 | head_len = 4; | 294 | head_len = 4; |
295 | skb_push(skb, head_len); | 295 | skb_push(skb, head_len); |
296 | skb->data[3] = NLPID_IP; | 296 | skb->data[3] = NLPID_IP; |
297 | break; | 297 | break; |
298 | 298 | ||
299 | case __constant_htons(ETH_P_IPV6): | 299 | case cpu_to_be16(ETH_P_IPV6): |
300 | head_len = 4; | 300 | head_len = 4; |
301 | skb_push(skb, head_len); | 301 | skb_push(skb, head_len); |
302 | skb->data[3] = NLPID_IPV6; | 302 | skb->data[3] = NLPID_IPV6; |
303 | break; | 303 | break; |
304 | 304 | ||
305 | case __constant_htons(ETH_P_802_3): | 305 | case cpu_to_be16(ETH_P_802_3): |
306 | head_len = 10; | 306 | head_len = 10; |
307 | if (skb_headroom(skb) < head_len) { | 307 | if (skb_headroom(skb) < head_len) { |
308 | struct sk_buff *skb2 = skb_realloc_headroom(skb, | 308 | struct sk_buff *skb2 = skb_realloc_headroom(skb, |
@@ -426,7 +426,7 @@ static int pvc_xmit(struct sk_buff *skb, struct net_device *dev) | |||
426 | skb_put(skb, pad); | 426 | skb_put(skb, pad); |
427 | memset(skb->data + len, 0, pad); | 427 | memset(skb->data + len, 0, pad); |
428 | } | 428 | } |
429 | skb->protocol = __constant_htons(ETH_P_802_3); | 429 | skb->protocol = cpu_to_be16(ETH_P_802_3); |
430 | } | 430 | } |
431 | if (!fr_hard_header(&skb, pvc->dlci)) { | 431 | if (!fr_hard_header(&skb, pvc->dlci)) { |
432 | dev->stats.tx_bytes += skb->len; | 432 | dev->stats.tx_bytes += skb->len; |
@@ -444,18 +444,6 @@ static int pvc_xmit(struct sk_buff *skb, struct net_device *dev) | |||
444 | return 0; | 444 | return 0; |
445 | } | 445 | } |
446 | 446 | ||
447 | |||
448 | |||
449 | static int pvc_change_mtu(struct net_device *dev, int new_mtu) | ||
450 | { | ||
451 | if ((new_mtu < 68) || (new_mtu > HDLC_MAX_MTU)) | ||
452 | return -EINVAL; | ||
453 | dev->mtu = new_mtu; | ||
454 | return 0; | ||
455 | } | ||
456 | |||
457 | |||
458 | |||
459 | static inline void fr_log_dlci_active(pvc_device *pvc) | 447 | static inline void fr_log_dlci_active(pvc_device *pvc) |
460 | { | 448 | { |
461 | printk(KERN_INFO "%s: DLCI %d [%s%s%s]%s %s\n", | 449 | printk(KERN_INFO "%s: DLCI %d [%s%s%s]%s %s\n", |
@@ -508,10 +496,10 @@ static void fr_lmi_send(struct net_device *dev, int fullrep) | |||
508 | memset(skb->data, 0, len); | 496 | memset(skb->data, 0, len); |
509 | skb_reserve(skb, 4); | 497 | skb_reserve(skb, 4); |
510 | if (lmi == LMI_CISCO) { | 498 | if (lmi == LMI_CISCO) { |
511 | skb->protocol = __constant_htons(NLPID_CISCO_LMI); | 499 | skb->protocol = cpu_to_be16(NLPID_CISCO_LMI); |
512 | fr_hard_header(&skb, LMI_CISCO_DLCI); | 500 | fr_hard_header(&skb, LMI_CISCO_DLCI); |
513 | } else { | 501 | } else { |
514 | skb->protocol = __constant_htons(NLPID_CCITT_ANSI_LMI); | 502 | skb->protocol = cpu_to_be16(NLPID_CCITT_ANSI_LMI); |
515 | fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI); | 503 | fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI); |
516 | } | 504 | } |
517 | data = skb_tail_pointer(skb); | 505 | data = skb_tail_pointer(skb); |
@@ -1068,6 +1056,14 @@ static void pvc_setup(struct net_device *dev) | |||
1068 | dev->addr_len = 2; | 1056 | dev->addr_len = 2; |
1069 | } | 1057 | } |
1070 | 1058 | ||
1059 | static const struct net_device_ops pvc_ops = { | ||
1060 | .ndo_open = pvc_open, | ||
1061 | .ndo_stop = pvc_close, | ||
1062 | .ndo_change_mtu = hdlc_change_mtu, | ||
1063 | .ndo_start_xmit = pvc_xmit, | ||
1064 | .ndo_do_ioctl = pvc_ioctl, | ||
1065 | }; | ||
1066 | |||
1071 | static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) | 1067 | static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) |
1072 | { | 1068 | { |
1073 | hdlc_device *hdlc = dev_to_hdlc(frad); | 1069 | hdlc_device *hdlc = dev_to_hdlc(frad); |
@@ -1104,11 +1100,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) | |||
1104 | *(__be16*)dev->dev_addr = htons(dlci); | 1100 | *(__be16*)dev->dev_addr = htons(dlci); |
1105 | dlci_to_q922(dev->broadcast, dlci); | 1101 | dlci_to_q922(dev->broadcast, dlci); |
1106 | } | 1102 | } |
1107 | dev->hard_start_xmit = pvc_xmit; | 1103 | dev->netdev_ops = &pvc_ops; |
1108 | dev->open = pvc_open; | ||
1109 | dev->stop = pvc_close; | ||
1110 | dev->do_ioctl = pvc_ioctl; | ||
1111 | dev->change_mtu = pvc_change_mtu; | ||
1112 | dev->mtu = HDLC_MAX_MTU; | 1104 | dev->mtu = HDLC_MAX_MTU; |
1113 | dev->tx_queue_len = 0; | 1105 | dev->tx_queue_len = 0; |
1114 | dev->ml_priv = pvc; | 1106 | dev->ml_priv = pvc; |
@@ -1260,8 +1252,6 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
1260 | state(hdlc)->dce_pvc_count = 0; | 1252 | state(hdlc)->dce_pvc_count = 0; |
1261 | } | 1253 | } |
1262 | memcpy(&state(hdlc)->settings, &new_settings, size); | 1254 | memcpy(&state(hdlc)->settings, &new_settings, size); |
1263 | |||
1264 | dev->hard_start_xmit = hdlc->xmit; | ||
1265 | dev->type = ARPHRD_FRAD; | 1255 | dev->type = ARPHRD_FRAD; |
1266 | return 0; | 1256 | return 0; |
1267 | 1257 | ||
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index 57fe714c1c7f..72a7cdab4245 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c | |||
@@ -150,11 +150,11 @@ static __be16 ppp_type_trans(struct sk_buff *skb, struct net_device *dev) | |||
150 | return htons(ETH_P_HDLC); | 150 | return htons(ETH_P_HDLC); |
151 | 151 | ||
152 | switch (data->protocol) { | 152 | switch (data->protocol) { |
153 | case __constant_htons(PID_IP): | 153 | case cpu_to_be16(PID_IP): |
154 | skb_pull(skb, sizeof(struct hdlc_header)); | 154 | skb_pull(skb, sizeof(struct hdlc_header)); |
155 | return htons(ETH_P_IP); | 155 | return htons(ETH_P_IP); |
156 | 156 | ||
157 | case __constant_htons(PID_IPV6): | 157 | case cpu_to_be16(PID_IPV6): |
158 | skb_pull(skb, sizeof(struct hdlc_header)); | 158 | skb_pull(skb, sizeof(struct hdlc_header)); |
159 | return htons(ETH_P_IPV6); | 159 | return htons(ETH_P_IPV6); |
160 | 160 | ||
@@ -558,7 +558,6 @@ out: | |||
558 | return NET_RX_DROP; | 558 | return NET_RX_DROP; |
559 | } | 559 | } |
560 | 560 | ||
561 | |||
562 | static void ppp_timer(unsigned long arg) | 561 | static void ppp_timer(unsigned long arg) |
563 | { | 562 | { |
564 | struct proto *proto = (struct proto *)arg; | 563 | struct proto *proto = (struct proto *)arg; |
@@ -679,7 +678,6 @@ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
679 | ppp->keepalive_interval = 10; | 678 | ppp->keepalive_interval = 10; |
680 | ppp->keepalive_timeout = 60; | 679 | ppp->keepalive_timeout = 60; |
681 | 680 | ||
682 | dev->hard_start_xmit = hdlc->xmit; | ||
683 | dev->hard_header_len = sizeof(struct hdlc_header); | 681 | dev->hard_header_len = sizeof(struct hdlc_header); |
684 | dev->header_ops = &ppp_header_ops; | 682 | dev->header_ops = &ppp_header_ops; |
685 | dev->type = ARPHRD_PPP; | 683 | dev->type = ARPHRD_PPP; |
diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c index 8612311748f4..19f51fdd5522 100644 --- a/drivers/net/wan/hdlc_raw.c +++ b/drivers/net/wan/hdlc_raw.c | |||
@@ -27,11 +27,9 @@ static int raw_ioctl(struct net_device *dev, struct ifreq *ifr); | |||
27 | 27 | ||
28 | static __be16 raw_type_trans(struct sk_buff *skb, struct net_device *dev) | 28 | static __be16 raw_type_trans(struct sk_buff *skb, struct net_device *dev) |
29 | { | 29 | { |
30 | return __constant_htons(ETH_P_IP); | 30 | return cpu_to_be16(ETH_P_IP); |
31 | } | 31 | } |
32 | 32 | ||
33 | |||
34 | |||
35 | static struct hdlc_proto proto = { | 33 | static struct hdlc_proto proto = { |
36 | .type_trans = raw_type_trans, | 34 | .type_trans = raw_type_trans, |
37 | .ioctl = raw_ioctl, | 35 | .ioctl = raw_ioctl, |
@@ -86,7 +84,6 @@ static int raw_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
86 | if (result) | 84 | if (result) |
87 | return result; | 85 | return result; |
88 | memcpy(hdlc->state, &new_settings, size); | 86 | memcpy(hdlc->state, &new_settings, size); |
89 | dev->hard_start_xmit = hdlc->xmit; | ||
90 | dev->type = ARPHRD_RAWHDLC; | 87 | dev->type = ARPHRD_RAWHDLC; |
91 | netif_dormant_off(dev); | 88 | netif_dormant_off(dev); |
92 | return 0; | 89 | return 0; |
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c index a13fc3207520..49e68f5ca5f2 100644 --- a/drivers/net/wan/hdlc_raw_eth.c +++ b/drivers/net/wan/hdlc_raw_eth.c | |||
@@ -45,6 +45,7 @@ static int eth_tx(struct sk_buff *skb, struct net_device *dev) | |||
45 | 45 | ||
46 | static struct hdlc_proto proto = { | 46 | static struct hdlc_proto proto = { |
47 | .type_trans = eth_type_trans, | 47 | .type_trans = eth_type_trans, |
48 | .xmit = eth_tx, | ||
48 | .ioctl = raw_eth_ioctl, | 49 | .ioctl = raw_eth_ioctl, |
49 | .module = THIS_MODULE, | 50 | .module = THIS_MODULE, |
50 | }; | 51 | }; |
@@ -56,9 +57,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
56 | const size_t size = sizeof(raw_hdlc_proto); | 57 | const size_t size = sizeof(raw_hdlc_proto); |
57 | raw_hdlc_proto new_settings; | 58 | raw_hdlc_proto new_settings; |
58 | hdlc_device *hdlc = dev_to_hdlc(dev); | 59 | hdlc_device *hdlc = dev_to_hdlc(dev); |
59 | int result; | 60 | int result, old_qlen; |
60 | int (*old_ch_mtu)(struct net_device *, int); | ||
61 | int old_qlen; | ||
62 | 61 | ||
63 | switch (ifr->ifr_settings.type) { | 62 | switch (ifr->ifr_settings.type) { |
64 | case IF_GET_PROTO: | 63 | case IF_GET_PROTO: |
@@ -99,11 +98,8 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
99 | if (result) | 98 | if (result) |
100 | return result; | 99 | return result; |
101 | memcpy(hdlc->state, &new_settings, size); | 100 | memcpy(hdlc->state, &new_settings, size); |
102 | dev->hard_start_xmit = eth_tx; | ||
103 | old_ch_mtu = dev->change_mtu; | ||
104 | old_qlen = dev->tx_queue_len; | 101 | old_qlen = dev->tx_queue_len; |
105 | ether_setup(dev); | 102 | ether_setup(dev); |
106 | dev->change_mtu = old_ch_mtu; | ||
107 | dev->tx_queue_len = old_qlen; | 103 | dev->tx_queue_len = old_qlen; |
108 | random_ether_addr(dev->dev_addr); | 104 | random_ether_addr(dev->dev_addr); |
109 | netif_dormant_off(dev); | 105 | netif_dormant_off(dev); |
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c index cbcbf6f0414c..b1dc29ed1583 100644 --- a/drivers/net/wan/hdlc_x25.c +++ b/drivers/net/wan/hdlc_x25.c | |||
@@ -184,6 +184,7 @@ static struct hdlc_proto proto = { | |||
184 | .close = x25_close, | 184 | .close = x25_close, |
185 | .ioctl = x25_ioctl, | 185 | .ioctl = x25_ioctl, |
186 | .netif_rx = x25_rx, | 186 | .netif_rx = x25_rx, |
187 | .xmit = x25_xmit, | ||
187 | .module = THIS_MODULE, | 188 | .module = THIS_MODULE, |
188 | }; | 189 | }; |
189 | 190 | ||
@@ -213,7 +214,6 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
213 | 214 | ||
214 | if ((result = attach_hdlc_protocol(dev, &proto, 0))) | 215 | if ((result = attach_hdlc_protocol(dev, &proto, 0))) |
215 | return result; | 216 | return result; |
216 | dev->hard_start_xmit = x25_xmit; | ||
217 | dev->type = ARPHRD_X25; | 217 | dev->type = ARPHRD_X25; |
218 | netif_dormant_off(dev); | 218 | netif_dormant_off(dev); |
219 | return 0; | 219 | return 0; |
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c index af54f0cf1b35..567d4f5062d6 100644 --- a/drivers/net/wan/hostess_sv11.c +++ b/drivers/net/wan/hostess_sv11.c | |||
@@ -173,6 +173,14 @@ static int hostess_attach(struct net_device *dev, unsigned short encoding, | |||
173 | * Description block for a Comtrol Hostess SV11 card | 173 | * Description block for a Comtrol Hostess SV11 card |
174 | */ | 174 | */ |
175 | 175 | ||
176 | static const struct net_device_ops hostess_ops = { | ||
177 | .ndo_open = hostess_open, | ||
178 | .ndo_stop = hostess_close, | ||
179 | .ndo_change_mtu = hdlc_change_mtu, | ||
180 | .ndo_start_xmit = hdlc_start_xmit, | ||
181 | .ndo_do_ioctl = hostess_ioctl, | ||
182 | }; | ||
183 | |||
176 | static struct z8530_dev *sv11_init(int iobase, int irq) | 184 | static struct z8530_dev *sv11_init(int iobase, int irq) |
177 | { | 185 | { |
178 | struct z8530_dev *sv; | 186 | struct z8530_dev *sv; |
@@ -267,9 +275,7 @@ static struct z8530_dev *sv11_init(int iobase, int irq) | |||
267 | 275 | ||
268 | dev_to_hdlc(netdev)->attach = hostess_attach; | 276 | dev_to_hdlc(netdev)->attach = hostess_attach; |
269 | dev_to_hdlc(netdev)->xmit = hostess_queue_xmit; | 277 | dev_to_hdlc(netdev)->xmit = hostess_queue_xmit; |
270 | netdev->open = hostess_open; | 278 | netdev->netdev_ops = &hostess_ops; |
271 | netdev->stop = hostess_close; | ||
272 | netdev->do_ioctl = hostess_ioctl; | ||
273 | netdev->base_addr = iobase; | 279 | netdev->base_addr = iobase; |
274 | netdev->irq = irq; | 280 | netdev->irq = irq; |
275 | 281 | ||
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c index 0dbd85b0162d..3bf7d3f447db 100644 --- a/drivers/net/wan/ixp4xx_hss.c +++ b/drivers/net/wan/ixp4xx_hss.c | |||
@@ -622,7 +622,7 @@ static void hss_hdlc_rx_irq(void *pdev) | |||
622 | printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name); | 622 | printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name); |
623 | #endif | 623 | #endif |
624 | qmgr_disable_irq(queue_ids[port->id].rx); | 624 | qmgr_disable_irq(queue_ids[port->id].rx); |
625 | netif_rx_schedule(&port->napi); | 625 | napi_schedule(&port->napi); |
626 | } | 626 | } |
627 | 627 | ||
628 | static int hss_hdlc_poll(struct napi_struct *napi, int budget) | 628 | static int hss_hdlc_poll(struct napi_struct *napi, int budget) |
@@ -649,15 +649,15 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget) | |||
649 | if ((n = queue_get_desc(rxq, port, 0)) < 0) { | 649 | if ((n = queue_get_desc(rxq, port, 0)) < 0) { |
650 | #if DEBUG_RX | 650 | #if DEBUG_RX |
651 | printk(KERN_DEBUG "%s: hss_hdlc_poll" | 651 | printk(KERN_DEBUG "%s: hss_hdlc_poll" |
652 | " netif_rx_complete\n", dev->name); | 652 | " napi_complete\n", dev->name); |
653 | #endif | 653 | #endif |
654 | netif_rx_complete(napi); | 654 | napi_complete(napi); |
655 | qmgr_enable_irq(rxq); | 655 | qmgr_enable_irq(rxq); |
656 | if (!qmgr_stat_empty(rxq) && | 656 | if (!qmgr_stat_empty(rxq) && |
657 | netif_rx_reschedule(napi)) { | 657 | napi_reschedule(napi)) { |
658 | #if DEBUG_RX | 658 | #if DEBUG_RX |
659 | printk(KERN_DEBUG "%s: hss_hdlc_poll" | 659 | printk(KERN_DEBUG "%s: hss_hdlc_poll" |
660 | " netif_rx_reschedule succeeded\n", | 660 | " napi_reschedule succeeded\n", |
661 | dev->name); | 661 | dev->name); |
662 | #endif | 662 | #endif |
663 | qmgr_disable_irq(rxq); | 663 | qmgr_disable_irq(rxq); |
@@ -1069,7 +1069,7 @@ static int hss_hdlc_open(struct net_device *dev) | |||
1069 | hss_start_hdlc(port); | 1069 | hss_start_hdlc(port); |
1070 | 1070 | ||
1071 | /* we may already have RX data, enables IRQ */ | 1071 | /* we may already have RX data, enables IRQ */ |
1072 | netif_rx_schedule(&port->napi); | 1072 | napi_schedule(&port->napi); |
1073 | return 0; | 1073 | return 0; |
1074 | 1074 | ||
1075 | err_unlock: | 1075 | err_unlock: |
@@ -1230,6 +1230,14 @@ static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1230 | * initialization | 1230 | * initialization |
1231 | ****************************************************************************/ | 1231 | ****************************************************************************/ |
1232 | 1232 | ||
1233 | static const struct net_device_ops hss_hdlc_ops = { | ||
1234 | .ndo_open = hss_hdlc_open, | ||
1235 | .ndo_stop = hss_hdlc_close, | ||
1236 | .ndo_change_mtu = hdlc_change_mtu, | ||
1237 | .ndo_start_xmit = hdlc_start_xmit, | ||
1238 | .ndo_do_ioctl = hss_hdlc_ioctl, | ||
1239 | }; | ||
1240 | |||
1233 | static int __devinit hss_init_one(struct platform_device *pdev) | 1241 | static int __devinit hss_init_one(struct platform_device *pdev) |
1234 | { | 1242 | { |
1235 | struct port *port; | 1243 | struct port *port; |
@@ -1254,9 +1262,7 @@ static int __devinit hss_init_one(struct platform_device *pdev) | |||
1254 | hdlc = dev_to_hdlc(dev); | 1262 | hdlc = dev_to_hdlc(dev); |
1255 | hdlc->attach = hss_hdlc_attach; | 1263 | hdlc->attach = hss_hdlc_attach; |
1256 | hdlc->xmit = hss_hdlc_xmit; | 1264 | hdlc->xmit = hss_hdlc_xmit; |
1257 | dev->open = hss_hdlc_open; | 1265 | dev->netdev_ops = &hss_hdlc_ops; |
1258 | dev->stop = hss_hdlc_close; | ||
1259 | dev->do_ioctl = hss_hdlc_ioctl; | ||
1260 | dev->tx_queue_len = 100; | 1266 | dev->tx_queue_len = 100; |
1261 | port->clock_type = CLOCK_EXT; | 1267 | port->clock_type = CLOCK_EXT; |
1262 | port->clock_rate = 2048000; | 1268 | port->clock_rate = 2048000; |
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index 5b61b3eef45f..da9dcf59de24 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c | |||
@@ -422,7 +422,7 @@ static int lapbeth_device_event(struct notifier_block *this, | |||
422 | /* ------------------------------------------------------------------------ */ | 422 | /* ------------------------------------------------------------------------ */ |
423 | 423 | ||
424 | static struct packet_type lapbeth_packet_type = { | 424 | static struct packet_type lapbeth_packet_type = { |
425 | .type = __constant_htons(ETH_P_DEC), | 425 | .type = cpu_to_be16(ETH_P_DEC), |
426 | .func = lapbeth_rcv, | 426 | .func = lapbeth_rcv, |
427 | }; | 427 | }; |
428 | 428 | ||
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index feac3b99f8fe..45b1822c962d 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c | |||
@@ -806,6 +806,16 @@ static int lmc_attach(struct net_device *dev, unsigned short encoding, | |||
806 | return -EINVAL; | 806 | return -EINVAL; |
807 | } | 807 | } |
808 | 808 | ||
809 | static const struct net_device_ops lmc_ops = { | ||
810 | .ndo_open = lmc_open, | ||
811 | .ndo_stop = lmc_close, | ||
812 | .ndo_change_mtu = hdlc_change_mtu, | ||
813 | .ndo_start_xmit = hdlc_start_xmit, | ||
814 | .ndo_do_ioctl = lmc_ioctl, | ||
815 | .ndo_tx_timeout = lmc_driver_timeout, | ||
816 | .ndo_get_stats = lmc_get_stats, | ||
817 | }; | ||
818 | |||
809 | static int __devinit lmc_init_one(struct pci_dev *pdev, | 819 | static int __devinit lmc_init_one(struct pci_dev *pdev, |
810 | const struct pci_device_id *ent) | 820 | const struct pci_device_id *ent) |
811 | { | 821 | { |
@@ -849,11 +859,7 @@ static int __devinit lmc_init_one(struct pci_dev *pdev, | |||
849 | dev->type = ARPHRD_HDLC; | 859 | dev->type = ARPHRD_HDLC; |
850 | dev_to_hdlc(dev)->xmit = lmc_start_xmit; | 860 | dev_to_hdlc(dev)->xmit = lmc_start_xmit; |
851 | dev_to_hdlc(dev)->attach = lmc_attach; | 861 | dev_to_hdlc(dev)->attach = lmc_attach; |
852 | dev->open = lmc_open; | 862 | dev->netdev_ops = &lmc_ops; |
853 | dev->stop = lmc_close; | ||
854 | dev->get_stats = lmc_get_stats; | ||
855 | dev->do_ioctl = lmc_ioctl; | ||
856 | dev->tx_timeout = lmc_driver_timeout; | ||
857 | dev->watchdog_timeo = HZ; /* 1 second */ | 863 | dev->watchdog_timeo = HZ; /* 1 second */ |
858 | dev->tx_queue_len = 100; | 864 | dev->tx_queue_len = 100; |
859 | sc->lmc_device = dev; | 865 | sc->lmc_device = dev; |
@@ -1059,9 +1065,6 @@ static int lmc_open(struct net_device *dev) | |||
1059 | if ((err = lmc_proto_open(sc)) != 0) | 1065 | if ((err = lmc_proto_open(sc)) != 0) |
1060 | return err; | 1066 | return err; |
1061 | 1067 | ||
1062 | dev->do_ioctl = lmc_ioctl; | ||
1063 | |||
1064 | |||
1065 | netif_start_queue(dev); | 1068 | netif_start_queue(dev); |
1066 | sc->extra_stats.tx_tbusy0++; | 1069 | sc->extra_stats.tx_tbusy0++; |
1067 | 1070 | ||
diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c index 94b4c208b013..044a48175c42 100644 --- a/drivers/net/wan/lmc/lmc_proto.c +++ b/drivers/net/wan/lmc/lmc_proto.c | |||
@@ -51,30 +51,15 @@ | |||
51 | void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/ | 51 | void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/ |
52 | { | 52 | { |
53 | lmc_trace(sc->lmc_device, "lmc_proto_attach in"); | 53 | lmc_trace(sc->lmc_device, "lmc_proto_attach in"); |
54 | switch(sc->if_type){ | 54 | if (sc->if_type == LMC_NET) { |
55 | case LMC_PPP: | ||
56 | { | ||
57 | struct net_device *dev = sc->lmc_device; | ||
58 | dev->do_ioctl = lmc_ioctl; | ||
59 | } | ||
60 | break; | ||
61 | case LMC_NET: | ||
62 | { | ||
63 | struct net_device *dev = sc->lmc_device; | 55 | struct net_device *dev = sc->lmc_device; |
64 | /* | 56 | /* |
65 | * They set a few basics because they don't use HDLC | 57 | * They set a few basics because they don't use HDLC |
66 | */ | 58 | */ |
67 | dev->flags |= IFF_POINTOPOINT; | 59 | dev->flags |= IFF_POINTOPOINT; |
68 | |||
69 | dev->hard_header_len = 0; | 60 | dev->hard_header_len = 0; |
70 | dev->addr_len = 0; | 61 | dev->addr_len = 0; |
71 | } | 62 | } |
72 | case LMC_RAW: /* Setup the task queue, maybe we should notify someone? */ | ||
73 | { | ||
74 | } | ||
75 | default: | ||
76 | break; | ||
77 | } | ||
78 | lmc_trace(sc->lmc_device, "lmc_proto_attach out"); | 63 | lmc_trace(sc->lmc_device, "lmc_proto_attach out"); |
79 | } | 64 | } |
80 | 65 | ||
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c index 697715ae80f4..83da596e2052 100644 --- a/drivers/net/wan/n2.c +++ b/drivers/net/wan/n2.c | |||
@@ -324,7 +324,13 @@ static void n2_destroy_card(card_t *card) | |||
324 | kfree(card); | 324 | kfree(card); |
325 | } | 325 | } |
326 | 326 | ||
327 | 327 | static const struct net_device_ops n2_ops = { | |
328 | .ndo_open = n2_open, | ||
329 | .ndo_stop = n2_close, | ||
330 | .ndo_change_mtu = hdlc_change_mtu, | ||
331 | .ndo_start_xmit = hdlc_start_xmit, | ||
332 | .ndo_do_ioctl = n2_ioctl, | ||
333 | }; | ||
328 | 334 | ||
329 | static int __init n2_run(unsigned long io, unsigned long irq, | 335 | static int __init n2_run(unsigned long io, unsigned long irq, |
330 | unsigned long winbase, long valid0, long valid1) | 336 | unsigned long winbase, long valid0, long valid1) |
@@ -460,9 +466,7 @@ static int __init n2_run(unsigned long io, unsigned long irq, | |||
460 | dev->mem_start = winbase; | 466 | dev->mem_start = winbase; |
461 | dev->mem_end = winbase + USE_WINDOWSIZE - 1; | 467 | dev->mem_end = winbase + USE_WINDOWSIZE - 1; |
462 | dev->tx_queue_len = 50; | 468 | dev->tx_queue_len = 50; |
463 | dev->do_ioctl = n2_ioctl; | 469 | dev->netdev_ops = &n2_ops; |
464 | dev->open = n2_open; | ||
465 | dev->stop = n2_close; | ||
466 | hdlc->attach = sca_attach; | 470 | hdlc->attach = sca_attach; |
467 | hdlc->xmit = sca_xmit; | 471 | hdlc->xmit = sca_xmit; |
468 | port->settings.clock_type = CLOCK_EXT; | 472 | port->settings.clock_type = CLOCK_EXT; |
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c index f247e5d9002a..60ece54bdd94 100644 --- a/drivers/net/wan/pc300too.c +++ b/drivers/net/wan/pc300too.c | |||
@@ -287,7 +287,13 @@ static void pc300_pci_remove_one(struct pci_dev *pdev) | |||
287 | kfree(card); | 287 | kfree(card); |
288 | } | 288 | } |
289 | 289 | ||
290 | 290 | static const struct net_device_ops pc300_ops = { | |
291 | .ndo_open = pc300_open, | ||
292 | .ndo_stop = pc300_close, | ||
293 | .ndo_change_mtu = hdlc_change_mtu, | ||
294 | .ndo_start_xmit = hdlc_start_xmit, | ||
295 | .ndo_do_ioctl = pc300_ioctl, | ||
296 | }; | ||
291 | 297 | ||
292 | static int __devinit pc300_pci_init_one(struct pci_dev *pdev, | 298 | static int __devinit pc300_pci_init_one(struct pci_dev *pdev, |
293 | const struct pci_device_id *ent) | 299 | const struct pci_device_id *ent) |
@@ -448,9 +454,7 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev, | |||
448 | dev->mem_start = ramphys; | 454 | dev->mem_start = ramphys; |
449 | dev->mem_end = ramphys + ramsize - 1; | 455 | dev->mem_end = ramphys + ramsize - 1; |
450 | dev->tx_queue_len = 50; | 456 | dev->tx_queue_len = 50; |
451 | dev->do_ioctl = pc300_ioctl; | 457 | dev->netdev_ops = &pc300_ops; |
452 | dev->open = pc300_open; | ||
453 | dev->stop = pc300_close; | ||
454 | hdlc->attach = sca_attach; | 458 | hdlc->attach = sca_attach; |
455 | hdlc->xmit = sca_xmit; | 459 | hdlc->xmit = sca_xmit; |
456 | port->settings.clock_type = CLOCK_EXT; | 460 | port->settings.clock_type = CLOCK_EXT; |
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c index 1104d3a692f7..e035d8c57e11 100644 --- a/drivers/net/wan/pci200syn.c +++ b/drivers/net/wan/pci200syn.c | |||
@@ -265,7 +265,13 @@ static void pci200_pci_remove_one(struct pci_dev *pdev) | |||
265 | kfree(card); | 265 | kfree(card); |
266 | } | 266 | } |
267 | 267 | ||
268 | 268 | static const struct net_device_ops pci200_ops = { | |
269 | .ndo_open = pci200_open, | ||
270 | .ndo_stop = pci200_close, | ||
271 | .ndo_change_mtu = hdlc_change_mtu, | ||
272 | .ndo_start_xmit = hdlc_start_xmit, | ||
273 | .ndo_do_ioctl = pci200_ioctl, | ||
274 | }; | ||
269 | 275 | ||
270 | static int __devinit pci200_pci_init_one(struct pci_dev *pdev, | 276 | static int __devinit pci200_pci_init_one(struct pci_dev *pdev, |
271 | const struct pci_device_id *ent) | 277 | const struct pci_device_id *ent) |
@@ -395,9 +401,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev, | |||
395 | dev->mem_start = ramphys; | 401 | dev->mem_start = ramphys; |
396 | dev->mem_end = ramphys + ramsize - 1; | 402 | dev->mem_end = ramphys + ramsize - 1; |
397 | dev->tx_queue_len = 50; | 403 | dev->tx_queue_len = 50; |
398 | dev->do_ioctl = pci200_ioctl; | 404 | dev->netdev_ops = &pci200_ops; |
399 | dev->open = pci200_open; | ||
400 | dev->stop = pci200_close; | ||
401 | hdlc->attach = sca_attach; | 405 | hdlc->attach = sca_attach; |
402 | hdlc->xmit = sca_xmit; | 406 | hdlc->xmit = sca_xmit; |
403 | port->settings.clock_type = CLOCK_EXT; | 407 | port->settings.clock_type = CLOCK_EXT; |
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c index 0941a26f6e3f..23b269027453 100644 --- a/drivers/net/wan/sealevel.c +++ b/drivers/net/wan/sealevel.c | |||
@@ -169,6 +169,14 @@ static int sealevel_attach(struct net_device *dev, unsigned short encoding, | |||
169 | return -EINVAL; | 169 | return -EINVAL; |
170 | } | 170 | } |
171 | 171 | ||
172 | static const struct net_device_ops sealevel_ops = { | ||
173 | .ndo_open = sealevel_open, | ||
174 | .ndo_stop = sealevel_close, | ||
175 | .ndo_change_mtu = hdlc_change_mtu, | ||
176 | .ndo_start_xmit = hdlc_start_xmit, | ||
177 | .ndo_do_ioctl = sealevel_ioctl, | ||
178 | }; | ||
179 | |||
172 | static int slvl_setup(struct slvl_device *sv, int iobase, int irq) | 180 | static int slvl_setup(struct slvl_device *sv, int iobase, int irq) |
173 | { | 181 | { |
174 | struct net_device *dev = alloc_hdlcdev(sv); | 182 | struct net_device *dev = alloc_hdlcdev(sv); |
@@ -177,9 +185,7 @@ static int slvl_setup(struct slvl_device *sv, int iobase, int irq) | |||
177 | 185 | ||
178 | dev_to_hdlc(dev)->attach = sealevel_attach; | 186 | dev_to_hdlc(dev)->attach = sealevel_attach; |
179 | dev_to_hdlc(dev)->xmit = sealevel_queue_xmit; | 187 | dev_to_hdlc(dev)->xmit = sealevel_queue_xmit; |
180 | dev->open = sealevel_open; | 188 | dev->netdev_ops = &sealevel_ops; |
181 | dev->stop = sealevel_close; | ||
182 | dev->do_ioctl = sealevel_ioctl; | ||
183 | dev->base_addr = iobase; | 189 | dev->base_addr = iobase; |
184 | dev->irq = irq; | 190 | dev->irq = irq; |
185 | 191 | ||
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c index 4bffb67ebcae..887acb0dc807 100644 --- a/drivers/net/wan/wanxl.c +++ b/drivers/net/wan/wanxl.c | |||
@@ -547,6 +547,15 @@ static void wanxl_pci_remove_one(struct pci_dev *pdev) | |||
547 | 547 | ||
548 | #include "wanxlfw.inc" | 548 | #include "wanxlfw.inc" |
549 | 549 | ||
550 | static const struct net_device_ops wanxl_ops = { | ||
551 | .ndo_open = wanxl_open, | ||
552 | .ndo_stop = wanxl_close, | ||
553 | .ndo_change_mtu = hdlc_change_mtu, | ||
554 | .ndo_start_xmit = hdlc_start_xmit, | ||
555 | .ndo_do_ioctl = wanxl_ioctl, | ||
556 | .ndo_get_stats = wanxl_get_stats, | ||
557 | }; | ||
558 | |||
550 | static int __devinit wanxl_pci_init_one(struct pci_dev *pdev, | 559 | static int __devinit wanxl_pci_init_one(struct pci_dev *pdev, |
551 | const struct pci_device_id *ent) | 560 | const struct pci_device_id *ent) |
552 | { | 561 | { |
@@ -777,12 +786,9 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev, | |||
777 | hdlc = dev_to_hdlc(dev); | 786 | hdlc = dev_to_hdlc(dev); |
778 | spin_lock_init(&port->lock); | 787 | spin_lock_init(&port->lock); |
779 | dev->tx_queue_len = 50; | 788 | dev->tx_queue_len = 50; |
780 | dev->do_ioctl = wanxl_ioctl; | 789 | dev->netdev_ops = &wanxl_ops; |
781 | dev->open = wanxl_open; | ||
782 | dev->stop = wanxl_close; | ||
783 | hdlc->attach = wanxl_attach; | 790 | hdlc->attach = wanxl_attach; |
784 | hdlc->xmit = wanxl_xmit; | 791 | hdlc->xmit = wanxl_xmit; |
785 | dev->get_stats = wanxl_get_stats; | ||
786 | port->card = card; | 792 | port->card = card; |
787 | port->node = i; | 793 | port->node = i; |
788 | get_status(port)->clocking = CLOCK_EXT; | 794 | get_status(port)->clocking = CLOCK_EXT; |
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c index 1d8271f34c38..ecd0cfaefdcc 100644 --- a/drivers/net/wimax/i2400m/fw.c +++ b/drivers/net/wimax/i2400m/fw.c | |||
@@ -140,10 +140,10 @@ | |||
140 | 140 | ||
141 | 141 | ||
142 | static const __le32 i2400m_ACK_BARKER[4] = { | 142 | static const __le32 i2400m_ACK_BARKER[4] = { |
143 | __constant_cpu_to_le32(I2400M_ACK_BARKER), | 143 | cpu_to_le32(I2400M_ACK_BARKER), |
144 | __constant_cpu_to_le32(I2400M_ACK_BARKER), | 144 | cpu_to_le32(I2400M_ACK_BARKER), |
145 | __constant_cpu_to_le32(I2400M_ACK_BARKER), | 145 | cpu_to_le32(I2400M_ACK_BARKER), |
146 | __constant_cpu_to_le32(I2400M_ACK_BARKER) | 146 | cpu_to_le32(I2400M_ACK_BARKER) |
147 | }; | 147 | }; |
148 | 148 | ||
149 | 149 | ||
@@ -771,8 +771,8 @@ static | |||
771 | int i2400m_dnload_init_nonsigned(struct i2400m *i2400m) | 771 | int i2400m_dnload_init_nonsigned(struct i2400m *i2400m) |
772 | { | 772 | { |
773 | #define POKE(a, d) { \ | 773 | #define POKE(a, d) { \ |
774 | .address = __constant_cpu_to_le32(a), \ | 774 | .address = cpu_to_le32(a), \ |
775 | .data = __constant_cpu_to_le32(d) \ | 775 | .data = cpu_to_le32(d) \ |
776 | } | 776 | } |
777 | static const struct { | 777 | static const struct { |
778 | __le32 address; | 778 | __le32 address; |
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h index 067c871cc226..236f19ea4c85 100644 --- a/drivers/net/wimax/i2400m/i2400m.h +++ b/drivers/net/wimax/i2400m/i2400m.h | |||
@@ -664,17 +664,17 @@ extern struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *); | |||
664 | extern void i2400m_tx_msg_sent(struct i2400m *); | 664 | extern void i2400m_tx_msg_sent(struct i2400m *); |
665 | 665 | ||
666 | static const __le32 i2400m_NBOOT_BARKER[4] = { | 666 | static const __le32 i2400m_NBOOT_BARKER[4] = { |
667 | __constant_cpu_to_le32(I2400M_NBOOT_BARKER), | 667 | cpu_to_le32(I2400M_NBOOT_BARKER), |
668 | __constant_cpu_to_le32(I2400M_NBOOT_BARKER), | 668 | cpu_to_le32(I2400M_NBOOT_BARKER), |
669 | __constant_cpu_to_le32(I2400M_NBOOT_BARKER), | 669 | cpu_to_le32(I2400M_NBOOT_BARKER), |
670 | __constant_cpu_to_le32(I2400M_NBOOT_BARKER) | 670 | cpu_to_le32(I2400M_NBOOT_BARKER) |
671 | }; | 671 | }; |
672 | 672 | ||
673 | static const __le32 i2400m_SBOOT_BARKER[4] = { | 673 | static const __le32 i2400m_SBOOT_BARKER[4] = { |
674 | __constant_cpu_to_le32(I2400M_SBOOT_BARKER), | 674 | cpu_to_le32(I2400M_SBOOT_BARKER), |
675 | __constant_cpu_to_le32(I2400M_SBOOT_BARKER), | 675 | cpu_to_le32(I2400M_SBOOT_BARKER), |
676 | __constant_cpu_to_le32(I2400M_SBOOT_BARKER), | 676 | cpu_to_le32(I2400M_SBOOT_BARKER), |
677 | __constant_cpu_to_le32(I2400M_SBOOT_BARKER) | 677 | cpu_to_le32(I2400M_SBOOT_BARKER) |
678 | }; | 678 | }; |
679 | 679 | ||
680 | 680 | ||
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c index 63fe708e8a31..be8be4d0709c 100644 --- a/drivers/net/wimax/i2400m/netdev.c +++ b/drivers/net/wimax/i2400m/netdev.c | |||
@@ -419,7 +419,7 @@ void i2400m_rx_fake_eth_header(struct net_device *net_dev, | |||
419 | 419 | ||
420 | memcpy(eth_hdr->h_dest, net_dev->dev_addr, sizeof(eth_hdr->h_dest)); | 420 | memcpy(eth_hdr->h_dest, net_dev->dev_addr, sizeof(eth_hdr->h_dest)); |
421 | memset(eth_hdr->h_source, 0, sizeof(eth_hdr->h_dest)); | 421 | memset(eth_hdr->h_source, 0, sizeof(eth_hdr->h_dest)); |
422 | eth_hdr->h_proto = __constant_cpu_to_be16(ETH_P_IP); | 422 | eth_hdr->h_proto = cpu_to_be16(ETH_P_IP); |
423 | } | 423 | } |
424 | 424 | ||
425 | 425 | ||
@@ -493,6 +493,14 @@ error_skb_realloc: | |||
493 | i2400m, buf, buf_len); | 493 | i2400m, buf, buf_len); |
494 | } | 494 | } |
495 | 495 | ||
496 | static const struct net_device_ops i2400m_netdev_ops = { | ||
497 | .ndo_open = i2400m_open, | ||
498 | .ndo_stop = i2400m_stop, | ||
499 | .ndo_start_xmit = i2400m_hard_start_xmit, | ||
500 | .ndo_tx_timeout = i2400m_tx_timeout, | ||
501 | .ndo_change_mtu = i2400m_change_mtu, | ||
502 | }; | ||
503 | |||
496 | 504 | ||
497 | /** | 505 | /** |
498 | * i2400m_netdev_setup - Setup setup @net_dev's i2400m private data | 506 | * i2400m_netdev_setup - Setup setup @net_dev's i2400m private data |
@@ -513,11 +521,7 @@ void i2400m_netdev_setup(struct net_device *net_dev) | |||
513 | & (~IFF_BROADCAST /* i2400m is P2P */ | 521 | & (~IFF_BROADCAST /* i2400m is P2P */ |
514 | & ~IFF_MULTICAST); | 522 | & ~IFF_MULTICAST); |
515 | net_dev->watchdog_timeo = I2400M_TX_TIMEOUT; | 523 | net_dev->watchdog_timeo = I2400M_TX_TIMEOUT; |
516 | net_dev->open = i2400m_open; | 524 | net_dev->netdev_ops = &i2400m_netdev_ops; |
517 | net_dev->stop = i2400m_stop; | ||
518 | net_dev->hard_start_xmit = i2400m_hard_start_xmit; | ||
519 | net_dev->change_mtu = i2400m_change_mtu; | ||
520 | net_dev->tx_timeout = i2400m_tx_timeout; | ||
521 | d_fnend(3, NULL, "(net_dev %p) = void\n", net_dev); | 525 | d_fnend(3, NULL, "(net_dev %p) = void\n", net_dev); |
522 | } | 526 | } |
523 | EXPORT_SYMBOL_GPL(i2400m_netdev_setup); | 527 | EXPORT_SYMBOL_GPL(i2400m_netdev_setup); |
diff --git a/drivers/net/wimax/i2400m/sdio.c b/drivers/net/wimax/i2400m/sdio.c index 1bfa283bbd8a..123a5f8db6ad 100644 --- a/drivers/net/wimax/i2400m/sdio.c +++ b/drivers/net/wimax/i2400m/sdio.c | |||
@@ -255,16 +255,16 @@ int i2400ms_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt) | |||
255 | container_of(i2400m, struct i2400ms, i2400m); | 255 | container_of(i2400m, struct i2400ms, i2400m); |
256 | struct device *dev = i2400m_dev(i2400m); | 256 | struct device *dev = i2400m_dev(i2400m); |
257 | static const __le32 i2400m_WARM_BOOT_BARKER[4] = { | 257 | static const __le32 i2400m_WARM_BOOT_BARKER[4] = { |
258 | __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER), | 258 | cpu_to_le32(I2400M_WARM_RESET_BARKER), |
259 | __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER), | 259 | cpu_to_le32(I2400M_WARM_RESET_BARKER), |
260 | __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER), | 260 | cpu_to_le32(I2400M_WARM_RESET_BARKER), |
261 | __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER), | 261 | cpu_to_le32(I2400M_WARM_RESET_BARKER), |
262 | }; | 262 | }; |
263 | static const __le32 i2400m_COLD_BOOT_BARKER[4] = { | 263 | static const __le32 i2400m_COLD_BOOT_BARKER[4] = { |
264 | __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER), | 264 | cpu_to_le32(I2400M_COLD_RESET_BARKER), |
265 | __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER), | 265 | cpu_to_le32(I2400M_COLD_RESET_BARKER), |
266 | __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER), | 266 | cpu_to_le32(I2400M_COLD_RESET_BARKER), |
267 | __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER), | 267 | cpu_to_le32(I2400M_COLD_RESET_BARKER), |
268 | }; | 268 | }; |
269 | 269 | ||
270 | if (rt == I2400M_RT_WARM) | 270 | if (rt == I2400M_RT_WARM) |
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c index c6d93465c7e2..7c28610da6f3 100644 --- a/drivers/net/wimax/i2400m/usb.c +++ b/drivers/net/wimax/i2400m/usb.c | |||
@@ -211,16 +211,16 @@ int i2400mu_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt) | |||
211 | container_of(i2400m, struct i2400mu, i2400m); | 211 | container_of(i2400m, struct i2400mu, i2400m); |
212 | struct device *dev = i2400m_dev(i2400m); | 212 | struct device *dev = i2400m_dev(i2400m); |
213 | static const __le32 i2400m_WARM_BOOT_BARKER[4] = { | 213 | static const __le32 i2400m_WARM_BOOT_BARKER[4] = { |
214 | __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER), | 214 | cpu_to_le32(I2400M_WARM_RESET_BARKER), |
215 | __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER), | 215 | cpu_to_le32(I2400M_WARM_RESET_BARKER), |
216 | __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER), | 216 | cpu_to_le32(I2400M_WARM_RESET_BARKER), |
217 | __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER), | 217 | cpu_to_le32(I2400M_WARM_RESET_BARKER), |
218 | }; | 218 | }; |
219 | static const __le32 i2400m_COLD_BOOT_BARKER[4] = { | 219 | static const __le32 i2400m_COLD_BOOT_BARKER[4] = { |
220 | __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER), | 220 | cpu_to_le32(I2400M_COLD_RESET_BARKER), |
221 | __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER), | 221 | cpu_to_le32(I2400M_COLD_RESET_BARKER), |
222 | __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER), | 222 | cpu_to_le32(I2400M_COLD_RESET_BARKER), |
223 | __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER), | 223 | cpu_to_le32(I2400M_COLD_RESET_BARKER), |
224 | }; | 224 | }; |
225 | 225 | ||
226 | d_fnstart(3, dev, "(i2400m %p rt %u)\n", i2400m, rt); | 226 | d_fnstart(3, dev, "(i2400m %p rt %u)\n", i2400m, rt); |
diff --git a/drivers/net/wireless/ath5k/debug.c b/drivers/net/wireless/ath5k/debug.c index ccaeb5c219d2..d281b6e38629 100644 --- a/drivers/net/wireless/ath5k/debug.c +++ b/drivers/net/wireless/ath5k/debug.c | |||
@@ -165,7 +165,7 @@ static int reg_show(struct seq_file *seq, void *p) | |||
165 | return 0; | 165 | return 0; |
166 | } | 166 | } |
167 | 167 | ||
168 | static struct seq_operations register_seq_ops = { | 168 | static const struct seq_operations register_seq_ops = { |
169 | .start = reg_start, | 169 | .start = reg_start, |
170 | .next = reg_next, | 170 | .next = reg_next, |
171 | .stop = reg_stop, | 171 | .stop = reg_stop, |
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c index ec4efd7ff3c8..50e28a0cdfee 100644 --- a/drivers/net/wireless/libertas/debugfs.c +++ b/drivers/net/wireless/libertas/debugfs.c | |||
@@ -629,7 +629,7 @@ static ssize_t lbs_rdrf_write(struct file *file, | |||
629 | res = -EFAULT; | 629 | res = -EFAULT; |
630 | goto out_unlock; | 630 | goto out_unlock; |
631 | } | 631 | } |
632 | priv->rf_offset = simple_strtoul((char *)buf, NULL, 16); | 632 | priv->rf_offset = simple_strtoul(buf, NULL, 16); |
633 | res = count; | 633 | res = count; |
634 | out_unlock: | 634 | out_unlock: |
635 | free_page(addr); | 635 | free_page(addr); |
@@ -680,12 +680,12 @@ out_unlock: | |||
680 | } | 680 | } |
681 | 681 | ||
682 | struct lbs_debugfs_files { | 682 | struct lbs_debugfs_files { |
683 | char *name; | 683 | const char *name; |
684 | int perm; | 684 | int perm; |
685 | struct file_operations fops; | 685 | struct file_operations fops; |
686 | }; | 686 | }; |
687 | 687 | ||
688 | static struct lbs_debugfs_files debugfs_files[] = { | 688 | static const struct lbs_debugfs_files debugfs_files[] = { |
689 | { "info", 0444, FOPS(lbs_dev_info, write_file_dummy), }, | 689 | { "info", 0444, FOPS(lbs_dev_info, write_file_dummy), }, |
690 | { "getscantable", 0444, FOPS(lbs_getscantable, | 690 | { "getscantable", 0444, FOPS(lbs_getscantable, |
691 | write_file_dummy), }, | 691 | write_file_dummy), }, |
@@ -693,7 +693,7 @@ static struct lbs_debugfs_files debugfs_files[] = { | |||
693 | lbs_sleepparams_write), }, | 693 | lbs_sleepparams_write), }, |
694 | }; | 694 | }; |
695 | 695 | ||
696 | static struct lbs_debugfs_files debugfs_events_files[] = { | 696 | static const struct lbs_debugfs_files debugfs_events_files[] = { |
697 | {"low_rssi", 0644, FOPS(lbs_lowrssi_read, | 697 | {"low_rssi", 0644, FOPS(lbs_lowrssi_read, |
698 | lbs_lowrssi_write), }, | 698 | lbs_lowrssi_write), }, |
699 | {"low_snr", 0644, FOPS(lbs_lowsnr_read, | 699 | {"low_snr", 0644, FOPS(lbs_lowsnr_read, |
@@ -708,7 +708,7 @@ static struct lbs_debugfs_files debugfs_events_files[] = { | |||
708 | lbs_highsnr_write), }, | 708 | lbs_highsnr_write), }, |
709 | }; | 709 | }; |
710 | 710 | ||
711 | static struct lbs_debugfs_files debugfs_regs_files[] = { | 711 | static const struct lbs_debugfs_files debugfs_regs_files[] = { |
712 | {"rdmac", 0644, FOPS(lbs_rdmac_read, lbs_rdmac_write), }, | 712 | {"rdmac", 0644, FOPS(lbs_rdmac_read, lbs_rdmac_write), }, |
713 | {"wrmac", 0600, FOPS(NULL, lbs_wrmac_write), }, | 713 | {"wrmac", 0600, FOPS(NULL, lbs_wrmac_write), }, |
714 | {"rdbbp", 0644, FOPS(lbs_rdbbp_read, lbs_rdbbp_write), }, | 714 | {"rdbbp", 0644, FOPS(lbs_rdbbp_read, lbs_rdbbp_write), }, |
@@ -735,7 +735,7 @@ void lbs_debugfs_remove(void) | |||
735 | void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev) | 735 | void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev) |
736 | { | 736 | { |
737 | int i; | 737 | int i; |
738 | struct lbs_debugfs_files *files; | 738 | const struct lbs_debugfs_files *files; |
739 | if (!lbs_dir) | 739 | if (!lbs_dir) |
740 | goto exit; | 740 | goto exit; |
741 | 741 | ||
@@ -938,7 +938,7 @@ static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf, | |||
938 | return (ssize_t)cnt; | 938 | return (ssize_t)cnt; |
939 | } | 939 | } |
940 | 940 | ||
941 | static struct file_operations lbs_debug_fops = { | 941 | static const struct file_operations lbs_debug_fops = { |
942 | .owner = THIS_MODULE, | 942 | .owner = THIS_MODULE, |
943 | .open = open_file_generic, | 943 | .open = open_file_generic, |
944 | .write = lbs_debugfs_write, | 944 | .write = lbs_debugfs_write, |
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c index 7015f2480550..d6bf8d2ef8ea 100644 --- a/drivers/net/wireless/strip.c +++ b/drivers/net/wireless/strip.c | |||
@@ -1125,7 +1125,7 @@ static int strip_seq_show(struct seq_file *seq, void *v) | |||
1125 | } | 1125 | } |
1126 | 1126 | ||
1127 | 1127 | ||
1128 | static struct seq_operations strip_seq_ops = { | 1128 | static const struct seq_operations strip_seq_ops = { |
1129 | .start = strip_seq_start, | 1129 | .start = strip_seq_start, |
1130 | .next = strip_seq_next, | 1130 | .next = strip_seq_next, |
1131 | .stop = strip_seq_stop, | 1131 | .stop = strip_seq_stop, |
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index cd6184ee08ee..9f102a6535c4 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
@@ -196,7 +196,7 @@ static void rx_refill_timeout(unsigned long data) | |||
196 | { | 196 | { |
197 | struct net_device *dev = (struct net_device *)data; | 197 | struct net_device *dev = (struct net_device *)data; |
198 | struct netfront_info *np = netdev_priv(dev); | 198 | struct netfront_info *np = netdev_priv(dev); |
199 | netif_rx_schedule(&np->napi); | 199 | napi_schedule(&np->napi); |
200 | } | 200 | } |
201 | 201 | ||
202 | static int netfront_tx_slot_available(struct netfront_info *np) | 202 | static int netfront_tx_slot_available(struct netfront_info *np) |
@@ -328,7 +328,7 @@ static int xennet_open(struct net_device *dev) | |||
328 | xennet_alloc_rx_buffers(dev); | 328 | xennet_alloc_rx_buffers(dev); |
329 | np->rx.sring->rsp_event = np->rx.rsp_cons + 1; | 329 | np->rx.sring->rsp_event = np->rx.rsp_cons + 1; |
330 | if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) | 330 | if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) |
331 | netif_rx_schedule(&np->napi); | 331 | napi_schedule(&np->napi); |
332 | } | 332 | } |
333 | spin_unlock_bh(&np->rx_lock); | 333 | spin_unlock_bh(&np->rx_lock); |
334 | 334 | ||
@@ -979,7 +979,7 @@ err: | |||
979 | 979 | ||
980 | RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); | 980 | RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); |
981 | if (!more_to_do) | 981 | if (!more_to_do) |
982 | __netif_rx_complete(napi); | 982 | __napi_complete(napi); |
983 | 983 | ||
984 | local_irq_restore(flags); | 984 | local_irq_restore(flags); |
985 | } | 985 | } |
@@ -1317,7 +1317,7 @@ static irqreturn_t xennet_interrupt(int irq, void *dev_id) | |||
1317 | xennet_tx_buf_gc(dev); | 1317 | xennet_tx_buf_gc(dev); |
1318 | /* Under tx_lock: protects access to rx shared-ring indexes. */ | 1318 | /* Under tx_lock: protects access to rx shared-ring indexes. */ |
1319 | if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) | 1319 | if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) |
1320 | netif_rx_schedule(&np->napi); | 1320 | napi_schedule(&np->napi); |
1321 | } | 1321 | } |
1322 | 1322 | ||
1323 | spin_unlock_irqrestore(&np->tx_lock, flags); | 1323 | spin_unlock_irqrestore(&np->tx_lock, flags); |
diff --git a/drivers/net/xtsonic.c b/drivers/net/xtsonic.c index 03a3f34e9039..a12a7211c982 100644 --- a/drivers/net/xtsonic.c +++ b/drivers/net/xtsonic.c | |||
@@ -183,7 +183,7 @@ static int __init sonic_probe1(struct net_device *dev) | |||
183 | 183 | ||
184 | if (lp->descriptors == NULL) { | 184 | if (lp->descriptors == NULL) { |
185 | printk(KERN_ERR "%s: couldn't alloc DMA memory for " | 185 | printk(KERN_ERR "%s: couldn't alloc DMA memory for " |
186 | " descriptors.\n", lp->device->bus_id); | 186 | " descriptors.\n", dev_name(lp->device)); |
187 | goto out; | 187 | goto out; |
188 | } | 188 | } |
189 | 189 | ||
diff --git a/drivers/net/znet.c b/drivers/net/znet.c index f0b15c9347d0..0a6992d8611b 100644 --- a/drivers/net/znet.c +++ b/drivers/net/znet.c | |||
@@ -358,6 +358,17 @@ static void znet_set_multicast_list (struct net_device *dev) | |||
358 | * multicast address configured isn't equal to IFF_ALLMULTI */ | 358 | * multicast address configured isn't equal to IFF_ALLMULTI */ |
359 | } | 359 | } |
360 | 360 | ||
361 | static const struct net_device_ops znet_netdev_ops = { | ||
362 | .ndo_open = znet_open, | ||
363 | .ndo_stop = znet_close, | ||
364 | .ndo_start_xmit = znet_send_packet, | ||
365 | .ndo_set_multicast_list = znet_set_multicast_list, | ||
366 | .ndo_tx_timeout = znet_tx_timeout, | ||
367 | .ndo_change_mtu = eth_change_mtu, | ||
368 | .ndo_set_mac_address = eth_mac_addr, | ||
369 | .ndo_validate_addr = eth_validate_addr, | ||
370 | }; | ||
371 | |||
361 | /* The Z-Note probe is pretty easy. The NETIDBLK exists in the safe-to-probe | 372 | /* The Z-Note probe is pretty easy. The NETIDBLK exists in the safe-to-probe |
362 | BIOS area. We just scan for the signature, and pull the vital parameters | 373 | BIOS area. We just scan for the signature, and pull the vital parameters |
363 | out of the structure. */ | 374 | out of the structure. */ |
@@ -440,11 +451,7 @@ static int __init znet_probe (void) | |||
440 | znet->tx_end = znet->tx_start + znet->tx_buf_len; | 451 | znet->tx_end = znet->tx_start + znet->tx_buf_len; |
441 | 452 | ||
442 | /* The ZNET-specific entries in the device structure. */ | 453 | /* The ZNET-specific entries in the device structure. */ |
443 | dev->open = &znet_open; | 454 | dev->netdev_ops = &znet_netdev_ops; |
444 | dev->hard_start_xmit = &znet_send_packet; | ||
445 | dev->stop = &znet_close; | ||
446 | dev->set_multicast_list = &znet_set_multicast_list; | ||
447 | dev->tx_timeout = znet_tx_timeout; | ||
448 | dev->watchdog_timeo = TX_TIMEOUT; | 455 | dev->watchdog_timeo = TX_TIMEOUT; |
449 | err = register_netdev(dev); | 456 | err = register_netdev(dev); |
450 | if (err) | 457 | if (err) |
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index f5e618562c5f..6669adf355be 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c | |||
@@ -60,6 +60,9 @@ | |||
60 | * 1.25 Added Packing support | 60 | * 1.25 Added Packing support |
61 | * 1.5 | 61 | * 1.5 |
62 | */ | 62 | */ |
63 | |||
64 | #define KMSG_COMPONENT "claw" | ||
65 | |||
63 | #include <asm/ccwdev.h> | 66 | #include <asm/ccwdev.h> |
64 | #include <asm/ccwgroup.h> | 67 | #include <asm/ccwgroup.h> |
65 | #include <asm/debug.h> | 68 | #include <asm/debug.h> |
@@ -94,7 +97,7 @@ | |||
94 | CLAW uses the s390dbf file system see claw_trace and claw_setup | 97 | CLAW uses the s390dbf file system see claw_trace and claw_setup |
95 | */ | 98 | */ |
96 | 99 | ||
97 | 100 | static char version[] __initdata = "CLAW driver"; | |
98 | static char debug_buffer[255]; | 101 | static char debug_buffer[255]; |
99 | /** | 102 | /** |
100 | * Debug Facility Stuff | 103 | * Debug Facility Stuff |
@@ -206,20 +209,30 @@ static struct net_device_stats *claw_stats(struct net_device *dev); | |||
206 | static int pages_to_order_of_mag(int num_of_pages); | 209 | static int pages_to_order_of_mag(int num_of_pages); |
207 | static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr); | 210 | static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr); |
208 | /* sysfs Functions */ | 211 | /* sysfs Functions */ |
209 | static ssize_t claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf); | 212 | static ssize_t claw_hname_show(struct device *dev, |
210 | static ssize_t claw_hname_write(struct device *dev, struct device_attribute *attr, | 213 | struct device_attribute *attr, char *buf); |
214 | static ssize_t claw_hname_write(struct device *dev, | ||
215 | struct device_attribute *attr, | ||
211 | const char *buf, size_t count); | 216 | const char *buf, size_t count); |
212 | static ssize_t claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf); | 217 | static ssize_t claw_adname_show(struct device *dev, |
213 | static ssize_t claw_adname_write(struct device *dev, struct device_attribute *attr, | 218 | struct device_attribute *attr, char *buf); |
219 | static ssize_t claw_adname_write(struct device *dev, | ||
220 | struct device_attribute *attr, | ||
214 | const char *buf, size_t count); | 221 | const char *buf, size_t count); |
215 | static ssize_t claw_apname_show(struct device *dev, struct device_attribute *attr, char *buf); | 222 | static ssize_t claw_apname_show(struct device *dev, |
216 | static ssize_t claw_apname_write(struct device *dev, struct device_attribute *attr, | 223 | struct device_attribute *attr, char *buf); |
224 | static ssize_t claw_apname_write(struct device *dev, | ||
225 | struct device_attribute *attr, | ||
217 | const char *buf, size_t count); | 226 | const char *buf, size_t count); |
218 | static ssize_t claw_wbuff_show(struct device *dev, struct device_attribute *attr, char *buf); | 227 | static ssize_t claw_wbuff_show(struct device *dev, |
219 | static ssize_t claw_wbuff_write(struct device *dev, struct device_attribute *attr, | 228 | struct device_attribute *attr, char *buf); |
229 | static ssize_t claw_wbuff_write(struct device *dev, | ||
230 | struct device_attribute *attr, | ||
220 | const char *buf, size_t count); | 231 | const char *buf, size_t count); |
221 | static ssize_t claw_rbuff_show(struct device *dev, struct device_attribute *attr, char *buf); | 232 | static ssize_t claw_rbuff_show(struct device *dev, |
222 | static ssize_t claw_rbuff_write(struct device *dev, struct device_attribute *attr, | 233 | struct device_attribute *attr, char *buf); |
234 | static ssize_t claw_rbuff_write(struct device *dev, | ||
235 | struct device_attribute *attr, | ||
223 | const char *buf, size_t count); | 236 | const char *buf, size_t count); |
224 | static int claw_add_files(struct device *dev); | 237 | static int claw_add_files(struct device *dev); |
225 | static void claw_remove_files(struct device *dev); | 238 | static void claw_remove_files(struct device *dev); |
@@ -298,8 +311,8 @@ claw_probe(struct ccwgroup_device *cgdev) | |||
298 | if (rc) { | 311 | if (rc) { |
299 | probe_error(cgdev); | 312 | probe_error(cgdev); |
300 | put_device(&cgdev->dev); | 313 | put_device(&cgdev->dev); |
301 | printk(KERN_WARNING "add_files failed %s %s Exit Line %d \n", | 314 | dev_err(&cgdev->dev, "Creating the /proc files for a new" |
302 | dev_name(&cgdev->cdev[0]->dev), __func__, __LINE__); | 315 | " CLAW device failed\n"); |
303 | CLAW_DBF_TEXT_(2, setup, "probex%d", rc); | 316 | CLAW_DBF_TEXT_(2, setup, "probex%d", rc); |
304 | return rc; | 317 | return rc; |
305 | } | 318 | } |
@@ -496,7 +509,8 @@ claw_open(struct net_device *dev) | |||
496 | ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) || | 509 | ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) || |
497 | (((privptr->channel[READ].flag | | 510 | (((privptr->channel[READ].flag | |
498 | privptr->channel[WRITE].flag) & CLAW_TIMER) != 0x00)) { | 511 | privptr->channel[WRITE].flag) & CLAW_TIMER) != 0x00)) { |
499 | printk(KERN_INFO "%s: remote side is not ready\n", dev->name); | 512 | dev_info(&privptr->channel[READ].cdev->dev, |
513 | "%s: remote side is not ready\n", dev->name); | ||
500 | CLAW_DBF_TEXT(2, trace, "notrdy"); | 514 | CLAW_DBF_TEXT(2, trace, "notrdy"); |
501 | 515 | ||
502 | for ( i = 0; i < 2; i++) { | 516 | for ( i = 0; i < 2; i++) { |
@@ -582,10 +596,9 @@ claw_irq_handler(struct ccw_device *cdev, | |||
582 | CLAW_DBF_TEXT(4, trace, "clawirq"); | 596 | CLAW_DBF_TEXT(4, trace, "clawirq"); |
583 | /* Bypass all 'unsolicited interrupts' */ | 597 | /* Bypass all 'unsolicited interrupts' */ |
584 | if (!cdev->dev.driver_data) { | 598 | if (!cdev->dev.driver_data) { |
585 | printk(KERN_WARNING "claw: unsolicited interrupt for device:" | 599 | dev_warn(&cdev->dev, "An uninitialized CLAW device received an" |
586 | "%s received c-%02x d-%02x\n", | 600 | " IRQ, c-%02x d-%02x\n", |
587 | dev_name(&cdev->dev), irb->scsw.cmd.cstat, | 601 | irb->scsw.cmd.cstat, irb->scsw.cmd.dstat); |
588 | irb->scsw.cmd.dstat); | ||
589 | CLAW_DBF_TEXT(2, trace, "badirq"); | 602 | CLAW_DBF_TEXT(2, trace, "badirq"); |
590 | return; | 603 | return; |
591 | } | 604 | } |
@@ -597,8 +610,7 @@ claw_irq_handler(struct ccw_device *cdev, | |||
597 | else if (privptr->channel[WRITE].cdev == cdev) | 610 | else if (privptr->channel[WRITE].cdev == cdev) |
598 | p_ch = &privptr->channel[WRITE]; | 611 | p_ch = &privptr->channel[WRITE]; |
599 | else { | 612 | else { |
600 | printk(KERN_WARNING "claw: Can't determine channel for " | 613 | dev_warn(&cdev->dev, "The device is not a CLAW device\n"); |
601 | "interrupt, device %s\n", dev_name(&cdev->dev)); | ||
602 | CLAW_DBF_TEXT(2, trace, "badchan"); | 614 | CLAW_DBF_TEXT(2, trace, "badchan"); |
603 | return; | 615 | return; |
604 | } | 616 | } |
@@ -612,7 +624,8 @@ claw_irq_handler(struct ccw_device *cdev, | |||
612 | 624 | ||
613 | /* Check for good subchannel return code, otherwise info message */ | 625 | /* Check for good subchannel return code, otherwise info message */ |
614 | if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) { | 626 | if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) { |
615 | printk(KERN_INFO "%s: subchannel check for device: %04x -" | 627 | dev_info(&cdev->dev, |
628 | "%s: subchannel check for device: %04x -" | ||
616 | " Sch Stat %02x Dev Stat %02x CPA - %04x\n", | 629 | " Sch Stat %02x Dev Stat %02x CPA - %04x\n", |
617 | dev->name, p_ch->devno, | 630 | dev->name, p_ch->devno, |
618 | irb->scsw.cmd.cstat, irb->scsw.cmd.dstat, | 631 | irb->scsw.cmd.cstat, irb->scsw.cmd.dstat, |
@@ -651,7 +664,7 @@ claw_irq_handler(struct ccw_device *cdev, | |||
651 | wake_up(&p_ch->wait); /* wake claw_open (READ)*/ | 664 | wake_up(&p_ch->wait); /* wake claw_open (READ)*/ |
652 | } else if (p_ch->flag == CLAW_WRITE) { | 665 | } else if (p_ch->flag == CLAW_WRITE) { |
653 | p_ch->claw_state = CLAW_START_WRITE; | 666 | p_ch->claw_state = CLAW_START_WRITE; |
654 | /* send SYSTEM_VALIDATE */ | 667 | /* send SYSTEM_VALIDATE */ |
655 | claw_strt_read(dev, LOCK_NO); | 668 | claw_strt_read(dev, LOCK_NO); |
656 | claw_send_control(dev, | 669 | claw_send_control(dev, |
657 | SYSTEM_VALIDATE_REQUEST, | 670 | SYSTEM_VALIDATE_REQUEST, |
@@ -659,10 +672,9 @@ claw_irq_handler(struct ccw_device *cdev, | |||
659 | p_env->host_name, | 672 | p_env->host_name, |
660 | p_env->adapter_name); | 673 | p_env->adapter_name); |
661 | } else { | 674 | } else { |
662 | printk(KERN_WARNING "claw: unsolicited " | 675 | dev_warn(&cdev->dev, "The CLAW device received" |
663 | "interrupt for device:" | 676 | " an unexpected IRQ, " |
664 | "%s received c-%02x d-%02x\n", | 677 | "c-%02x d-%02x\n", |
665 | dev_name(&cdev->dev), | ||
666 | irb->scsw.cmd.cstat, | 678 | irb->scsw.cmd.cstat, |
667 | irb->scsw.cmd.dstat); | 679 | irb->scsw.cmd.dstat); |
668 | return; | 680 | return; |
@@ -677,8 +689,8 @@ claw_irq_handler(struct ccw_device *cdev, | |||
677 | (p_ch->irb->ecw[0] & 0x40) == 0x40 || | 689 | (p_ch->irb->ecw[0] & 0x40) == 0x40 || |
678 | (p_ch->irb->ecw[0]) == 0) { | 690 | (p_ch->irb->ecw[0]) == 0) { |
679 | privptr->stats.rx_errors++; | 691 | privptr->stats.rx_errors++; |
680 | printk(KERN_INFO "%s: Restart is " | 692 | dev_info(&cdev->dev, |
681 | "required after remote " | 693 | "%s: Restart is required after remote " |
682 | "side recovers \n", | 694 | "side recovers \n", |
683 | dev->name); | 695 | dev->name); |
684 | } | 696 | } |
@@ -713,11 +725,13 @@ claw_irq_handler(struct ccw_device *cdev, | |||
713 | return; | 725 | return; |
714 | case CLAW_START_WRITE: | 726 | case CLAW_START_WRITE: |
715 | if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { | 727 | if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { |
716 | printk(KERN_INFO "%s: Unit Check Occured in " | 728 | dev_info(&cdev->dev, |
729 | "%s: Unit Check Occured in " | ||
717 | "write channel\n", dev->name); | 730 | "write channel\n", dev->name); |
718 | clear_bit(0, (void *)&p_ch->IO_active); | 731 | clear_bit(0, (void *)&p_ch->IO_active); |
719 | if (p_ch->irb->ecw[0] & 0x80) { | 732 | if (p_ch->irb->ecw[0] & 0x80) { |
720 | printk(KERN_INFO "%s: Resetting Event " | 733 | dev_info(&cdev->dev, |
734 | "%s: Resetting Event " | ||
721 | "occurred:\n", dev->name); | 735 | "occurred:\n", dev->name); |
722 | init_timer(&p_ch->timer); | 736 | init_timer(&p_ch->timer); |
723 | p_ch->timer.function = | 737 | p_ch->timer.function = |
@@ -725,7 +739,8 @@ claw_irq_handler(struct ccw_device *cdev, | |||
725 | p_ch->timer.data = (unsigned long)p_ch; | 739 | p_ch->timer.data = (unsigned long)p_ch; |
726 | p_ch->timer.expires = jiffies + 10*HZ; | 740 | p_ch->timer.expires = jiffies + 10*HZ; |
727 | add_timer(&p_ch->timer); | 741 | add_timer(&p_ch->timer); |
728 | printk(KERN_INFO "%s: write connection " | 742 | dev_info(&cdev->dev, |
743 | "%s: write connection " | ||
729 | "restarting\n", dev->name); | 744 | "restarting\n", dev->name); |
730 | } | 745 | } |
731 | CLAW_DBF_TEXT(4, trace, "rstrtwrt"); | 746 | CLAW_DBF_TEXT(4, trace, "rstrtwrt"); |
@@ -733,9 +748,10 @@ claw_irq_handler(struct ccw_device *cdev, | |||
733 | } | 748 | } |
734 | if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) { | 749 | if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) { |
735 | clear_bit(0, (void *)&p_ch->IO_active); | 750 | clear_bit(0, (void *)&p_ch->IO_active); |
736 | printk(KERN_INFO "%s: Unit Exception " | 751 | dev_info(&cdev->dev, |
737 | "Occured in write channel\n", | 752 | "%s: Unit Exception " |
738 | dev->name); | 753 | "occurred in write channel\n", |
754 | dev->name); | ||
739 | } | 755 | } |
740 | if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) || | 756 | if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) || |
741 | (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) || | 757 | (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) || |
@@ -757,8 +773,9 @@ claw_irq_handler(struct ccw_device *cdev, | |||
757 | CLAW_DBF_TEXT(4, trace, "StWtExit"); | 773 | CLAW_DBF_TEXT(4, trace, "StWtExit"); |
758 | return; | 774 | return; |
759 | default: | 775 | default: |
760 | printk(KERN_WARNING "%s: wrong selection code - irq " | 776 | dev_warn(&cdev->dev, |
761 | "state=%d\n", dev->name, p_ch->claw_state); | 777 | "The CLAW device for %s received an unexpected IRQ\n", |
778 | dev->name); | ||
762 | CLAW_DBF_TEXT(2, trace, "badIRQ"); | 779 | CLAW_DBF_TEXT(2, trace, "badIRQ"); |
763 | return; | 780 | return; |
764 | } | 781 | } |
@@ -910,8 +927,10 @@ claw_release(struct net_device *dev) | |||
910 | if (((privptr->channel[READ].last_dstat | | 927 | if (((privptr->channel[READ].last_dstat | |
911 | privptr->channel[WRITE].last_dstat) & | 928 | privptr->channel[WRITE].last_dstat) & |
912 | ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) { | 929 | ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) { |
913 | printk(KERN_WARNING "%s: channel problems during close - " | 930 | dev_warn(&privptr->channel[READ].cdev->dev, |
914 | "read: %02x - write: %02x\n", | 931 | "Deactivating %s completed with incorrect" |
932 | " subchannel status " | ||
933 | "(read %02x, write %02x)\n", | ||
915 | dev->name, | 934 | dev->name, |
916 | privptr->channel[READ].last_dstat, | 935 | privptr->channel[READ].last_dstat, |
917 | privptr->channel[WRITE].last_dstat); | 936 | privptr->channel[WRITE].last_dstat); |
@@ -1076,8 +1095,8 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first, | |||
1076 | } | 1095 | } |
1077 | 1096 | ||
1078 | if ( privptr-> p_read_active_first ==NULL ) { | 1097 | if ( privptr-> p_read_active_first ==NULL ) { |
1079 | privptr-> p_read_active_first= p_first; /* set new first */ | 1098 | privptr->p_read_active_first = p_first; /* set new first */ |
1080 | privptr-> p_read_active_last = p_last; /* set new last */ | 1099 | privptr->p_read_active_last = p_last; /* set new last */ |
1081 | } | 1100 | } |
1082 | else { | 1101 | else { |
1083 | 1102 | ||
@@ -1113,7 +1132,7 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first, | |||
1113 | privptr->p_read_active_last->r_TIC_2.cda= | 1132 | privptr->p_read_active_last->r_TIC_2.cda= |
1114 | (__u32)__pa(&p_first->read); | 1133 | (__u32)__pa(&p_first->read); |
1115 | } | 1134 | } |
1116 | /* chain in new set of blocks */ | 1135 | /* chain in new set of blocks */ |
1117 | privptr->p_read_active_last->next = p_first; | 1136 | privptr->p_read_active_last->next = p_first; |
1118 | privptr->p_read_active_last=p_last; | 1137 | privptr->p_read_active_last=p_last; |
1119 | } /* end of if ( privptr-> p_read_active_first ==NULL) */ | 1138 | } /* end of if ( privptr-> p_read_active_first ==NULL) */ |
@@ -1135,21 +1154,18 @@ ccw_check_return_code(struct ccw_device *cdev, int return_code) | |||
1135 | case -EBUSY: /* BUSY is a transient state no action needed */ | 1154 | case -EBUSY: /* BUSY is a transient state no action needed */ |
1136 | break; | 1155 | break; |
1137 | case -ENODEV: | 1156 | case -ENODEV: |
1138 | printk(KERN_EMERG "%s: Missing device called " | 1157 | dev_err(&cdev->dev, "The remote channel adapter is not" |
1139 | "for IO ENODEV\n", dev_name(&cdev->dev)); | 1158 | " available\n"); |
1140 | break; | ||
1141 | case -EIO: | ||
1142 | printk(KERN_EMERG "%s: Status pending... EIO \n", | ||
1143 | dev_name(&cdev->dev)); | ||
1144 | break; | 1159 | break; |
1145 | case -EINVAL: | 1160 | case -EINVAL: |
1146 | printk(KERN_EMERG "%s: Invalid Dev State EINVAL \n", | 1161 | dev_err(&cdev->dev, |
1147 | dev_name(&cdev->dev)); | 1162 | "The status of the remote channel adapter" |
1163 | " is not valid\n"); | ||
1148 | break; | 1164 | break; |
1149 | default: | 1165 | default: |
1150 | printk(KERN_EMERG "%s: Unknown error in " | 1166 | dev_err(&cdev->dev, "The common device layer" |
1151 | "Do_IO %d\n", dev_name(&cdev->dev), | 1167 | " returned error code %d\n", |
1152 | return_code); | 1168 | return_code); |
1153 | } | 1169 | } |
1154 | } | 1170 | } |
1155 | CLAW_DBF_TEXT(4, trace, "ccwret"); | 1171 | CLAW_DBF_TEXT(4, trace, "ccwret"); |
@@ -1163,40 +1179,41 @@ static void | |||
1163 | ccw_check_unit_check(struct chbk * p_ch, unsigned char sense ) | 1179 | ccw_check_unit_check(struct chbk * p_ch, unsigned char sense ) |
1164 | { | 1180 | { |
1165 | struct net_device *ndev = p_ch->ndev; | 1181 | struct net_device *ndev = p_ch->ndev; |
1182 | struct device *dev = &p_ch->cdev->dev; | ||
1166 | 1183 | ||
1167 | CLAW_DBF_TEXT(4, trace, "unitchek"); | 1184 | CLAW_DBF_TEXT(4, trace, "unitchek"); |
1168 | printk(KERN_INFO "%s: Unit Check with sense byte:0x%04x\n", | 1185 | dev_warn(dev, "The communication peer of %s disconnected\n", |
1169 | ndev->name, sense); | 1186 | ndev->name); |
1170 | 1187 | ||
1171 | if (sense & 0x40) { | 1188 | if (sense & 0x40) { |
1172 | if (sense & 0x01) { | 1189 | if (sense & 0x01) { |
1173 | printk(KERN_WARNING "%s: Interface disconnect or " | 1190 | dev_warn(dev, "The remote channel adapter for" |
1174 | "Selective reset " | 1191 | " %s has been reset\n", |
1175 | "occurred (remote side)\n", ndev->name); | 1192 | ndev->name); |
1176 | } | ||
1177 | else { | ||
1178 | printk(KERN_WARNING "%s: System reset occured" | ||
1179 | " (remote side)\n", ndev->name); | ||
1180 | } | 1193 | } |
1181 | } | 1194 | } |
1182 | else if (sense & 0x20) { | 1195 | else if (sense & 0x20) { |
1183 | if (sense & 0x04) { | 1196 | if (sense & 0x04) { |
1184 | printk(KERN_WARNING "%s: Data-streaming " | 1197 | dev_warn(dev, "A data streaming timeout occurred" |
1185 | "timeout)\n", ndev->name); | 1198 | " for %s\n", |
1199 | ndev->name); | ||
1186 | } | 1200 | } |
1187 | else { | 1201 | else { |
1188 | printk(KERN_WARNING "%s: Data-transfer parity" | 1202 | dev_warn(dev, "A data transfer parity error occurred" |
1189 | " error\n", ndev->name); | 1203 | " for %s\n", |
1204 | ndev->name); | ||
1190 | } | 1205 | } |
1191 | } | 1206 | } |
1192 | else if (sense & 0x10) { | 1207 | else if (sense & 0x10) { |
1193 | if (sense & 0x20) { | 1208 | if (sense & 0x20) { |
1194 | printk(KERN_WARNING "%s: Hardware malfunction " | 1209 | dev_warn(dev, "The remote channel adapter for %s" |
1195 | "(remote side)\n", ndev->name); | 1210 | " is faulty\n", |
1211 | ndev->name); | ||
1196 | } | 1212 | } |
1197 | else { | 1213 | else { |
1198 | printk(KERN_WARNING "%s: read-data parity error " | 1214 | dev_warn(dev, "A read data parity error occurred" |
1199 | "(remote side)\n", ndev->name); | 1215 | " for %s\n", |
1216 | ndev->name); | ||
1200 | } | 1217 | } |
1201 | } | 1218 | } |
1202 | 1219 | ||
@@ -1375,7 +1392,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid) | |||
1375 | */ | 1392 | */ |
1376 | 1393 | ||
1377 | if (p_first_ccw!=NULL) { | 1394 | if (p_first_ccw!=NULL) { |
1378 | /* setup ending ccw sequence for this segment */ | 1395 | /* setup ending ccw sequence for this segment */ |
1379 | pEnd=privptr->p_end_ccw; | 1396 | pEnd=privptr->p_end_ccw; |
1380 | if (pEnd->write1) { | 1397 | if (pEnd->write1) { |
1381 | pEnd->write1=0x00; /* second end ccw is now active */ | 1398 | pEnd->write1=0x00; /* second end ccw is now active */ |
@@ -1697,10 +1714,11 @@ init_ccw_bk(struct net_device *dev) | |||
1697 | p_buf-> w_TIC_1.flags = 0; | 1714 | p_buf-> w_TIC_1.flags = 0; |
1698 | p_buf-> w_TIC_1.count = 0; | 1715 | p_buf-> w_TIC_1.count = 0; |
1699 | 1716 | ||
1700 | if (((unsigned long)p_buff+privptr->p_env->write_size) >= | 1717 | if (((unsigned long)p_buff + |
1718 | privptr->p_env->write_size) >= | ||
1701 | ((unsigned long)(p_buff+2* | 1719 | ((unsigned long)(p_buff+2* |
1702 | (privptr->p_env->write_size) -1) & PAGE_MASK)) { | 1720 | (privptr->p_env->write_size) - 1) & PAGE_MASK)) { |
1703 | p_buff= p_buff+privptr->p_env->write_size; | 1721 | p_buff = p_buff+privptr->p_env->write_size; |
1704 | } | 1722 | } |
1705 | } | 1723 | } |
1706 | } | 1724 | } |
@@ -1840,15 +1858,16 @@ init_ccw_bk(struct net_device *dev) | |||
1840 | p_buf->header.opcode=0xff; | 1858 | p_buf->header.opcode=0xff; |
1841 | p_buf->header.flag=CLAW_PENDING; | 1859 | p_buf->header.flag=CLAW_PENDING; |
1842 | 1860 | ||
1843 | if (((unsigned long)p_buff+privptr->p_env->read_size) >= | 1861 | if (((unsigned long)p_buff+privptr->p_env->read_size) >= |
1844 | ((unsigned long)(p_buff+2*(privptr->p_env->read_size) -1) | 1862 | ((unsigned long)(p_buff+2*(privptr->p_env->read_size) |
1845 | & PAGE_MASK) ) { | 1863 | -1) |
1864 | & PAGE_MASK)) { | ||
1846 | p_buff= p_buff+privptr->p_env->read_size; | 1865 | p_buff= p_buff+privptr->p_env->read_size; |
1847 | } | 1866 | } |
1848 | else { | 1867 | else { |
1849 | p_buff= | 1868 | p_buff= |
1850 | (void *)((unsigned long) | 1869 | (void *)((unsigned long) |
1851 | (p_buff+2*(privptr->p_env->read_size) -1) | 1870 | (p_buff+2*(privptr->p_env->read_size)-1) |
1852 | & PAGE_MASK) ; | 1871 | & PAGE_MASK) ; |
1853 | } | 1872 | } |
1854 | } /* for read_buffers */ | 1873 | } /* for read_buffers */ |
@@ -1856,24 +1875,28 @@ init_ccw_bk(struct net_device *dev) | |||
1856 | else { /* read Size >= PAGE_SIZE */ | 1875 | else { /* read Size >= PAGE_SIZE */ |
1857 | for (i=0 ; i< privptr->p_env->read_buffers ; i++) { | 1876 | for (i=0 ; i< privptr->p_env->read_buffers ; i++) { |
1858 | p_buff = (void *)__get_free_pages(__GFP_DMA, | 1877 | p_buff = (void *)__get_free_pages(__GFP_DMA, |
1859 | (int)pages_to_order_of_mag(privptr->p_buff_pages_perread) ); | 1878 | (int)pages_to_order_of_mag( |
1879 | privptr->p_buff_pages_perread)); | ||
1860 | if (p_buff==NULL) { | 1880 | if (p_buff==NULL) { |
1861 | free_pages((unsigned long)privptr->p_buff_ccw, | 1881 | free_pages((unsigned long)privptr->p_buff_ccw, |
1862 | (int)pages_to_order_of_mag(privptr->p_buff_ccw_num)); | 1882 | (int)pages_to_order_of_mag(privptr-> |
1883 | p_buff_ccw_num)); | ||
1863 | /* free the write pages */ | 1884 | /* free the write pages */ |
1864 | p_buf=privptr->p_buff_write; | 1885 | p_buf=privptr->p_buff_write; |
1865 | while (p_buf!=NULL) { | 1886 | while (p_buf!=NULL) { |
1866 | free_pages((unsigned long)p_buf->p_buffer, | 1887 | free_pages( |
1867 | (int)pages_to_order_of_mag( | 1888 | (unsigned long)p_buf->p_buffer, |
1868 | privptr->p_buff_pages_perwrite )); | 1889 | (int)pages_to_order_of_mag( |
1890 | privptr->p_buff_pages_perwrite)); | ||
1869 | p_buf=p_buf->next; | 1891 | p_buf=p_buf->next; |
1870 | } | 1892 | } |
1871 | /* free any read pages already alloc */ | 1893 | /* free any read pages already alloc */ |
1872 | p_buf=privptr->p_buff_read; | 1894 | p_buf=privptr->p_buff_read; |
1873 | while (p_buf!=NULL) { | 1895 | while (p_buf!=NULL) { |
1874 | free_pages((unsigned long)p_buf->p_buffer, | 1896 | free_pages( |
1875 | (int)pages_to_order_of_mag( | 1897 | (unsigned long)p_buf->p_buffer, |
1876 | privptr->p_buff_pages_perread )); | 1898 | (int)pages_to_order_of_mag( |
1899 | privptr->p_buff_pages_perread)); | ||
1877 | p_buf=p_buf->next; | 1900 | p_buf=p_buf->next; |
1878 | } | 1901 | } |
1879 | privptr->p_buff_ccw=NULL; | 1902 | privptr->p_buff_ccw=NULL; |
@@ -2003,7 +2026,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) | |||
2003 | tdev = &privptr->channel[READ].cdev->dev; | 2026 | tdev = &privptr->channel[READ].cdev->dev; |
2004 | memcpy( &temp_host_name, p_env->host_name, 8); | 2027 | memcpy( &temp_host_name, p_env->host_name, 8); |
2005 | memcpy( &temp_ws_name, p_env->adapter_name , 8); | 2028 | memcpy( &temp_ws_name, p_env->adapter_name , 8); |
2006 | printk(KERN_INFO "%s: CLAW device %.8s: " | 2029 | dev_info(tdev, "%s: CLAW device %.8s: " |
2007 | "Received Control Packet\n", | 2030 | "Received Control Packet\n", |
2008 | dev->name, temp_ws_name); | 2031 | dev->name, temp_ws_name); |
2009 | if (privptr->release_pend==1) { | 2032 | if (privptr->release_pend==1) { |
@@ -2022,32 +2045,30 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) | |||
2022 | if (p_ctlbk->version != CLAW_VERSION_ID) { | 2045 | if (p_ctlbk->version != CLAW_VERSION_ID) { |
2023 | claw_snd_sys_validate_rsp(dev, p_ctlbk, | 2046 | claw_snd_sys_validate_rsp(dev, p_ctlbk, |
2024 | CLAW_RC_WRONG_VERSION); | 2047 | CLAW_RC_WRONG_VERSION); |
2025 | printk("%s: %d is wrong version id. " | 2048 | dev_warn(tdev, "The communication peer of %s" |
2026 | "Expected %d\n", | 2049 | " uses an incorrect API version %d\n", |
2027 | dev->name, p_ctlbk->version, | 2050 | dev->name, p_ctlbk->version); |
2028 | CLAW_VERSION_ID); | ||
2029 | } | 2051 | } |
2030 | p_sysval = (struct sysval *)&(p_ctlbk->data); | 2052 | p_sysval = (struct sysval *)&(p_ctlbk->data); |
2031 | printk("%s: Recv Sys Validate Request: " | 2053 | dev_info(tdev, "%s: Recv Sys Validate Request: " |
2032 | "Vers=%d,link_id=%d,Corr=%d,WS name=%." | 2054 | "Vers=%d,link_id=%d,Corr=%d,WS name=%.8s," |
2033 | "8s,Host name=%.8s\n", | 2055 | "Host name=%.8s\n", |
2034 | dev->name, p_ctlbk->version, | 2056 | dev->name, p_ctlbk->version, |
2035 | p_ctlbk->linkid, | 2057 | p_ctlbk->linkid, |
2036 | p_ctlbk->correlator, | 2058 | p_ctlbk->correlator, |
2037 | p_sysval->WS_name, | 2059 | p_sysval->WS_name, |
2038 | p_sysval->host_name); | 2060 | p_sysval->host_name); |
2039 | if (memcmp(temp_host_name, p_sysval->host_name, 8)) { | 2061 | if (memcmp(temp_host_name, p_sysval->host_name, 8)) { |
2040 | claw_snd_sys_validate_rsp(dev, p_ctlbk, | 2062 | claw_snd_sys_validate_rsp(dev, p_ctlbk, |
2041 | CLAW_RC_NAME_MISMATCH); | 2063 | CLAW_RC_NAME_MISMATCH); |
2042 | CLAW_DBF_TEXT(2, setup, "HSTBAD"); | 2064 | CLAW_DBF_TEXT(2, setup, "HSTBAD"); |
2043 | CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->host_name); | 2065 | CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->host_name); |
2044 | CLAW_DBF_TEXT_(2, setup, "%s", temp_host_name); | 2066 | CLAW_DBF_TEXT_(2, setup, "%s", temp_host_name); |
2045 | printk(KERN_INFO "%s: Host name mismatch\n", | 2067 | dev_warn(tdev, |
2046 | dev->name); | 2068 | "Host name %s for %s does not match the" |
2047 | printk(KERN_INFO "%s: Received :%s: " | 2069 | " remote adapter name %s\n", |
2048 | "expected :%s: \n", | ||
2049 | dev->name, | ||
2050 | p_sysval->host_name, | 2070 | p_sysval->host_name, |
2071 | dev->name, | ||
2051 | temp_host_name); | 2072 | temp_host_name); |
2052 | } | 2073 | } |
2053 | if (memcmp(temp_ws_name, p_sysval->WS_name, 8)) { | 2074 | if (memcmp(temp_ws_name, p_sysval->WS_name, 8)) { |
@@ -2056,35 +2077,38 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) | |||
2056 | CLAW_DBF_TEXT(2, setup, "WSNBAD"); | 2077 | CLAW_DBF_TEXT(2, setup, "WSNBAD"); |
2057 | CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->WS_name); | 2078 | CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->WS_name); |
2058 | CLAW_DBF_TEXT_(2, setup, "%s", temp_ws_name); | 2079 | CLAW_DBF_TEXT_(2, setup, "%s", temp_ws_name); |
2059 | printk(KERN_INFO "%s: WS name mismatch\n", | 2080 | dev_warn(tdev, "Adapter name %s for %s does not match" |
2060 | dev->name); | 2081 | " the remote host name %s\n", |
2061 | printk(KERN_INFO "%s: Received :%s: " | 2082 | p_sysval->WS_name, |
2062 | "expected :%s: \n", | 2083 | dev->name, |
2063 | dev->name, | 2084 | temp_ws_name); |
2064 | p_sysval->WS_name, | ||
2065 | temp_ws_name); | ||
2066 | } | 2085 | } |
2067 | if ((p_sysval->write_frame_size < p_env->write_size) && | 2086 | if ((p_sysval->write_frame_size < p_env->write_size) && |
2068 | (p_env->packing == 0)) { | 2087 | (p_env->packing == 0)) { |
2069 | claw_snd_sys_validate_rsp(dev, p_ctlbk, | 2088 | claw_snd_sys_validate_rsp(dev, p_ctlbk, |
2070 | CLAW_RC_HOST_RCV_TOO_SMALL); | 2089 | CLAW_RC_HOST_RCV_TOO_SMALL); |
2071 | printk(KERN_INFO "%s: host write size is too " | 2090 | dev_warn(tdev, |
2072 | "small\n", dev->name); | 2091 | "The local write buffer is smaller than the" |
2092 | " remote read buffer\n"); | ||
2073 | CLAW_DBF_TEXT(2, setup, "wrtszbad"); | 2093 | CLAW_DBF_TEXT(2, setup, "wrtszbad"); |
2074 | } | 2094 | } |
2075 | if ((p_sysval->read_frame_size < p_env->read_size) && | 2095 | if ((p_sysval->read_frame_size < p_env->read_size) && |
2076 | (p_env->packing == 0)) { | 2096 | (p_env->packing == 0)) { |
2077 | claw_snd_sys_validate_rsp(dev, p_ctlbk, | 2097 | claw_snd_sys_validate_rsp(dev, p_ctlbk, |
2078 | CLAW_RC_HOST_RCV_TOO_SMALL); | 2098 | CLAW_RC_HOST_RCV_TOO_SMALL); |
2079 | printk(KERN_INFO "%s: host read size is too " | 2099 | dev_warn(tdev, |
2080 | "small\n", dev->name); | 2100 | "The local read buffer is smaller than the" |
2101 | " remote write buffer\n"); | ||
2081 | CLAW_DBF_TEXT(2, setup, "rdsizbad"); | 2102 | CLAW_DBF_TEXT(2, setup, "rdsizbad"); |
2082 | } | 2103 | } |
2083 | claw_snd_sys_validate_rsp(dev, p_ctlbk, 0); | 2104 | claw_snd_sys_validate_rsp(dev, p_ctlbk, 0); |
2084 | printk(KERN_INFO "%s: CLAW device %.8s: System validate " | 2105 | dev_info(tdev, |
2085 | "completed.\n", dev->name, temp_ws_name); | 2106 | "CLAW device %.8s: System validate" |
2086 | printk("%s: sys Validate Rsize:%d Wsize:%d\n", dev->name, | 2107 | " completed.\n", temp_ws_name); |
2087 | p_sysval->read_frame_size, p_sysval->write_frame_size); | 2108 | dev_info(tdev, |
2109 | "%s: sys Validate Rsize:%d Wsize:%d\n", | ||
2110 | dev->name, p_sysval->read_frame_size, | ||
2111 | p_sysval->write_frame_size); | ||
2088 | privptr->system_validate_comp = 1; | 2112 | privptr->system_validate_comp = 1; |
2089 | if (strncmp(p_env->api_type, WS_APPL_NAME_PACKED, 6) == 0) | 2113 | if (strncmp(p_env->api_type, WS_APPL_NAME_PACKED, 6) == 0) |
2090 | p_env->packing = PACKING_ASK; | 2114 | p_env->packing = PACKING_ASK; |
@@ -2092,8 +2116,10 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) | |||
2092 | break; | 2116 | break; |
2093 | case SYSTEM_VALIDATE_RESPONSE: | 2117 | case SYSTEM_VALIDATE_RESPONSE: |
2094 | p_sysval = (struct sysval *)&(p_ctlbk->data); | 2118 | p_sysval = (struct sysval *)&(p_ctlbk->data); |
2095 | printk("%s: Recv Sys Validate Resp: Vers=%d,Corr=%d,RC=%d," | 2119 | dev_info(tdev, |
2096 | "WS name=%.8s,Host name=%.8s\n", | 2120 | "Settings for %s validated (version=%d, " |
2121 | "remote device=%d, rc=%d, adapter name=%.8s, " | ||
2122 | "host name=%.8s)\n", | ||
2097 | dev->name, | 2123 | dev->name, |
2098 | p_ctlbk->version, | 2124 | p_ctlbk->version, |
2099 | p_ctlbk->correlator, | 2125 | p_ctlbk->correlator, |
@@ -2102,41 +2128,39 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) | |||
2102 | p_sysval->host_name); | 2128 | p_sysval->host_name); |
2103 | switch (p_ctlbk->rc) { | 2129 | switch (p_ctlbk->rc) { |
2104 | case 0: | 2130 | case 0: |
2105 | printk(KERN_INFO "%s: CLAW device " | 2131 | dev_info(tdev, "%s: CLAW device " |
2106 | "%.8s: System validate " | 2132 | "%.8s: System validate completed.\n", |
2107 | "completed.\n", | 2133 | dev->name, temp_ws_name); |
2108 | dev->name, temp_ws_name); | ||
2109 | if (privptr->system_validate_comp == 0) | 2134 | if (privptr->system_validate_comp == 0) |
2110 | claw_strt_conn_req(dev); | 2135 | claw_strt_conn_req(dev); |
2111 | privptr->system_validate_comp = 1; | 2136 | privptr->system_validate_comp = 1; |
2112 | break; | 2137 | break; |
2113 | case CLAW_RC_NAME_MISMATCH: | 2138 | case CLAW_RC_NAME_MISMATCH: |
2114 | printk(KERN_INFO "%s: Sys Validate " | 2139 | dev_warn(tdev, "Validating %s failed because of" |
2115 | "Resp : Host, WS name is " | 2140 | " a host or adapter name mismatch\n", |
2116 | "mismatch\n", | 2141 | dev->name); |
2117 | dev->name); | ||
2118 | break; | 2142 | break; |
2119 | case CLAW_RC_WRONG_VERSION: | 2143 | case CLAW_RC_WRONG_VERSION: |
2120 | printk(KERN_INFO "%s: Sys Validate " | 2144 | dev_warn(tdev, "Validating %s failed because of a" |
2121 | "Resp : Wrong version\n", | 2145 | " version conflict\n", |
2122 | dev->name); | 2146 | dev->name); |
2123 | break; | 2147 | break; |
2124 | case CLAW_RC_HOST_RCV_TOO_SMALL: | 2148 | case CLAW_RC_HOST_RCV_TOO_SMALL: |
2125 | printk(KERN_INFO "%s: Sys Validate " | 2149 | dev_warn(tdev, "Validating %s failed because of a" |
2126 | "Resp : bad frame size\n", | 2150 | " frame size conflict\n", |
2127 | dev->name); | 2151 | dev->name); |
2128 | break; | 2152 | break; |
2129 | default: | 2153 | default: |
2130 | printk(KERN_INFO "%s: Sys Validate " | 2154 | dev_warn(tdev, "The communication peer of %s rejected" |
2131 | "error code=%d \n", | 2155 | " the connection\n", |
2132 | dev->name, p_ctlbk->rc); | 2156 | dev->name); |
2133 | break; | 2157 | break; |
2134 | } | 2158 | } |
2135 | break; | 2159 | break; |
2136 | 2160 | ||
2137 | case CONNECTION_REQUEST: | 2161 | case CONNECTION_REQUEST: |
2138 | p_connect = (struct conncmd *)&(p_ctlbk->data); | 2162 | p_connect = (struct conncmd *)&(p_ctlbk->data); |
2139 | printk(KERN_INFO "%s: Recv Conn Req: Vers=%d,link_id=%d," | 2163 | dev_info(tdev, "%s: Recv Conn Req: Vers=%d,link_id=%d," |
2140 | "Corr=%d,HOST appl=%.8s,WS appl=%.8s\n", | 2164 | "Corr=%d,HOST appl=%.8s,WS appl=%.8s\n", |
2141 | dev->name, | 2165 | dev->name, |
2142 | p_ctlbk->version, | 2166 | p_ctlbk->version, |
@@ -2146,21 +2170,21 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) | |||
2146 | p_connect->WS_name); | 2170 | p_connect->WS_name); |
2147 | if (privptr->active_link_ID != 0) { | 2171 | if (privptr->active_link_ID != 0) { |
2148 | claw_snd_disc(dev, p_ctlbk); | 2172 | claw_snd_disc(dev, p_ctlbk); |
2149 | printk(KERN_INFO "%s: Conn Req error : " | 2173 | dev_info(tdev, "%s rejected a connection request" |
2150 | "already logical link is active \n", | 2174 | " because it is already active\n", |
2151 | dev->name); | 2175 | dev->name); |
2152 | } | 2176 | } |
2153 | if (p_ctlbk->linkid != 1) { | 2177 | if (p_ctlbk->linkid != 1) { |
2154 | claw_snd_disc(dev, p_ctlbk); | 2178 | claw_snd_disc(dev, p_ctlbk); |
2155 | printk(KERN_INFO "%s: Conn Req error : " | 2179 | dev_info(tdev, "%s rejected a request to open multiple" |
2156 | "req logical link id is not 1\n", | 2180 | " connections\n", |
2157 | dev->name); | 2181 | dev->name); |
2158 | } | 2182 | } |
2159 | rc = find_link(dev, p_connect->host_name, p_connect->WS_name); | 2183 | rc = find_link(dev, p_connect->host_name, p_connect->WS_name); |
2160 | if (rc != 0) { | 2184 | if (rc != 0) { |
2161 | claw_snd_disc(dev, p_ctlbk); | 2185 | claw_snd_disc(dev, p_ctlbk); |
2162 | printk(KERN_INFO "%s: Conn Resp error: " | 2186 | dev_info(tdev, "%s rejected a connection request" |
2163 | "req appl name does not match\n", | 2187 | " because of a type mismatch\n", |
2164 | dev->name); | 2188 | dev->name); |
2165 | } | 2189 | } |
2166 | claw_send_control(dev, | 2190 | claw_send_control(dev, |
@@ -2172,7 +2196,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) | |||
2172 | p_env->packing = PACK_SEND; | 2196 | p_env->packing = PACK_SEND; |
2173 | claw_snd_conn_req(dev, 0); | 2197 | claw_snd_conn_req(dev, 0); |
2174 | } | 2198 | } |
2175 | printk(KERN_INFO "%s: CLAW device %.8s: Connection " | 2199 | dev_info(tdev, "%s: CLAW device %.8s: Connection " |
2176 | "completed link_id=%d.\n", | 2200 | "completed link_id=%d.\n", |
2177 | dev->name, temp_ws_name, | 2201 | dev->name, temp_ws_name, |
2178 | p_ctlbk->linkid); | 2202 | p_ctlbk->linkid); |
@@ -2182,7 +2206,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) | |||
2182 | break; | 2206 | break; |
2183 | case CONNECTION_RESPONSE: | 2207 | case CONNECTION_RESPONSE: |
2184 | p_connect = (struct conncmd *)&(p_ctlbk->data); | 2208 | p_connect = (struct conncmd *)&(p_ctlbk->data); |
2185 | printk(KERN_INFO "%s: Revc Conn Resp: Vers=%d,link_id=%d," | 2209 | dev_info(tdev, "%s: Recv Conn Resp: Vers=%d,link_id=%d," |
2186 | "Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n", | 2210 | "Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n", |
2187 | dev->name, | 2211 | dev->name, |
2188 | p_ctlbk->version, | 2212 | p_ctlbk->version, |
@@ -2193,16 +2217,18 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) | |||
2193 | p_connect->WS_name); | 2217 | p_connect->WS_name); |
2194 | 2218 | ||
2195 | if (p_ctlbk->rc != 0) { | 2219 | if (p_ctlbk->rc != 0) { |
2196 | printk(KERN_INFO "%s: Conn Resp error: rc=%d \n", | 2220 | dev_warn(tdev, "The communication peer of %s rejected" |
2197 | dev->name, p_ctlbk->rc); | 2221 | " a connection request\n", |
2222 | dev->name); | ||
2198 | return 1; | 2223 | return 1; |
2199 | } | 2224 | } |
2200 | rc = find_link(dev, | 2225 | rc = find_link(dev, |
2201 | p_connect->host_name, p_connect->WS_name); | 2226 | p_connect->host_name, p_connect->WS_name); |
2202 | if (rc != 0) { | 2227 | if (rc != 0) { |
2203 | claw_snd_disc(dev, p_ctlbk); | 2228 | claw_snd_disc(dev, p_ctlbk); |
2204 | printk(KERN_INFO "%s: Conn Resp error: " | 2229 | dev_warn(tdev, "The communication peer of %s" |
2205 | "req appl name does not match\n", | 2230 | " rejected a connection " |
2231 | "request because of a type mismatch\n", | ||
2206 | dev->name); | 2232 | dev->name); |
2207 | } | 2233 | } |
2208 | /* should be until CONNECTION_CONFIRM */ | 2234 | /* should be until CONNECTION_CONFIRM */ |
@@ -2210,7 +2236,8 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) | |||
2210 | break; | 2236 | break; |
2211 | case CONNECTION_CONFIRM: | 2237 | case CONNECTION_CONFIRM: |
2212 | p_connect = (struct conncmd *)&(p_ctlbk->data); | 2238 | p_connect = (struct conncmd *)&(p_ctlbk->data); |
2213 | printk(KERN_INFO "%s: Recv Conn Confirm:Vers=%d,link_id=%d," | 2239 | dev_info(tdev, |
2240 | "%s: Recv Conn Confirm:Vers=%d,link_id=%d," | ||
2214 | "Corr=%d,Host appl=%.8s,WS appl=%.8s\n", | 2241 | "Corr=%d,Host appl=%.8s,WS appl=%.8s\n", |
2215 | dev->name, | 2242 | dev->name, |
2216 | p_ctlbk->version, | 2243 | p_ctlbk->version, |
@@ -2221,21 +2248,21 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) | |||
2221 | if (p_ctlbk->linkid == -(privptr->active_link_ID)) { | 2248 | if (p_ctlbk->linkid == -(privptr->active_link_ID)) { |
2222 | privptr->active_link_ID = p_ctlbk->linkid; | 2249 | privptr->active_link_ID = p_ctlbk->linkid; |
2223 | if (p_env->packing > PACKING_ASK) { | 2250 | if (p_env->packing > PACKING_ASK) { |
2224 | printk(KERN_INFO "%s: Confirmed Now packing\n", | 2251 | dev_info(tdev, |
2225 | dev->name); | 2252 | "%s: Confirmed Now packing\n", dev->name); |
2226 | p_env->packing = DO_PACKED; | 2253 | p_env->packing = DO_PACKED; |
2227 | } | 2254 | } |
2228 | p_ch = &privptr->channel[WRITE]; | 2255 | p_ch = &privptr->channel[WRITE]; |
2229 | wake_up(&p_ch->wait); | 2256 | wake_up(&p_ch->wait); |
2230 | } else { | 2257 | } else { |
2231 | printk(KERN_INFO "%s: Conn confirm: " | 2258 | dev_warn(tdev, "Activating %s failed because of" |
2232 | "unexpected linkid=%d \n", | 2259 | " an incorrect link ID=%d\n", |
2233 | dev->name, p_ctlbk->linkid); | 2260 | dev->name, p_ctlbk->linkid); |
2234 | claw_snd_disc(dev, p_ctlbk); | 2261 | claw_snd_disc(dev, p_ctlbk); |
2235 | } | 2262 | } |
2236 | break; | 2263 | break; |
2237 | case DISCONNECT: | 2264 | case DISCONNECT: |
2238 | printk(KERN_INFO "%s: Disconnect: " | 2265 | dev_info(tdev, "%s: Disconnect: " |
2239 | "Vers=%d,link_id=%d,Corr=%d\n", | 2266 | "Vers=%d,link_id=%d,Corr=%d\n", |
2240 | dev->name, p_ctlbk->version, | 2267 | dev->name, p_ctlbk->version, |
2241 | p_ctlbk->linkid, p_ctlbk->correlator); | 2268 | p_ctlbk->linkid, p_ctlbk->correlator); |
@@ -2247,12 +2274,13 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) | |||
2247 | privptr->active_link_ID = 0; | 2274 | privptr->active_link_ID = 0; |
2248 | break; | 2275 | break; |
2249 | case CLAW_ERROR: | 2276 | case CLAW_ERROR: |
2250 | printk(KERN_INFO "%s: CLAW ERROR detected\n", | 2277 | dev_warn(tdev, "The communication peer of %s failed\n", |
2251 | dev->name); | 2278 | dev->name); |
2252 | break; | 2279 | break; |
2253 | default: | 2280 | default: |
2254 | printk(KERN_INFO "%s: Unexpected command code=%d \n", | 2281 | dev_warn(tdev, "The communication peer of %s sent" |
2255 | dev->name, p_ctlbk->command); | 2282 | " an unknown command code\n", |
2283 | dev->name); | ||
2256 | break; | 2284 | break; |
2257 | } | 2285 | } |
2258 | 2286 | ||
@@ -2294,12 +2322,14 @@ claw_send_control(struct net_device *dev, __u8 type, __u8 link, | |||
2294 | memcpy(&p_sysval->host_name, local_name, 8); | 2322 | memcpy(&p_sysval->host_name, local_name, 8); |
2295 | memcpy(&p_sysval->WS_name, remote_name, 8); | 2323 | memcpy(&p_sysval->WS_name, remote_name, 8); |
2296 | if (privptr->p_env->packing > 0) { | 2324 | if (privptr->p_env->packing > 0) { |
2297 | p_sysval->read_frame_size=DEF_PACK_BUFSIZE; | 2325 | p_sysval->read_frame_size = DEF_PACK_BUFSIZE; |
2298 | p_sysval->write_frame_size=DEF_PACK_BUFSIZE; | 2326 | p_sysval->write_frame_size = DEF_PACK_BUFSIZE; |
2299 | } else { | 2327 | } else { |
2300 | /* how big is the biggest group of packets */ | 2328 | /* how big is the biggest group of packets */ |
2301 | p_sysval->read_frame_size=privptr->p_env->read_size; | 2329 | p_sysval->read_frame_size = |
2302 | p_sysval->write_frame_size=privptr->p_env->write_size; | 2330 | privptr->p_env->read_size; |
2331 | p_sysval->write_frame_size = | ||
2332 | privptr->p_env->write_size; | ||
2303 | } | 2333 | } |
2304 | memset(&p_sysval->reserved, 0x00, 4); | 2334 | memset(&p_sysval->reserved, 0x00, 4); |
2305 | break; | 2335 | break; |
@@ -2511,8 +2541,10 @@ unpack_read(struct net_device *dev ) | |||
2511 | mtc_this_frm=1; | 2541 | mtc_this_frm=1; |
2512 | if (p_this_ccw->header.length!= | 2542 | if (p_this_ccw->header.length!= |
2513 | privptr->p_env->read_size ) { | 2543 | privptr->p_env->read_size ) { |
2514 | printk(KERN_INFO " %s: Invalid frame detected " | 2544 | dev_warn(p_dev, |
2515 | "length is %02x\n" , | 2545 | "The communication peer of %s" |
2546 | " sent a faulty" | ||
2547 | " frame of length %02x\n", | ||
2516 | dev->name, p_this_ccw->header.length); | 2548 | dev->name, p_this_ccw->header.length); |
2517 | } | 2549 | } |
2518 | } | 2550 | } |
@@ -2544,7 +2576,7 @@ unpack_next: | |||
2544 | goto NextFrame; | 2576 | goto NextFrame; |
2545 | p_packd = p_this_ccw->p_buffer+pack_off; | 2577 | p_packd = p_this_ccw->p_buffer+pack_off; |
2546 | p_packh = (struct clawph *) p_packd; | 2578 | p_packh = (struct clawph *) p_packd; |
2547 | if ((p_packh->len == 0) || /* all done with this frame? */ | 2579 | if ((p_packh->len == 0) || /* done with this frame? */ |
2548 | (p_packh->flag != 0)) | 2580 | (p_packh->flag != 0)) |
2549 | goto NextFrame; | 2581 | goto NextFrame; |
2550 | bytes_to_mov = p_packh->len; | 2582 | bytes_to_mov = p_packh->len; |
@@ -2594,9 +2626,9 @@ unpack_next: | |||
2594 | netif_rx(skb); | 2626 | netif_rx(skb); |
2595 | } | 2627 | } |
2596 | else { | 2628 | else { |
2629 | dev_info(p_dev, "Allocating a buffer for" | ||
2630 | " incoming data failed\n"); | ||
2597 | privptr->stats.rx_dropped++; | 2631 | privptr->stats.rx_dropped++; |
2598 | printk(KERN_WARNING "%s: %s() low on memory\n", | ||
2599 | dev->name,__func__); | ||
2600 | } | 2632 | } |
2601 | privptr->mtc_offset=0; | 2633 | privptr->mtc_offset=0; |
2602 | privptr->mtc_logical_link=-1; | 2634 | privptr->mtc_logical_link=-1; |
@@ -2720,8 +2752,8 @@ claw_strt_out_IO( struct net_device *dev ) | |||
2720 | if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) { | 2752 | if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) { |
2721 | parm = (unsigned long) p_ch; | 2753 | parm = (unsigned long) p_ch; |
2722 | CLAW_DBF_TEXT(2, trace, "StWrtIO"); | 2754 | CLAW_DBF_TEXT(2, trace, "StWrtIO"); |
2723 | rc = ccw_device_start (p_ch->cdev,&p_first_ccw->write, parm, | 2755 | rc = ccw_device_start(p_ch->cdev, &p_first_ccw->write, parm, |
2724 | 0xff, 0); | 2756 | 0xff, 0); |
2725 | if (rc != 0) { | 2757 | if (rc != 0) { |
2726 | ccw_check_return_code(p_ch->cdev, rc); | 2758 | ccw_check_return_code(p_ch->cdev, rc); |
2727 | } | 2759 | } |
@@ -2816,22 +2848,26 @@ claw_free_netdevice(struct net_device * dev, int free_dev) | |||
2816 | * Initialize everything of the net device except the name and the | 2848 | * Initialize everything of the net device except the name and the |
2817 | * channel structs. | 2849 | * channel structs. |
2818 | */ | 2850 | */ |
2851 | static const struct net_device_ops claw_netdev_ops = { | ||
2852 | .ndo_open = claw_open, | ||
2853 | .ndo_stop = claw_release, | ||
2854 | .ndo_get_stats = claw_stats, | ||
2855 | .ndo_start_xmit = claw_tx, | ||
2856 | .ndo_change_mtu = claw_change_mtu, | ||
2857 | }; | ||
2858 | |||
2819 | static void | 2859 | static void |
2820 | claw_init_netdevice(struct net_device * dev) | 2860 | claw_init_netdevice(struct net_device * dev) |
2821 | { | 2861 | { |
2822 | CLAW_DBF_TEXT(2, setup, "init_dev"); | 2862 | CLAW_DBF_TEXT(2, setup, "init_dev"); |
2823 | CLAW_DBF_TEXT_(2, setup, "%s", dev->name); | 2863 | CLAW_DBF_TEXT_(2, setup, "%s", dev->name); |
2824 | dev->mtu = CLAW_DEFAULT_MTU_SIZE; | 2864 | dev->mtu = CLAW_DEFAULT_MTU_SIZE; |
2825 | dev->hard_start_xmit = claw_tx; | ||
2826 | dev->open = claw_open; | ||
2827 | dev->stop = claw_release; | ||
2828 | dev->get_stats = claw_stats; | ||
2829 | dev->change_mtu = claw_change_mtu; | ||
2830 | dev->hard_header_len = 0; | 2865 | dev->hard_header_len = 0; |
2831 | dev->addr_len = 0; | 2866 | dev->addr_len = 0; |
2832 | dev->type = ARPHRD_SLIP; | 2867 | dev->type = ARPHRD_SLIP; |
2833 | dev->tx_queue_len = 1300; | 2868 | dev->tx_queue_len = 1300; |
2834 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; | 2869 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; |
2870 | dev->netdev_ops = &claw_netdev_ops; | ||
2835 | CLAW_DBF_TEXT(2, setup, "initok"); | 2871 | CLAW_DBF_TEXT(2, setup, "initok"); |
2836 | return; | 2872 | return; |
2837 | } | 2873 | } |
@@ -2880,8 +2916,8 @@ claw_new_device(struct ccwgroup_device *cgdev) | |||
2880 | int ret; | 2916 | int ret; |
2881 | struct ccw_dev_id dev_id; | 2917 | struct ccw_dev_id dev_id; |
2882 | 2918 | ||
2883 | printk(KERN_INFO "claw: add for %s\n", | 2919 | dev_info(&cgdev->dev, "add for %s\n", |
2884 | dev_name(&cgdev->cdev[READ]->dev)); | 2920 | dev_name(&cgdev->cdev[READ]->dev)); |
2885 | CLAW_DBF_TEXT(2, setup, "new_dev"); | 2921 | CLAW_DBF_TEXT(2, setup, "new_dev"); |
2886 | privptr = cgdev->dev.driver_data; | 2922 | privptr = cgdev->dev.driver_data; |
2887 | cgdev->cdev[READ]->dev.driver_data = privptr; | 2923 | cgdev->cdev[READ]->dev.driver_data = privptr; |
@@ -2897,29 +2933,28 @@ claw_new_device(struct ccwgroup_device *cgdev) | |||
2897 | if (ret == 0) | 2933 | if (ret == 0) |
2898 | ret = add_channel(cgdev->cdev[1],1,privptr); | 2934 | ret = add_channel(cgdev->cdev[1],1,privptr); |
2899 | if (ret != 0) { | 2935 | if (ret != 0) { |
2900 | printk(KERN_WARNING | 2936 | dev_warn(&cgdev->dev, "Creating a CLAW group device" |
2901 | "add channel failed with ret = %d\n", ret); | 2937 | " failed with error code %d\n", ret); |
2902 | goto out; | 2938 | goto out; |
2903 | } | 2939 | } |
2904 | ret = ccw_device_set_online(cgdev->cdev[READ]); | 2940 | ret = ccw_device_set_online(cgdev->cdev[READ]); |
2905 | if (ret != 0) { | 2941 | if (ret != 0) { |
2906 | printk(KERN_WARNING | 2942 | dev_warn(&cgdev->dev, |
2907 | "claw: ccw_device_set_online %s READ failed " | 2943 | "Setting the read subchannel online" |
2908 | "with ret = %d\n", dev_name(&cgdev->cdev[READ]->dev), | 2944 | " failed with error code %d\n", ret); |
2909 | ret); | ||
2910 | goto out; | 2945 | goto out; |
2911 | } | 2946 | } |
2912 | ret = ccw_device_set_online(cgdev->cdev[WRITE]); | 2947 | ret = ccw_device_set_online(cgdev->cdev[WRITE]); |
2913 | if (ret != 0) { | 2948 | if (ret != 0) { |
2914 | printk(KERN_WARNING | 2949 | dev_warn(&cgdev->dev, |
2915 | "claw: ccw_device_set_online %s WRITE failed " | 2950 | "Setting the write subchannel online " |
2916 | "with ret = %d\n", dev_name(&cgdev->cdev[WRITE]->dev), | 2951 | "failed with error code %d\n", ret); |
2917 | ret); | ||
2918 | goto out; | 2952 | goto out; |
2919 | } | 2953 | } |
2920 | dev = alloc_netdev(0,"claw%d",claw_init_netdevice); | 2954 | dev = alloc_netdev(0,"claw%d",claw_init_netdevice); |
2921 | if (!dev) { | 2955 | if (!dev) { |
2922 | printk(KERN_WARNING "%s:alloc_netdev failed\n",__func__); | 2956 | dev_warn(&cgdev->dev, |
2957 | "Activating the CLAW device failed\n"); | ||
2923 | goto out; | 2958 | goto out; |
2924 | } | 2959 | } |
2925 | dev->ml_priv = privptr; | 2960 | dev->ml_priv = privptr; |
@@ -2947,13 +2982,13 @@ claw_new_device(struct ccwgroup_device *cgdev) | |||
2947 | privptr->channel[WRITE].ndev = dev; | 2982 | privptr->channel[WRITE].ndev = dev; |
2948 | privptr->p_env->ndev = dev; | 2983 | privptr->p_env->ndev = dev; |
2949 | 2984 | ||
2950 | printk(KERN_INFO "%s:readsize=%d writesize=%d " | 2985 | dev_info(&cgdev->dev, "%s:readsize=%d writesize=%d " |
2951 | "readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n", | 2986 | "readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n", |
2952 | dev->name, p_env->read_size, | 2987 | dev->name, p_env->read_size, |
2953 | p_env->write_size, p_env->read_buffers, | 2988 | p_env->write_size, p_env->read_buffers, |
2954 | p_env->write_buffers, p_env->devno[READ], | 2989 | p_env->write_buffers, p_env->devno[READ], |
2955 | p_env->devno[WRITE]); | 2990 | p_env->devno[WRITE]); |
2956 | printk(KERN_INFO "%s:host_name:%.8s, adapter_name " | 2991 | dev_info(&cgdev->dev, "%s:host_name:%.8s, adapter_name " |
2957 | ":%.8s api_type: %.8s\n", | 2992 | ":%.8s api_type: %.8s\n", |
2958 | dev->name, p_env->host_name, | 2993 | dev->name, p_env->host_name, |
2959 | p_env->adapter_name , p_env->api_type); | 2994 | p_env->adapter_name , p_env->api_type); |
@@ -2997,8 +3032,8 @@ claw_shutdown_device(struct ccwgroup_device *cgdev) | |||
2997 | ndev = priv->channel[READ].ndev; | 3032 | ndev = priv->channel[READ].ndev; |
2998 | if (ndev) { | 3033 | if (ndev) { |
2999 | /* Close the device */ | 3034 | /* Close the device */ |
3000 | printk(KERN_INFO | 3035 | dev_info(&cgdev->dev, "%s: shutting down \n", |
3001 | "%s: shuting down \n",ndev->name); | 3036 | ndev->name); |
3002 | if (ndev->flags & IFF_RUNNING) | 3037 | if (ndev->flags & IFF_RUNNING) |
3003 | ret = claw_release(ndev); | 3038 | ret = claw_release(ndev); |
3004 | ndev->flags &=~IFF_RUNNING; | 3039 | ndev->flags &=~IFF_RUNNING; |
@@ -3023,8 +3058,7 @@ claw_remove_device(struct ccwgroup_device *cgdev) | |||
3023 | CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev)); | 3058 | CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev)); |
3024 | priv = cgdev->dev.driver_data; | 3059 | priv = cgdev->dev.driver_data; |
3025 | BUG_ON(!priv); | 3060 | BUG_ON(!priv); |
3026 | printk(KERN_INFO "claw: %s() called %s will be removed.\n", | 3061 | dev_info(&cgdev->dev, " will be removed.\n"); |
3027 | __func__, dev_name(&cgdev->cdev[0]->dev)); | ||
3028 | if (cgdev->state == CCWGROUP_ONLINE) | 3062 | if (cgdev->state == CCWGROUP_ONLINE) |
3029 | claw_shutdown_device(cgdev); | 3063 | claw_shutdown_device(cgdev); |
3030 | claw_remove_files(&cgdev->dev); | 3064 | claw_remove_files(&cgdev->dev); |
@@ -3063,7 +3097,8 @@ claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf) | |||
3063 | } | 3097 | } |
3064 | 3098 | ||
3065 | static ssize_t | 3099 | static ssize_t |
3066 | claw_hname_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 3100 | claw_hname_write(struct device *dev, struct device_attribute *attr, |
3101 | const char *buf, size_t count) | ||
3067 | { | 3102 | { |
3068 | struct claw_privbk *priv; | 3103 | struct claw_privbk *priv; |
3069 | struct claw_env * p_env; | 3104 | struct claw_env * p_env; |
@@ -3100,7 +3135,8 @@ claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf) | |||
3100 | } | 3135 | } |
3101 | 3136 | ||
3102 | static ssize_t | 3137 | static ssize_t |
3103 | claw_adname_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 3138 | claw_adname_write(struct device *dev, struct device_attribute *attr, |
3139 | const char *buf, size_t count) | ||
3104 | { | 3140 | { |
3105 | struct claw_privbk *priv; | 3141 | struct claw_privbk *priv; |
3106 | struct claw_env * p_env; | 3142 | struct claw_env * p_env; |
@@ -3138,7 +3174,8 @@ claw_apname_show(struct device *dev, struct device_attribute *attr, char *buf) | |||
3138 | } | 3174 | } |
3139 | 3175 | ||
3140 | static ssize_t | 3176 | static ssize_t |
3141 | claw_apname_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 3177 | claw_apname_write(struct device *dev, struct device_attribute *attr, |
3178 | const char *buf, size_t count) | ||
3142 | { | 3179 | { |
3143 | struct claw_privbk *priv; | 3180 | struct claw_privbk *priv; |
3144 | struct claw_env * p_env; | 3181 | struct claw_env * p_env; |
@@ -3185,7 +3222,8 @@ claw_wbuff_show(struct device *dev, struct device_attribute *attr, char *buf) | |||
3185 | } | 3222 | } |
3186 | 3223 | ||
3187 | static ssize_t | 3224 | static ssize_t |
3188 | claw_wbuff_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 3225 | claw_wbuff_write(struct device *dev, struct device_attribute *attr, |
3226 | const char *buf, size_t count) | ||
3189 | { | 3227 | { |
3190 | struct claw_privbk *priv; | 3228 | struct claw_privbk *priv; |
3191 | struct claw_env * p_env; | 3229 | struct claw_env * p_env; |
@@ -3226,7 +3264,8 @@ claw_rbuff_show(struct device *dev, struct device_attribute *attr, char *buf) | |||
3226 | } | 3264 | } |
3227 | 3265 | ||
3228 | static ssize_t | 3266 | static ssize_t |
3229 | claw_rbuff_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 3267 | claw_rbuff_write(struct device *dev, struct device_attribute *attr, |
3268 | const char *buf, size_t count) | ||
3230 | { | 3269 | { |
3231 | struct claw_privbk *priv; | 3270 | struct claw_privbk *priv; |
3232 | struct claw_env *p_env; | 3271 | struct claw_env *p_env; |
@@ -3289,7 +3328,7 @@ claw_cleanup(void) | |||
3289 | { | 3328 | { |
3290 | unregister_cu3088_discipline(&claw_group_driver); | 3329 | unregister_cu3088_discipline(&claw_group_driver); |
3291 | claw_unregister_debug_facility(); | 3330 | claw_unregister_debug_facility(); |
3292 | printk(KERN_INFO "claw: Driver unloaded\n"); | 3331 | pr_info("Driver unloaded\n"); |
3293 | 3332 | ||
3294 | } | 3333 | } |
3295 | 3334 | ||
@@ -3303,12 +3342,12 @@ static int __init | |||
3303 | claw_init(void) | 3342 | claw_init(void) |
3304 | { | 3343 | { |
3305 | int ret = 0; | 3344 | int ret = 0; |
3306 | printk(KERN_INFO "claw: starting driver\n"); | ||
3307 | 3345 | ||
3346 | pr_info("Loading %s\n", version); | ||
3308 | ret = claw_register_debug_facility(); | 3347 | ret = claw_register_debug_facility(); |
3309 | if (ret) { | 3348 | if (ret) { |
3310 | printk(KERN_WARNING "claw: %s() debug_register failed %d\n", | 3349 | pr_err("Registering with the S/390 debug feature" |
3311 | __func__,ret); | 3350 | " failed with error code %d\n", ret); |
3312 | return ret; | 3351 | return ret; |
3313 | } | 3352 | } |
3314 | CLAW_DBF_TEXT(2, setup, "init_mod"); | 3353 | CLAW_DBF_TEXT(2, setup, "init_mod"); |
@@ -3316,8 +3355,8 @@ claw_init(void) | |||
3316 | if (ret) { | 3355 | if (ret) { |
3317 | CLAW_DBF_TEXT(2, setup, "init_bad"); | 3356 | CLAW_DBF_TEXT(2, setup, "init_bad"); |
3318 | claw_unregister_debug_facility(); | 3357 | claw_unregister_debug_facility(); |
3319 | printk(KERN_WARNING "claw; %s() cu3088 register failed %d\n", | 3358 | pr_err("Registering with the cu3088 device driver failed " |
3320 | __func__,ret); | 3359 | "with error code %d\n", ret); |
3321 | } | 3360 | } |
3322 | return ret; | 3361 | return ret; |
3323 | } | 3362 | } |
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index 2678573becec..8f2a888d0a0a 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c | |||
@@ -1099,12 +1099,24 @@ static void ctcm_free_netdevice(struct net_device *dev) | |||
1099 | 1099 | ||
1100 | struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv); | 1100 | struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv); |
1101 | 1101 | ||
1102 | static const struct net_device_ops ctcm_netdev_ops = { | ||
1103 | .ndo_open = ctcm_open, | ||
1104 | .ndo_stop = ctcm_close, | ||
1105 | .ndo_get_stats = ctcm_stats, | ||
1106 | .ndo_change_mtu = ctcm_change_mtu, | ||
1107 | .ndo_start_xmit = ctcm_tx, | ||
1108 | }; | ||
1109 | |||
1110 | static const struct net_device_ops ctcm_mpc_netdev_ops = { | ||
1111 | .ndo_open = ctcm_open, | ||
1112 | .ndo_stop = ctcm_close, | ||
1113 | .ndo_get_stats = ctcm_stats, | ||
1114 | .ndo_change_mtu = ctcm_change_mtu, | ||
1115 | .ndo_start_xmit = ctcmpc_tx, | ||
1116 | }; | ||
1117 | |||
1102 | void static ctcm_dev_setup(struct net_device *dev) | 1118 | void static ctcm_dev_setup(struct net_device *dev) |
1103 | { | 1119 | { |
1104 | dev->open = ctcm_open; | ||
1105 | dev->stop = ctcm_close; | ||
1106 | dev->get_stats = ctcm_stats; | ||
1107 | dev->change_mtu = ctcm_change_mtu; | ||
1108 | dev->type = ARPHRD_SLIP; | 1120 | dev->type = ARPHRD_SLIP; |
1109 | dev->tx_queue_len = 100; | 1121 | dev->tx_queue_len = 100; |
1110 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; | 1122 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; |
@@ -1157,12 +1169,12 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv) | |||
1157 | dev->mtu = MPC_BUFSIZE_DEFAULT - | 1169 | dev->mtu = MPC_BUFSIZE_DEFAULT - |
1158 | TH_HEADER_LENGTH - PDU_HEADER_LENGTH; | 1170 | TH_HEADER_LENGTH - PDU_HEADER_LENGTH; |
1159 | 1171 | ||
1160 | dev->hard_start_xmit = ctcmpc_tx; | 1172 | dev->netdev_ops = &ctcm_mpc_netdev_ops; |
1161 | dev->hard_header_len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH; | 1173 | dev->hard_header_len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH; |
1162 | priv->buffer_size = MPC_BUFSIZE_DEFAULT; | 1174 | priv->buffer_size = MPC_BUFSIZE_DEFAULT; |
1163 | } else { | 1175 | } else { |
1164 | dev->mtu = CTCM_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2; | 1176 | dev->mtu = CTCM_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2; |
1165 | dev->hard_start_xmit = ctcm_tx; | 1177 | dev->netdev_ops = &ctcm_netdev_ops; |
1166 | dev->hard_header_len = LL_HEADER_LENGTH + 2; | 1178 | dev->hard_header_len = LL_HEADER_LENGTH + 2; |
1167 | } | 1179 | } |
1168 | 1180 | ||
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 49c3bfa1afd7..083f787d260d 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c | |||
@@ -2101,6 +2101,20 @@ lcs_register_netdev(struct ccwgroup_device *ccwgdev) | |||
2101 | /** | 2101 | /** |
2102 | * lcs_new_device will be called by setting the group device online. | 2102 | * lcs_new_device will be called by setting the group device online. |
2103 | */ | 2103 | */ |
2104 | static const struct net_device_ops lcs_netdev_ops = { | ||
2105 | .ndo_open = lcs_open_device, | ||
2106 | .ndo_stop = lcs_stop_device, | ||
2107 | .ndo_get_stats = lcs_getstats, | ||
2108 | .ndo_start_xmit = lcs_start_xmit, | ||
2109 | }; | ||
2110 | |||
2111 | static const struct net_device_ops lcs_mc_netdev_ops = { | ||
2112 | .ndo_open = lcs_open_device, | ||
2113 | .ndo_stop = lcs_stop_device, | ||
2114 | .ndo_get_stats = lcs_getstats, | ||
2115 | .ndo_start_xmit = lcs_start_xmit, | ||
2116 | .ndo_set_multicast_list = lcs_set_multicast_list, | ||
2117 | }; | ||
2104 | 2118 | ||
2105 | static int | 2119 | static int |
2106 | lcs_new_device(struct ccwgroup_device *ccwgdev) | 2120 | lcs_new_device(struct ccwgroup_device *ccwgdev) |
@@ -2168,14 +2182,11 @@ lcs_new_device(struct ccwgroup_device *ccwgdev) | |||
2168 | goto out; | 2182 | goto out; |
2169 | card->dev = dev; | 2183 | card->dev = dev; |
2170 | card->dev->ml_priv = card; | 2184 | card->dev->ml_priv = card; |
2171 | card->dev->open = lcs_open_device; | 2185 | card->dev->netdev_ops = &lcs_netdev_ops; |
2172 | card->dev->stop = lcs_stop_device; | ||
2173 | card->dev->hard_start_xmit = lcs_start_xmit; | ||
2174 | card->dev->get_stats = lcs_getstats; | ||
2175 | memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH); | 2186 | memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH); |
2176 | #ifdef CONFIG_IP_MULTICAST | 2187 | #ifdef CONFIG_IP_MULTICAST |
2177 | if (!lcs_check_multicast_support(card)) | 2188 | if (!lcs_check_multicast_support(card)) |
2178 | card->dev->set_multicast_list = lcs_set_multicast_list; | 2189 | card->dev->netdev_ops = &lcs_mc_netdev_ops; |
2179 | #endif | 2190 | #endif |
2180 | netdev_out: | 2191 | netdev_out: |
2181 | lcs_set_allowed_threads(card,0xffffffff); | 2192 | lcs_set_allowed_threads(card,0xffffffff); |
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 930e2fc2a011..1ba4509435f8 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c | |||
@@ -1876,20 +1876,24 @@ static void netiucv_free_netdevice(struct net_device *dev) | |||
1876 | /** | 1876 | /** |
1877 | * Initialize a net device. (Called from kernel in alloc_netdev()) | 1877 | * Initialize a net device. (Called from kernel in alloc_netdev()) |
1878 | */ | 1878 | */ |
1879 | static const struct net_device_ops netiucv_netdev_ops = { | ||
1880 | .ndo_open = netiucv_open, | ||
1881 | .ndo_stop = netiucv_close, | ||
1882 | .ndo_get_stats = netiucv_stats, | ||
1883 | .ndo_start_xmit = netiucv_tx, | ||
1884 | .ndo_change_mtu = netiucv_change_mtu, | ||
1885 | }; | ||
1886 | |||
1879 | static void netiucv_setup_netdevice(struct net_device *dev) | 1887 | static void netiucv_setup_netdevice(struct net_device *dev) |
1880 | { | 1888 | { |
1881 | dev->mtu = NETIUCV_MTU_DEFAULT; | 1889 | dev->mtu = NETIUCV_MTU_DEFAULT; |
1882 | dev->hard_start_xmit = netiucv_tx; | ||
1883 | dev->open = netiucv_open; | ||
1884 | dev->stop = netiucv_close; | ||
1885 | dev->get_stats = netiucv_stats; | ||
1886 | dev->change_mtu = netiucv_change_mtu; | ||
1887 | dev->destructor = netiucv_free_netdevice; | 1890 | dev->destructor = netiucv_free_netdevice; |
1888 | dev->hard_header_len = NETIUCV_HDRLEN; | 1891 | dev->hard_header_len = NETIUCV_HDRLEN; |
1889 | dev->addr_len = 0; | 1892 | dev->addr_len = 0; |
1890 | dev->type = ARPHRD_SLIP; | 1893 | dev->type = ARPHRD_SLIP; |
1891 | dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT; | 1894 | dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT; |
1892 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; | 1895 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; |
1896 | dev->netdev_ops = &netiucv_netdev_ops; | ||
1893 | } | 1897 | } |
1894 | 1898 | ||
1895 | /** | 1899 | /** |
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.h b/drivers/scsi/cxgb3i/cxgb3i_ddp.h index 5c7c4d95c493..f675807cc48f 100644 --- a/drivers/scsi/cxgb3i/cxgb3i_ddp.h +++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.h | |||
@@ -13,6 +13,8 @@ | |||
13 | #ifndef __CXGB3I_ULP2_DDP_H__ | 13 | #ifndef __CXGB3I_ULP2_DDP_H__ |
14 | #define __CXGB3I_ULP2_DDP_H__ | 14 | #define __CXGB3I_ULP2_DDP_H__ |
15 | 15 | ||
16 | #include <linux/vmalloc.h> | ||
17 | |||
16 | /** | 18 | /** |
17 | * struct cxgb3i_tag_format - cxgb3i ulp tag format for an iscsi entity | 19 | * struct cxgb3i_tag_format - cxgb3i ulp tag format for an iscsi entity |
18 | * | 20 | * |
diff --git a/include/linux/arcdevice.h b/include/linux/arcdevice.h index a1916078fd08..cd4bcb6989ce 100644 --- a/include/linux/arcdevice.h +++ b/include/linux/arcdevice.h | |||
@@ -235,8 +235,6 @@ struct Outgoing { | |||
235 | 235 | ||
236 | 236 | ||
237 | struct arcnet_local { | 237 | struct arcnet_local { |
238 | struct net_device_stats stats; | ||
239 | |||
240 | uint8_t config, /* current value of CONFIG register */ | 238 | uint8_t config, /* current value of CONFIG register */ |
241 | timeout, /* Extended timeout for COM20020 */ | 239 | timeout, /* Extended timeout for COM20020 */ |
242 | backplane, /* Backplane flag for COM20020 */ | 240 | backplane, /* Backplane flag for COM20020 */ |
@@ -335,7 +333,12 @@ void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc); | |||
335 | 333 | ||
336 | void arcnet_unregister_proto(struct ArcProto *proto); | 334 | void arcnet_unregister_proto(struct ArcProto *proto); |
337 | irqreturn_t arcnet_interrupt(int irq, void *dev_id); | 335 | irqreturn_t arcnet_interrupt(int irq, void *dev_id); |
338 | struct net_device *alloc_arcdev(char *name); | 336 | struct net_device *alloc_arcdev(const char *name); |
337 | |||
338 | int arcnet_open(struct net_device *dev); | ||
339 | int arcnet_close(struct net_device *dev); | ||
340 | int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev); | ||
341 | void arcnet_timeout(struct net_device *dev); | ||
339 | 342 | ||
340 | #endif /* __KERNEL__ */ | 343 | #endif /* __KERNEL__ */ |
341 | #endif /* _LINUX_ARCDEVICE_H */ | 344 | #endif /* _LINUX_ARCDEVICE_H */ |
diff --git a/include/linux/com20020.h b/include/linux/com20020.h index ac6d9a43e085..5dcfb944b6ce 100644 --- a/include/linux/com20020.h +++ b/include/linux/com20020.h | |||
@@ -29,6 +29,7 @@ | |||
29 | 29 | ||
30 | int com20020_check(struct net_device *dev); | 30 | int com20020_check(struct net_device *dev); |
31 | int com20020_found(struct net_device *dev, int shared); | 31 | int com20020_found(struct net_device *dev, int shared); |
32 | extern const struct net_device_ops com20020_netdev_ops; | ||
32 | 33 | ||
33 | /* The number of low I/O ports used by the card. */ | 34 | /* The number of low I/O ports used by the card. */ |
34 | #define ARCNET_TOTAL_SIZE 8 | 35 | #define ARCNET_TOTAL_SIZE 8 |
diff --git a/include/linux/connector.h b/include/linux/connector.h index 34f2789d9b9b..fc65d219d88c 100644 --- a/include/linux/connector.h +++ b/include/linux/connector.h | |||
@@ -109,6 +109,12 @@ struct cn_queue_dev { | |||
109 | unsigned char name[CN_CBQ_NAMELEN]; | 109 | unsigned char name[CN_CBQ_NAMELEN]; |
110 | 110 | ||
111 | struct workqueue_struct *cn_queue; | 111 | struct workqueue_struct *cn_queue; |
112 | /* Sent to kevent to create cn_queue only when needed */ | ||
113 | struct work_struct wq_creation; | ||
114 | /* Tell if the wq_creation job is pending/completed */ | ||
115 | atomic_t wq_requested; | ||
116 | /* Wait for cn_queue to be created */ | ||
117 | wait_queue_head_t wq_created; | ||
112 | 118 | ||
113 | struct list_head queue_list; | 119 | struct list_head queue_list; |
114 | spinlock_t queue_lock; | 120 | spinlock_t queue_lock; |
@@ -164,6 +170,8 @@ int cn_netlink_send(struct cn_msg *, u32, gfp_t); | |||
164 | int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *)); | 170 | int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *)); |
165 | void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id); | 171 | void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id); |
166 | 172 | ||
173 | int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work); | ||
174 | |||
167 | struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *); | 175 | struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *); |
168 | void cn_queue_free_dev(struct cn_queue_dev *dev); | 176 | void cn_queue_free_dev(struct cn_queue_dev *dev); |
169 | 177 | ||
diff --git a/include/linux/dccp.h b/include/linux/dccp.h index 61734e27abb7..7434a8353e23 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h | |||
@@ -355,46 +355,6 @@ static inline unsigned int dccp_hdr_len(const struct sk_buff *skb) | |||
355 | return __dccp_hdr_len(dccp_hdr(skb)); | 355 | return __dccp_hdr_len(dccp_hdr(skb)); |
356 | } | 356 | } |
357 | 357 | ||
358 | |||
359 | /* initial values for each feature */ | ||
360 | #define DCCPF_INITIAL_SEQUENCE_WINDOW 100 | ||
361 | #define DCCPF_INITIAL_ACK_RATIO 2 | ||
362 | #define DCCPF_INITIAL_CCID DCCPC_CCID2 | ||
363 | /* FIXME: for now we're default to 1 but it should really be 0 */ | ||
364 | #define DCCPF_INITIAL_SEND_NDP_COUNT 1 | ||
365 | |||
366 | /** | ||
367 | * struct dccp_minisock - Minimal DCCP connection representation | ||
368 | * | ||
369 | * Will be used to pass the state from dccp_request_sock to dccp_sock. | ||
370 | * | ||
371 | * @dccpms_sequence_window - Sequence Window Feature (section 7.5.2) | ||
372 | * @dccpms_pending - List of features being negotiated | ||
373 | * @dccpms_conf - | ||
374 | */ | ||
375 | struct dccp_minisock { | ||
376 | __u64 dccpms_sequence_window; | ||
377 | struct list_head dccpms_pending; | ||
378 | struct list_head dccpms_conf; | ||
379 | }; | ||
380 | |||
381 | struct dccp_opt_conf { | ||
382 | __u8 *dccpoc_val; | ||
383 | __u8 dccpoc_len; | ||
384 | }; | ||
385 | |||
386 | struct dccp_opt_pend { | ||
387 | struct list_head dccpop_node; | ||
388 | __u8 dccpop_type; | ||
389 | __u8 dccpop_feat; | ||
390 | __u8 *dccpop_val; | ||
391 | __u8 dccpop_len; | ||
392 | int dccpop_conf; | ||
393 | struct dccp_opt_conf *dccpop_sc; | ||
394 | }; | ||
395 | |||
396 | extern void dccp_minisock_init(struct dccp_minisock *dmsk); | ||
397 | |||
398 | /** | 358 | /** |
399 | * struct dccp_request_sock - represent DCCP-specific connection request | 359 | * struct dccp_request_sock - represent DCCP-specific connection request |
400 | * @dreq_inet_rsk: structure inherited from | 360 | * @dreq_inet_rsk: structure inherited from |
@@ -483,13 +443,14 @@ struct dccp_ackvec; | |||
483 | * @dccps_timestamp_time - time of receiving latest @dccps_timestamp_echo | 443 | * @dccps_timestamp_time - time of receiving latest @dccps_timestamp_echo |
484 | * @dccps_l_ack_ratio - feature-local Ack Ratio | 444 | * @dccps_l_ack_ratio - feature-local Ack Ratio |
485 | * @dccps_r_ack_ratio - feature-remote Ack Ratio | 445 | * @dccps_r_ack_ratio - feature-remote Ack Ratio |
446 | * @dccps_l_seq_win - local Sequence Window (influences ack number validity) | ||
447 | * @dccps_r_seq_win - remote Sequence Window (influences seq number validity) | ||
486 | * @dccps_pcslen - sender partial checksum coverage (via sockopt) | 448 | * @dccps_pcslen - sender partial checksum coverage (via sockopt) |
487 | * @dccps_pcrlen - receiver partial checksum coverage (via sockopt) | 449 | * @dccps_pcrlen - receiver partial checksum coverage (via sockopt) |
488 | * @dccps_send_ndp_count - local Send NDP Count feature (7.7.2) | 450 | * @dccps_send_ndp_count - local Send NDP Count feature (7.7.2) |
489 | * @dccps_ndp_count - number of Non Data Packets since last data packet | 451 | * @dccps_ndp_count - number of Non Data Packets since last data packet |
490 | * @dccps_mss_cache - current value of MSS (path MTU minus header sizes) | 452 | * @dccps_mss_cache - current value of MSS (path MTU minus header sizes) |
491 | * @dccps_rate_last - timestamp for rate-limiting DCCP-Sync (RFC 4340, 7.5.4) | 453 | * @dccps_rate_last - timestamp for rate-limiting DCCP-Sync (RFC 4340, 7.5.4) |
492 | * @dccps_minisock - associated minisock (accessed via dccp_msk) | ||
493 | * @dccps_featneg - tracks feature-negotiation state (mostly during handshake) | 454 | * @dccps_featneg - tracks feature-negotiation state (mostly during handshake) |
494 | * @dccps_hc_rx_ackvec - rx half connection ack vector | 455 | * @dccps_hc_rx_ackvec - rx half connection ack vector |
495 | * @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection) | 456 | * @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection) |
@@ -523,12 +484,13 @@ struct dccp_sock { | |||
523 | __u32 dccps_timestamp_time; | 484 | __u32 dccps_timestamp_time; |
524 | __u16 dccps_l_ack_ratio; | 485 | __u16 dccps_l_ack_ratio; |
525 | __u16 dccps_r_ack_ratio; | 486 | __u16 dccps_r_ack_ratio; |
487 | __u64 dccps_l_seq_win:48; | ||
488 | __u64 dccps_r_seq_win:48; | ||
526 | __u8 dccps_pcslen:4; | 489 | __u8 dccps_pcslen:4; |
527 | __u8 dccps_pcrlen:4; | 490 | __u8 dccps_pcrlen:4; |
528 | __u8 dccps_send_ndp_count:1; | 491 | __u8 dccps_send_ndp_count:1; |
529 | __u64 dccps_ndp_count:48; | 492 | __u64 dccps_ndp_count:48; |
530 | unsigned long dccps_rate_last; | 493 | unsigned long dccps_rate_last; |
531 | struct dccp_minisock dccps_minisock; | ||
532 | struct list_head dccps_featneg; | 494 | struct list_head dccps_featneg; |
533 | struct dccp_ackvec *dccps_hc_rx_ackvec; | 495 | struct dccp_ackvec *dccps_hc_rx_ackvec; |
534 | struct ccid *dccps_hc_rx_ccid; | 496 | struct ccid *dccps_hc_rx_ccid; |
@@ -546,11 +508,6 @@ static inline struct dccp_sock *dccp_sk(const struct sock *sk) | |||
546 | return (struct dccp_sock *)sk; | 508 | return (struct dccp_sock *)sk; |
547 | } | 509 | } |
548 | 510 | ||
549 | static inline struct dccp_minisock *dccp_msk(const struct sock *sk) | ||
550 | { | ||
551 | return (struct dccp_minisock *)&dccp_sk(sk)->dccps_minisock; | ||
552 | } | ||
553 | |||
554 | static inline const char *dccp_role(const struct sock *sk) | 511 | static inline const char *dccp_role(const struct sock *sk) |
555 | { | 512 | { |
556 | switch (dccp_sk(sk)->dccps_role) { | 513 | switch (dccp_sk(sk)->dccps_role) { |
diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h index fd47a151665e..6a6e701f1631 100644 --- a/include/linux/hdlc.h +++ b/include/linux/hdlc.h | |||
@@ -38,6 +38,7 @@ struct hdlc_proto { | |||
38 | int (*ioctl)(struct net_device *dev, struct ifreq *ifr); | 38 | int (*ioctl)(struct net_device *dev, struct ifreq *ifr); |
39 | __be16 (*type_trans)(struct sk_buff *skb, struct net_device *dev); | 39 | __be16 (*type_trans)(struct sk_buff *skb, struct net_device *dev); |
40 | int (*netif_rx)(struct sk_buff *skb); | 40 | int (*netif_rx)(struct sk_buff *skb); |
41 | int (*xmit)(struct sk_buff *skb, struct net_device *dev); | ||
41 | struct module *module; | 42 | struct module *module; |
42 | struct hdlc_proto *next; /* next protocol in the list */ | 43 | struct hdlc_proto *next; /* next protocol in the list */ |
43 | }; | 44 | }; |
@@ -102,6 +103,10 @@ static __inline__ void debug_frame(const struct sk_buff *skb) | |||
102 | int hdlc_open(struct net_device *dev); | 103 | int hdlc_open(struct net_device *dev); |
103 | /* Must be called by hardware driver when HDLC device is being closed */ | 104 | /* Must be called by hardware driver when HDLC device is being closed */ |
104 | void hdlc_close(struct net_device *dev); | 105 | void hdlc_close(struct net_device *dev); |
106 | /* May be used by hardware driver */ | ||
107 | int hdlc_change_mtu(struct net_device *dev, int new_mtu); | ||
108 | /* Must be pointed to by hw driver's dev->netdev_ops->ndo_start_xmit */ | ||
109 | int hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev); | ||
105 | 110 | ||
106 | int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto, | 111 | int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto, |
107 | size_t size); | 112 | size_t size); |
diff --git a/include/linux/hdlcdrv.h b/include/linux/hdlcdrv.h index bf6302f6b5f8..0821bac62b83 100644 --- a/include/linux/hdlcdrv.h +++ b/include/linux/hdlcdrv.h | |||
@@ -241,7 +241,6 @@ struct hdlcdrv_state { | |||
241 | struct hdlcdrv_bitbuffer bitbuf_hdlc; | 241 | struct hdlcdrv_bitbuffer bitbuf_hdlc; |
242 | #endif /* HDLCDRV_DEBUG */ | 242 | #endif /* HDLCDRV_DEBUG */ |
243 | 243 | ||
244 | struct net_device_stats stats; | ||
245 | int ptt_keyed; | 244 | int ptt_keyed; |
246 | 245 | ||
247 | /* queued skb for transmission */ | 246 | /* queued skb for transmission */ |
diff --git a/include/linux/ibmtr.h b/include/linux/ibmtr.h index 1c7a0dd5536a..06695b74d405 100644 --- a/include/linux/ibmtr.h +++ b/include/linux/ibmtr.h | |||
@@ -207,7 +207,7 @@ struct tok_info { | |||
207 | unsigned short exsap_station_id; | 207 | unsigned short exsap_station_id; |
208 | unsigned short global_int_enable; | 208 | unsigned short global_int_enable; |
209 | struct sk_buff *current_skb; | 209 | struct sk_buff *current_skb; |
210 | struct net_device_stats tr_stats; | 210 | |
211 | unsigned char auto_speedsave; | 211 | unsigned char auto_speedsave; |
212 | open_state open_status, sap_status; | 212 | open_state open_status, sap_status; |
213 | enum {MANUAL, AUTOMATIC} open_mode; | 213 | enum {MANUAL, AUTOMATIC} open_mode; |
diff --git a/include/linux/if.h b/include/linux/if.h index 2a6e29620a96..1108f3e099e3 100644 --- a/include/linux/if.h +++ b/include/linux/if.h | |||
@@ -66,6 +66,7 @@ | |||
66 | #define IFF_SLAVE_NEEDARP 0x40 /* need ARPs for validation */ | 66 | #define IFF_SLAVE_NEEDARP 0x40 /* need ARPs for validation */ |
67 | #define IFF_ISATAP 0x80 /* ISATAP interface (RFC4214) */ | 67 | #define IFF_ISATAP 0x80 /* ISATAP interface (RFC4214) */ |
68 | #define IFF_MASTER_ARPMON 0x100 /* bonding master, ARP mon in use */ | 68 | #define IFF_MASTER_ARPMON 0x100 /* bonding master, ARP mon in use */ |
69 | #define IFF_WAN_HDLC 0x200 /* WAN HDLC device */ | ||
69 | 70 | ||
70 | #define IF_GET_IFACE 0x0001 /* for querying only */ | 71 | #define IF_GET_IFACE 0x0001 /* for querying only */ |
71 | #define IF_GET_PROTO 0x0002 | 72 | #define IF_GET_PROTO 0x0002 |
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 06fcdb45106b..acef2a770b6b 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h | |||
@@ -108,6 +108,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev) | |||
108 | #define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER) | 108 | #define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER) |
109 | #define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE) | 109 | #define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE) |
110 | #define IN_DEV_ARP_IGNORE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_IGNORE) | 110 | #define IN_DEV_ARP_IGNORE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_IGNORE) |
111 | #define IN_DEV_ARP_NOTIFY(in_dev) IN_DEV_MAXCONF((in_dev), ARP_NOTIFY) | ||
111 | 112 | ||
112 | struct in_ifaddr | 113 | struct in_ifaddr |
113 | { | 114 | { |
diff --git a/include/linux/mroute.h b/include/linux/mroute.h index 8a455694d682..0d45b4e8d367 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h | |||
@@ -193,6 +193,9 @@ struct vif_device | |||
193 | struct mfc_cache | 193 | struct mfc_cache |
194 | { | 194 | { |
195 | struct mfc_cache *next; /* Next entry on cache line */ | 195 | struct mfc_cache *next; /* Next entry on cache line */ |
196 | #ifdef CONFIG_NET_NS | ||
197 | struct net *mfc_net; | ||
198 | #endif | ||
196 | __be32 mfc_mcastgrp; /* Group the entry belongs to */ | 199 | __be32 mfc_mcastgrp; /* Group the entry belongs to */ |
197 | __be32 mfc_origin; /* Source of packet */ | 200 | __be32 mfc_origin; /* Source of packet */ |
198 | vifi_t mfc_parent; /* Source interface */ | 201 | vifi_t mfc_parent; /* Source interface */ |
@@ -215,6 +218,18 @@ struct mfc_cache | |||
215 | } mfc_un; | 218 | } mfc_un; |
216 | }; | 219 | }; |
217 | 220 | ||
221 | static inline | ||
222 | struct net *mfc_net(const struct mfc_cache *mfc) | ||
223 | { | ||
224 | return read_pnet(&mfc->mfc_net); | ||
225 | } | ||
226 | |||
227 | static inline | ||
228 | void mfc_net_set(struct mfc_cache *mfc, struct net *net) | ||
229 | { | ||
230 | write_pnet(&mfc->mfc_net, hold_net(net)); | ||
231 | } | ||
232 | |||
218 | #define MFC_STATIC 1 | 233 | #define MFC_STATIC 1 |
219 | #define MFC_NOTIFY 2 | 234 | #define MFC_NOTIFY 2 |
220 | 235 | ||
@@ -241,7 +256,8 @@ struct mfc_cache | |||
241 | 256 | ||
242 | #ifdef __KERNEL__ | 257 | #ifdef __KERNEL__ |
243 | struct rtmsg; | 258 | struct rtmsg; |
244 | extern int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait); | 259 | extern int ipmr_get_route(struct net *net, struct sk_buff *skb, |
260 | struct rtmsg *rtm, int nowait); | ||
245 | #endif | 261 | #endif |
246 | 262 | ||
247 | #endif | 263 | #endif |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ec54785d34f9..7a5057fbb7cd 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -984,6 +984,9 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi, | |||
984 | void netif_napi_del(struct napi_struct *napi); | 984 | void netif_napi_del(struct napi_struct *napi); |
985 | 985 | ||
986 | struct napi_gro_cb { | 986 | struct napi_gro_cb { |
987 | /* This indicates where we are processing relative to skb->data. */ | ||
988 | int data_offset; | ||
989 | |||
987 | /* This is non-zero if the packet may be of the same flow. */ | 990 | /* This is non-zero if the packet may be of the same flow. */ |
988 | int same_flow; | 991 | int same_flow; |
989 | 992 | ||
@@ -1087,6 +1090,29 @@ extern int dev_restart(struct net_device *dev); | |||
1087 | #ifdef CONFIG_NETPOLL_TRAP | 1090 | #ifdef CONFIG_NETPOLL_TRAP |
1088 | extern int netpoll_trap(void); | 1091 | extern int netpoll_trap(void); |
1089 | #endif | 1092 | #endif |
1093 | extern void *skb_gro_header(struct sk_buff *skb, unsigned int hlen); | ||
1094 | extern int skb_gro_receive(struct sk_buff **head, | ||
1095 | struct sk_buff *skb); | ||
1096 | |||
1097 | static inline unsigned int skb_gro_offset(const struct sk_buff *skb) | ||
1098 | { | ||
1099 | return NAPI_GRO_CB(skb)->data_offset; | ||
1100 | } | ||
1101 | |||
1102 | static inline unsigned int skb_gro_len(const struct sk_buff *skb) | ||
1103 | { | ||
1104 | return skb->len - NAPI_GRO_CB(skb)->data_offset; | ||
1105 | } | ||
1106 | |||
1107 | static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len) | ||
1108 | { | ||
1109 | NAPI_GRO_CB(skb)->data_offset += len; | ||
1110 | } | ||
1111 | |||
1112 | static inline void skb_gro_reset_offset(struct sk_buff *skb) | ||
1113 | { | ||
1114 | NAPI_GRO_CB(skb)->data_offset = 0; | ||
1115 | } | ||
1090 | 1116 | ||
1091 | static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, | 1117 | static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, |
1092 | unsigned short type, | 1118 | unsigned short type, |
@@ -1375,12 +1401,15 @@ extern int netif_receive_skb(struct sk_buff *skb); | |||
1375 | extern void napi_gro_flush(struct napi_struct *napi); | 1401 | extern void napi_gro_flush(struct napi_struct *napi); |
1376 | extern int dev_gro_receive(struct napi_struct *napi, | 1402 | extern int dev_gro_receive(struct napi_struct *napi, |
1377 | struct sk_buff *skb); | 1403 | struct sk_buff *skb); |
1404 | extern int napi_skb_finish(int ret, struct sk_buff *skb); | ||
1378 | extern int napi_gro_receive(struct napi_struct *napi, | 1405 | extern int napi_gro_receive(struct napi_struct *napi, |
1379 | struct sk_buff *skb); | 1406 | struct sk_buff *skb); |
1380 | extern void napi_reuse_skb(struct napi_struct *napi, | 1407 | extern void napi_reuse_skb(struct napi_struct *napi, |
1381 | struct sk_buff *skb); | 1408 | struct sk_buff *skb); |
1382 | extern struct sk_buff * napi_fraginfo_skb(struct napi_struct *napi, | 1409 | extern struct sk_buff * napi_fraginfo_skb(struct napi_struct *napi, |
1383 | struct napi_gro_fraginfo *info); | 1410 | struct napi_gro_fraginfo *info); |
1411 | extern int napi_frags_finish(struct napi_struct *napi, | ||
1412 | struct sk_buff *skb, int ret); | ||
1384 | extern int napi_gro_frags(struct napi_struct *napi, | 1413 | extern int napi_gro_frags(struct napi_struct *napi, |
1385 | struct napi_gro_fraginfo *info); | 1414 | struct napi_gro_fraginfo *info); |
1386 | extern void netif_nit_deliver(struct sk_buff *skb); | 1415 | extern void netif_nit_deliver(struct sk_buff *skb); |
@@ -1574,56 +1603,6 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) | |||
1574 | return (1 << debug_value) - 1; | 1603 | return (1 << debug_value) - 1; |
1575 | } | 1604 | } |
1576 | 1605 | ||
1577 | /* Test if receive needs to be scheduled but only if up */ | ||
1578 | static inline int netif_rx_schedule_prep(struct napi_struct *napi) | ||
1579 | { | ||
1580 | return napi_schedule_prep(napi); | ||
1581 | } | ||
1582 | |||
1583 | /* Add interface to tail of rx poll list. This assumes that _prep has | ||
1584 | * already been called and returned 1. | ||
1585 | */ | ||
1586 | static inline void __netif_rx_schedule(struct napi_struct *napi) | ||
1587 | { | ||
1588 | __napi_schedule(napi); | ||
1589 | } | ||
1590 | |||
1591 | /* Try to reschedule poll. Called by irq handler. */ | ||
1592 | |||
1593 | static inline void netif_rx_schedule(struct napi_struct *napi) | ||
1594 | { | ||
1595 | if (netif_rx_schedule_prep(napi)) | ||
1596 | __netif_rx_schedule(napi); | ||
1597 | } | ||
1598 | |||
1599 | /* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */ | ||
1600 | static inline int netif_rx_reschedule(struct napi_struct *napi) | ||
1601 | { | ||
1602 | if (napi_schedule_prep(napi)) { | ||
1603 | __netif_rx_schedule(napi); | ||
1604 | return 1; | ||
1605 | } | ||
1606 | return 0; | ||
1607 | } | ||
1608 | |||
1609 | /* same as netif_rx_complete, except that local_irq_save(flags) | ||
1610 | * has already been issued | ||
1611 | */ | ||
1612 | static inline void __netif_rx_complete(struct napi_struct *napi) | ||
1613 | { | ||
1614 | __napi_complete(napi); | ||
1615 | } | ||
1616 | |||
1617 | /* Remove interface from poll list: it must be in the poll list | ||
1618 | * on current cpu. This primitive is called by dev->poll(), when | ||
1619 | * it completes the work. The device cannot be out of poll list at this | ||
1620 | * moment, it is BUG(). | ||
1621 | */ | ||
1622 | static inline void netif_rx_complete(struct napi_struct *napi) | ||
1623 | { | ||
1624 | napi_complete(napi); | ||
1625 | } | ||
1626 | |||
1627 | static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) | 1606 | static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) |
1628 | { | 1607 | { |
1629 | spin_lock(&txq->_xmit_lock); | 1608 | spin_lock(&txq->_xmit_lock); |
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 52a9fe08451c..2370184e3654 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -2213,6 +2213,8 @@ | |||
2213 | 2213 | ||
2214 | #define PCI_VENDOR_ID_TOPSPIN 0x1867 | 2214 | #define PCI_VENDOR_ID_TOPSPIN 0x1867 |
2215 | 2215 | ||
2216 | #define PCI_VENDOR_ID_SILAN 0x1904 | ||
2217 | |||
2216 | #define PCI_VENDOR_ID_TDI 0x192E | 2218 | #define PCI_VENDOR_ID_TDI 0x192E |
2217 | #define PCI_DEVICE_ID_TDI_EHCI 0x0101 | 2219 | #define PCI_DEVICE_ID_TDI_EHCI 0x0101 |
2218 | 2220 | ||
diff --git a/include/linux/ppp_channel.h b/include/linux/ppp_channel.h index a942892d6dfe..9d64bdf14770 100644 --- a/include/linux/ppp_channel.h +++ b/include/linux/ppp_channel.h | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/list.h> | 22 | #include <linux/list.h> |
23 | #include <linux/skbuff.h> | 23 | #include <linux/skbuff.h> |
24 | #include <linux/poll.h> | 24 | #include <linux/poll.h> |
25 | #include <net/net_namespace.h> | ||
25 | 26 | ||
26 | struct ppp_channel; | 27 | struct ppp_channel; |
27 | 28 | ||
@@ -56,6 +57,9 @@ extern void ppp_input(struct ppp_channel *, struct sk_buff *); | |||
56 | that we may have missed a packet. */ | 57 | that we may have missed a packet. */ |
57 | extern void ppp_input_error(struct ppp_channel *, int code); | 58 | extern void ppp_input_error(struct ppp_channel *, int code); |
58 | 59 | ||
60 | /* Attach a channel to a given PPP unit in specified net. */ | ||
61 | extern int ppp_register_net_channel(struct net *, struct ppp_channel *); | ||
62 | |||
59 | /* Attach a channel to a given PPP unit. */ | 63 | /* Attach a channel to a given PPP unit. */ |
60 | extern int ppp_register_channel(struct ppp_channel *); | 64 | extern int ppp_register_channel(struct ppp_channel *); |
61 | 65 | ||
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index cf2cb50f77d1..08670d017479 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -1687,8 +1687,6 @@ extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, | |||
1687 | int shiftlen); | 1687 | int shiftlen); |
1688 | 1688 | ||
1689 | extern struct sk_buff *skb_segment(struct sk_buff *skb, int features); | 1689 | extern struct sk_buff *skb_segment(struct sk_buff *skb, int features); |
1690 | extern int skb_gro_receive(struct sk_buff **head, | ||
1691 | struct sk_buff *skb); | ||
1692 | 1690 | ||
1693 | static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, | 1691 | static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, |
1694 | int len, void *buffer) | 1692 | int len, void *buffer) |
@@ -1904,6 +1902,21 @@ static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_bu | |||
1904 | to->queue_mapping = from->queue_mapping; | 1902 | to->queue_mapping = from->queue_mapping; |
1905 | } | 1903 | } |
1906 | 1904 | ||
1905 | static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue) | ||
1906 | { | ||
1907 | skb->queue_mapping = rx_queue + 1; | ||
1908 | } | ||
1909 | |||
1910 | static inline u16 skb_get_rx_queue(struct sk_buff *skb) | ||
1911 | { | ||
1912 | return skb->queue_mapping - 1; | ||
1913 | } | ||
1914 | |||
1915 | static inline bool skb_rx_queue_recorded(struct sk_buff *skb) | ||
1916 | { | ||
1917 | return (skb->queue_mapping != 0); | ||
1918 | } | ||
1919 | |||
1907 | #ifdef CONFIG_XFRM | 1920 | #ifdef CONFIG_XFRM |
1908 | static inline struct sec_path *skb_sec_path(struct sk_buff *skb) | 1921 | static inline struct sec_path *skb_sec_path(struct sk_buff *skb) |
1909 | { | 1922 | { |
diff --git a/include/linux/smsc911x.h b/include/linux/smsc911x.h index 1cbf0313adde..b32725075d71 100644 --- a/include/linux/smsc911x.h +++ b/include/linux/smsc911x.h | |||
@@ -43,5 +43,8 @@ struct smsc911x_platform_config { | |||
43 | /* Constants for flags */ | 43 | /* Constants for flags */ |
44 | #define SMSC911X_USE_16BIT (BIT(0)) | 44 | #define SMSC911X_USE_16BIT (BIT(0)) |
45 | #define SMSC911X_USE_32BIT (BIT(1)) | 45 | #define SMSC911X_USE_32BIT (BIT(1)) |
46 | #define SMSC911X_FORCE_INTERNAL_PHY (BIT(2)) | ||
47 | #define SMSC911X_FORCE_EXTERNAL_PHY (BIT(3)) | ||
48 | #define SMSC911X_SAVE_MAC_ADDRESS (BIT(4)) | ||
46 | 49 | ||
47 | #endif /* __LINUX_SMSC911X_H__ */ | 50 | #endif /* __LINUX_SMSC911X_H__ */ |
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 39d471d1163b..e76d3b22a466 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h | |||
@@ -490,6 +490,7 @@ enum | |||
490 | NET_IPV4_CONF_ARP_IGNORE=19, | 490 | NET_IPV4_CONF_ARP_IGNORE=19, |
491 | NET_IPV4_CONF_PROMOTE_SECONDARIES=20, | 491 | NET_IPV4_CONF_PROMOTE_SECONDARIES=20, |
492 | NET_IPV4_CONF_ARP_ACCEPT=21, | 492 | NET_IPV4_CONF_ARP_ACCEPT=21, |
493 | NET_IPV4_CONF_ARP_NOTIFY=22, | ||
493 | __NET_IPV4_CONF_MAX | 494 | __NET_IPV4_CONF_MAX |
494 | }; | 495 | }; |
495 | 496 | ||
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index 3efa86c3ecb3..d8e362d52fd8 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h | |||
@@ -22,11 +22,16 @@ | |||
22 | #define VIRTIO_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in. */ | 22 | #define VIRTIO_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in. */ |
23 | #define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in. */ | 23 | #define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in. */ |
24 | #define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */ | 24 | #define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */ |
25 | #define VIRTIO_NET_F_STATUS 16 /* virtio_net_config.status available */ | ||
26 | |||
27 | #define VIRTIO_NET_S_LINK_UP 1 /* Link is up */ | ||
25 | 28 | ||
26 | struct virtio_net_config | 29 | struct virtio_net_config |
27 | { | 30 | { |
28 | /* The config defining mac address (if VIRTIO_NET_F_MAC) */ | 31 | /* The config defining mac address (if VIRTIO_NET_F_MAC) */ |
29 | __u8 mac[6]; | 32 | __u8 mac[6]; |
33 | /* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */ | ||
34 | __u16 status; | ||
30 | } __attribute__((packed)); | 35 | } __attribute__((packed)); |
31 | 36 | ||
32 | /* This is the first element of the scatter-gather list. If you don't | 37 | /* This is the first element of the scatter-gather list. If you don't |
diff --git a/include/net/atmclip.h b/include/net/atmclip.h index b5a51a7bb364..467c531b8a7e 100644 --- a/include/net/atmclip.h +++ b/include/net/atmclip.h | |||
@@ -50,7 +50,6 @@ struct atmarp_entry { | |||
50 | struct clip_priv { | 50 | struct clip_priv { |
51 | int number; /* for convenience ... */ | 51 | int number; /* for convenience ... */ |
52 | spinlock_t xoff_lock; /* ensures that pop is atomic (SMP) */ | 52 | spinlock_t xoff_lock; /* ensures that pop is atomic (SMP) */ |
53 | struct net_device_stats stats; | ||
54 | struct net_device *next; /* next CLIP interface */ | 53 | struct net_device *next; /* next CLIP interface */ |
55 | }; | 54 | }; |
56 | 55 | ||
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index d0a043153cc6..a44e2248b2ef 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h | |||
@@ -82,6 +82,7 @@ struct inet_bind_bucket { | |||
82 | #endif | 82 | #endif |
83 | unsigned short port; | 83 | unsigned short port; |
84 | signed short fastreuse; | 84 | signed short fastreuse; |
85 | int num_owners; | ||
85 | struct hlist_node node; | 86 | struct hlist_node node; |
86 | struct hlist_head owners; | 87 | struct hlist_head owners; |
87 | }; | 88 | }; |
@@ -133,7 +134,7 @@ struct inet_hashinfo { | |||
133 | struct inet_bind_hashbucket *bhash; | 134 | struct inet_bind_hashbucket *bhash; |
134 | 135 | ||
135 | unsigned int bhash_size; | 136 | unsigned int bhash_size; |
136 | /* Note : 4 bytes padding on 64 bit arches */ | 137 | /* 4 bytes hole on 64 bit */ |
137 | 138 | ||
138 | struct kmem_cache *bind_bucket_cachep; | 139 | struct kmem_cache *bind_bucket_cachep; |
139 | 140 | ||
@@ -150,6 +151,7 @@ struct inet_hashinfo { | |||
150 | struct inet_listen_hashbucket listening_hash[INET_LHTABLE_SIZE] | 151 | struct inet_listen_hashbucket listening_hash[INET_LHTABLE_SIZE] |
151 | ____cacheline_aligned_in_smp; | 152 | ____cacheline_aligned_in_smp; |
152 | 153 | ||
154 | atomic_t bsockets; | ||
153 | }; | 155 | }; |
154 | 156 | ||
155 | static inline struct inet_ehash_bucket *inet_ehash_bucket( | 157 | static inline struct inet_ehash_bucket *inet_ehash_bucket( |
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 977f482d97a9..2eb3814d6258 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h | |||
@@ -54,5 +54,18 @@ struct netns_ipv4 { | |||
54 | 54 | ||
55 | struct timer_list rt_secret_timer; | 55 | struct timer_list rt_secret_timer; |
56 | atomic_t rt_genid; | 56 | atomic_t rt_genid; |
57 | |||
58 | #ifdef CONFIG_IP_MROUTE | ||
59 | struct sock *mroute_sk; | ||
60 | struct mfc_cache **mfc_cache_array; | ||
61 | struct vif_device *vif_table; | ||
62 | int maxvif; | ||
63 | atomic_t cache_resolve_queue_len; | ||
64 | int mroute_do_assert; | ||
65 | int mroute_do_pim; | ||
66 | #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2) | ||
67 | int mroute_reg_vif_num; | ||
68 | #endif | ||
69 | #endif | ||
57 | }; | 70 | }; |
58 | #endif | 71 | #endif |
diff --git a/include/net/netrom.h b/include/net/netrom.h index f06852bba62a..15696b1fd30f 100644 --- a/include/net/netrom.h +++ b/include/net/netrom.h | |||
@@ -59,10 +59,6 @@ enum { | |||
59 | #define NR_MAX_WINDOW_SIZE 127 /* Maximum Window Allowable - 127 */ | 59 | #define NR_MAX_WINDOW_SIZE 127 /* Maximum Window Allowable - 127 */ |
60 | #define NR_MAX_PACKET_SIZE 236 /* Maximum Packet Length - 236 */ | 60 | #define NR_MAX_PACKET_SIZE 236 /* Maximum Packet Length - 236 */ |
61 | 61 | ||
62 | struct nr_private { | ||
63 | struct net_device_stats stats; | ||
64 | }; | ||
65 | |||
66 | struct nr_sock { | 62 | struct nr_sock { |
67 | struct sock sock; | 63 | struct sock sock; |
68 | ax25_address user_addr, source_addr, dest_addr; | 64 | ax25_address user_addr, source_addr, dest_addr; |
diff --git a/include/net/phonet/phonet.h b/include/net/phonet/phonet.h index 057b0a8a2885..d43f71b5ec00 100644 --- a/include/net/phonet/phonet.h +++ b/include/net/phonet/phonet.h | |||
@@ -105,7 +105,6 @@ void phonet_proto_unregister(int protocol, struct phonet_protocol *pp); | |||
105 | 105 | ||
106 | int phonet_sysctl_init(void); | 106 | int phonet_sysctl_init(void); |
107 | void phonet_sysctl_exit(void); | 107 | void phonet_sysctl_exit(void); |
108 | void phonet_netlink_register(void); | ||
109 | int isi_register(void); | 108 | int isi_register(void); |
110 | void isi_unregister(void); | 109 | void isi_unregister(void); |
111 | 110 | ||
diff --git a/include/net/phonet/pn_dev.h b/include/net/phonet/pn_dev.h index aa1c59a1d33f..5054dc5ea2c2 100644 --- a/include/net/phonet/pn_dev.h +++ b/include/net/phonet/pn_dev.h | |||
@@ -28,7 +28,7 @@ struct phonet_device_list { | |||
28 | spinlock_t lock; | 28 | spinlock_t lock; |
29 | }; | 29 | }; |
30 | 30 | ||
31 | extern struct phonet_device_list pndevs; | 31 | struct phonet_device_list *phonet_device_list(struct net *net); |
32 | 32 | ||
33 | struct phonet_device { | 33 | struct phonet_device { |
34 | struct list_head list; | 34 | struct list_head list; |
@@ -36,8 +36,9 @@ struct phonet_device { | |||
36 | DECLARE_BITMAP(addrs, 64); | 36 | DECLARE_BITMAP(addrs, 64); |
37 | }; | 37 | }; |
38 | 38 | ||
39 | void phonet_device_init(void); | 39 | int phonet_device_init(void); |
40 | void phonet_device_exit(void); | 40 | void phonet_device_exit(void); |
41 | int phonet_netlink_register(void); | ||
41 | struct net_device *phonet_device_get(struct net *net); | 42 | struct net_device *phonet_device_get(struct net *net); |
42 | 43 | ||
43 | int phonet_address_add(struct net_device *dev, u8 addr); | 44 | int phonet_address_add(struct net_device *dev, u8 addr); |
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 4082f39f5079..e37fe3129c17 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h | |||
@@ -85,6 +85,7 @@ extern struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, | |||
85 | struct nlattr *tab); | 85 | struct nlattr *tab); |
86 | extern void qdisc_put_rtab(struct qdisc_rate_table *tab); | 86 | extern void qdisc_put_rtab(struct qdisc_rate_table *tab); |
87 | extern void qdisc_put_stab(struct qdisc_size_table *tab); | 87 | extern void qdisc_put_stab(struct qdisc_size_table *tab); |
88 | extern void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc); | ||
88 | 89 | ||
89 | extern void __qdisc_run(struct Qdisc *q); | 90 | extern void __qdisc_run(struct Qdisc *q); |
90 | 91 | ||
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index f8c47429044a..3d78a4d22460 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h | |||
@@ -42,9 +42,10 @@ struct Qdisc | |||
42 | int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev); | 42 | int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev); |
43 | struct sk_buff * (*dequeue)(struct Qdisc *dev); | 43 | struct sk_buff * (*dequeue)(struct Qdisc *dev); |
44 | unsigned flags; | 44 | unsigned flags; |
45 | #define TCQ_F_BUILTIN 1 | 45 | #define TCQ_F_BUILTIN 1 |
46 | #define TCQ_F_THROTTLED 2 | 46 | #define TCQ_F_THROTTLED 2 |
47 | #define TCQ_F_INGRESS 4 | 47 | #define TCQ_F_INGRESS 4 |
48 | #define TCQ_F_WARN_NONWC (1 << 16) | ||
48 | int padded; | 49 | int padded; |
49 | struct Qdisc_ops *ops; | 50 | struct Qdisc_ops *ops; |
50 | struct qdisc_size_table *stab; | 51 | struct qdisc_size_table *stab; |
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c index fafeb48f27c0..b38423ca711a 100644 --- a/kernel/sysctl_check.c +++ b/kernel/sysctl_check.c | |||
@@ -219,6 +219,7 @@ static const struct trans_ctl_table trans_net_ipv4_conf_vars_table[] = { | |||
219 | { NET_IPV4_CONF_ARP_IGNORE, "arp_ignore" }, | 219 | { NET_IPV4_CONF_ARP_IGNORE, "arp_ignore" }, |
220 | { NET_IPV4_CONF_PROMOTE_SECONDARIES, "promote_secondaries" }, | 220 | { NET_IPV4_CONF_PROMOTE_SECONDARIES, "promote_secondaries" }, |
221 | { NET_IPV4_CONF_ARP_ACCEPT, "arp_accept" }, | 221 | { NET_IPV4_CONF_ARP_ACCEPT, "arp_accept" }, |
222 | { NET_IPV4_CONF_ARP_NOTIFY, "arp_notify" }, | ||
222 | {} | 223 | {} |
223 | }; | 224 | }; |
224 | 225 | ||
diff --git a/net/802/psnap.c b/net/802/psnap.c index 70980baeb682..6ed711748f26 100644 --- a/net/802/psnap.c +++ b/net/802/psnap.c | |||
@@ -51,7 +51,7 @@ static int snap_rcv(struct sk_buff *skb, struct net_device *dev, | |||
51 | int rc = 1; | 51 | int rc = 1; |
52 | struct datalink_proto *proto; | 52 | struct datalink_proto *proto; |
53 | static struct packet_type snap_packet_type = { | 53 | static struct packet_type snap_packet_type = { |
54 | .type = __constant_htons(ETH_P_SNAP), | 54 | .type = cpu_to_be16(ETH_P_SNAP), |
55 | }; | 55 | }; |
56 | 56 | ||
57 | if (unlikely(!pskb_may_pull(skb, 5))) | 57 | if (unlikely(!pskb_may_pull(skb, 5))) |
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 41e8f65bd3f0..4163ea65bf41 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c | |||
@@ -52,7 +52,7 @@ static const char vlan_copyright[] = "Ben Greear <greearb@candelatech.com>"; | |||
52 | static const char vlan_buggyright[] = "David S. Miller <davem@redhat.com>"; | 52 | static const char vlan_buggyright[] = "David S. Miller <davem@redhat.com>"; |
53 | 53 | ||
54 | static struct packet_type vlan_packet_type = { | 54 | static struct packet_type vlan_packet_type = { |
55 | .type = __constant_htons(ETH_P_8021Q), | 55 | .type = cpu_to_be16(ETH_P_8021Q), |
56 | .func = vlan_skb_recv, /* VLAN receive method */ | 56 | .func = vlan_skb_recv, /* VLAN receive method */ |
57 | }; | 57 | }; |
58 | 58 | ||
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index e9db889d6222..378fa69d625a 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c | |||
@@ -98,22 +98,9 @@ drop: | |||
98 | int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp, | 98 | int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp, |
99 | unsigned int vlan_tci, struct sk_buff *skb) | 99 | unsigned int vlan_tci, struct sk_buff *skb) |
100 | { | 100 | { |
101 | int err = NET_RX_SUCCESS; | 101 | skb_gro_reset_offset(skb); |
102 | 102 | ||
103 | switch (vlan_gro_common(napi, grp, vlan_tci, skb)) { | 103 | return napi_skb_finish(vlan_gro_common(napi, grp, vlan_tci, skb), skb); |
104 | case -1: | ||
105 | return netif_receive_skb(skb); | ||
106 | |||
107 | case 2: | ||
108 | err = NET_RX_DROP; | ||
109 | /* fall through */ | ||
110 | |||
111 | case 1: | ||
112 | kfree_skb(skb); | ||
113 | break; | ||
114 | } | ||
115 | |||
116 | return err; | ||
117 | } | 104 | } |
118 | EXPORT_SYMBOL(vlan_gro_receive); | 105 | EXPORT_SYMBOL(vlan_gro_receive); |
119 | 106 | ||
@@ -121,27 +108,11 @@ int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp, | |||
121 | unsigned int vlan_tci, struct napi_gro_fraginfo *info) | 108 | unsigned int vlan_tci, struct napi_gro_fraginfo *info) |
122 | { | 109 | { |
123 | struct sk_buff *skb = napi_fraginfo_skb(napi, info); | 110 | struct sk_buff *skb = napi_fraginfo_skb(napi, info); |
124 | int err = NET_RX_DROP; | ||
125 | 111 | ||
126 | if (!skb) | 112 | if (!skb) |
127 | goto out; | 113 | return NET_RX_DROP; |
128 | |||
129 | err = NET_RX_SUCCESS; | ||
130 | |||
131 | switch (vlan_gro_common(napi, grp, vlan_tci, skb)) { | ||
132 | case -1: | ||
133 | return netif_receive_skb(skb); | ||
134 | |||
135 | case 2: | ||
136 | err = NET_RX_DROP; | ||
137 | /* fall through */ | ||
138 | |||
139 | case 1: | ||
140 | napi_reuse_skb(napi, skb); | ||
141 | break; | ||
142 | } | ||
143 | 114 | ||
144 | out: | 115 | return napi_frags_finish(napi, skb, |
145 | return err; | 116 | vlan_gro_common(napi, grp, vlan_tci, skb)); |
146 | } | 117 | } |
147 | EXPORT_SYMBOL(vlan_gro_frags); | 118 | EXPORT_SYMBOL(vlan_gro_frags); |
diff --git a/net/Kconfig b/net/Kconfig index cdb8fdef6c4a..a12bae0e3fe9 100644 --- a/net/Kconfig +++ b/net/Kconfig | |||
@@ -185,6 +185,7 @@ source "net/x25/Kconfig" | |||
185 | source "net/lapb/Kconfig" | 185 | source "net/lapb/Kconfig" |
186 | source "net/econet/Kconfig" | 186 | source "net/econet/Kconfig" |
187 | source "net/wanrouter/Kconfig" | 187 | source "net/wanrouter/Kconfig" |
188 | source "net/phonet/Kconfig" | ||
188 | source "net/sched/Kconfig" | 189 | source "net/sched/Kconfig" |
189 | source "net/dcb/Kconfig" | 190 | source "net/dcb/Kconfig" |
190 | 191 | ||
@@ -229,7 +230,6 @@ source "net/can/Kconfig" | |||
229 | source "net/irda/Kconfig" | 230 | source "net/irda/Kconfig" |
230 | source "net/bluetooth/Kconfig" | 231 | source "net/bluetooth/Kconfig" |
231 | source "net/rxrpc/Kconfig" | 232 | source "net/rxrpc/Kconfig" |
232 | source "net/phonet/Kconfig" | ||
233 | 233 | ||
234 | config FIB_RULES | 234 | config FIB_RULES |
235 | bool | 235 | bool |
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index 5abce07fb50a..510a6782da8f 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c | |||
@@ -1861,12 +1861,12 @@ static struct notifier_block ddp_notifier = { | |||
1861 | }; | 1861 | }; |
1862 | 1862 | ||
1863 | static struct packet_type ltalk_packet_type = { | 1863 | static struct packet_type ltalk_packet_type = { |
1864 | .type = __constant_htons(ETH_P_LOCALTALK), | 1864 | .type = cpu_to_be16(ETH_P_LOCALTALK), |
1865 | .func = ltalk_rcv, | 1865 | .func = ltalk_rcv, |
1866 | }; | 1866 | }; |
1867 | 1867 | ||
1868 | static struct packet_type ppptalk_packet_type = { | 1868 | static struct packet_type ppptalk_packet_type = { |
1869 | .type = __constant_htons(ETH_P_PPPTALK), | 1869 | .type = cpu_to_be16(ETH_P_PPPTALK), |
1870 | .func = atalk_rcv, | 1870 | .func = atalk_rcv, |
1871 | }; | 1871 | }; |
1872 | 1872 | ||
diff --git a/net/appletalk/dev.c b/net/appletalk/dev.c index d856a62ab50f..72277d70c980 100644 --- a/net/appletalk/dev.c +++ b/net/appletalk/dev.c | |||
@@ -9,22 +9,20 @@ | |||
9 | #include <linux/if_arp.h> | 9 | #include <linux/if_arp.h> |
10 | #include <linux/if_ltalk.h> | 10 | #include <linux/if_ltalk.h> |
11 | 11 | ||
12 | #ifdef CONFIG_COMPAT_NET_DEV_OPS | ||
12 | static int ltalk_change_mtu(struct net_device *dev, int mtu) | 13 | static int ltalk_change_mtu(struct net_device *dev, int mtu) |
13 | { | 14 | { |
14 | return -EINVAL; | 15 | return -EINVAL; |
15 | } | 16 | } |
16 | 17 | #endif | |
17 | static int ltalk_mac_addr(struct net_device *dev, void *addr) | ||
18 | { | ||
19 | return -EINVAL; | ||
20 | } | ||
21 | 18 | ||
22 | static void ltalk_setup(struct net_device *dev) | 19 | static void ltalk_setup(struct net_device *dev) |
23 | { | 20 | { |
24 | /* Fill in the fields of the device structure with localtalk-generic values. */ | 21 | /* Fill in the fields of the device structure with localtalk-generic values. */ |
25 | 22 | ||
23 | #ifdef CONFIG_COMPAT_NET_DEV_OPS | ||
26 | dev->change_mtu = ltalk_change_mtu; | 24 | dev->change_mtu = ltalk_change_mtu; |
27 | dev->set_mac_address = ltalk_mac_addr; | 25 | #endif |
28 | 26 | ||
29 | dev->type = ARPHRD_LOCALTLK; | 27 | dev->type = ARPHRD_LOCALTLK; |
30 | dev->hard_header_len = LTALK_HLEN; | 28 | dev->hard_header_len = LTALK_HLEN; |
diff --git a/net/atm/br2684.c b/net/atm/br2684.c index ea9438fc6855..334fcd4a4ea4 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c | |||
@@ -83,7 +83,6 @@ struct br2684_dev { | |||
83 | struct list_head br2684_devs; | 83 | struct list_head br2684_devs; |
84 | int number; | 84 | int number; |
85 | struct list_head brvccs; /* one device <=> one vcc (before xmas) */ | 85 | struct list_head brvccs; /* one device <=> one vcc (before xmas) */ |
86 | struct net_device_stats stats; | ||
87 | int mac_was_set; | 86 | int mac_was_set; |
88 | enum br2684_payload payload; | 87 | enum br2684_payload payload; |
89 | }; | 88 | }; |
@@ -148,9 +147,10 @@ static struct net_device *br2684_find_dev(const struct br2684_if_spec *s) | |||
148 | * the way for multiple vcc's per itf. Returns true if we can send, | 147 | * the way for multiple vcc's per itf. Returns true if we can send, |
149 | * otherwise false | 148 | * otherwise false |
150 | */ | 149 | */ |
151 | static int br2684_xmit_vcc(struct sk_buff *skb, struct br2684_dev *brdev, | 150 | static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev, |
152 | struct br2684_vcc *brvcc) | 151 | struct br2684_vcc *brvcc) |
153 | { | 152 | { |
153 | struct br2684_dev *brdev = BRPRIV(dev); | ||
154 | struct atm_vcc *atmvcc; | 154 | struct atm_vcc *atmvcc; |
155 | int minheadroom = (brvcc->encaps == e_llc) ? 10 : 2; | 155 | int minheadroom = (brvcc->encaps == e_llc) ? 10 : 2; |
156 | 156 | ||
@@ -211,8 +211,8 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct br2684_dev *brdev, | |||
211 | } | 211 | } |
212 | atomic_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc); | 212 | atomic_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc); |
213 | ATM_SKB(skb)->atm_options = atmvcc->atm_options; | 213 | ATM_SKB(skb)->atm_options = atmvcc->atm_options; |
214 | brdev->stats.tx_packets++; | 214 | dev->stats.tx_packets++; |
215 | brdev->stats.tx_bytes += skb->len; | 215 | dev->stats.tx_bytes += skb->len; |
216 | atmvcc->send(atmvcc, skb); | 216 | atmvcc->send(atmvcc, skb); |
217 | return 1; | 217 | return 1; |
218 | } | 218 | } |
@@ -233,14 +233,14 @@ static int br2684_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
233 | brvcc = pick_outgoing_vcc(skb, brdev); | 233 | brvcc = pick_outgoing_vcc(skb, brdev); |
234 | if (brvcc == NULL) { | 234 | if (brvcc == NULL) { |
235 | pr_debug("no vcc attached to dev %s\n", dev->name); | 235 | pr_debug("no vcc attached to dev %s\n", dev->name); |
236 | brdev->stats.tx_errors++; | 236 | dev->stats.tx_errors++; |
237 | brdev->stats.tx_carrier_errors++; | 237 | dev->stats.tx_carrier_errors++; |
238 | /* netif_stop_queue(dev); */ | 238 | /* netif_stop_queue(dev); */ |
239 | dev_kfree_skb(skb); | 239 | dev_kfree_skb(skb); |
240 | read_unlock(&devs_lock); | 240 | read_unlock(&devs_lock); |
241 | return 0; | 241 | return 0; |
242 | } | 242 | } |
243 | if (!br2684_xmit_vcc(skb, brdev, brvcc)) { | 243 | if (!br2684_xmit_vcc(skb, dev, brvcc)) { |
244 | /* | 244 | /* |
245 | * We should probably use netif_*_queue() here, but that | 245 | * We should probably use netif_*_queue() here, but that |
246 | * involves added complication. We need to walk before | 246 | * involves added complication. We need to walk before |
@@ -248,27 +248,20 @@ static int br2684_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
248 | * | 248 | * |
249 | * Don't free here! this pointer might be no longer valid! | 249 | * Don't free here! this pointer might be no longer valid! |
250 | */ | 250 | */ |
251 | brdev->stats.tx_errors++; | 251 | dev->stats.tx_errors++; |
252 | brdev->stats.tx_fifo_errors++; | 252 | dev->stats.tx_fifo_errors++; |
253 | } | 253 | } |
254 | read_unlock(&devs_lock); | 254 | read_unlock(&devs_lock); |
255 | return 0; | 255 | return 0; |
256 | } | 256 | } |
257 | 257 | ||
258 | static struct net_device_stats *br2684_get_stats(struct net_device *dev) | ||
259 | { | ||
260 | pr_debug("br2684_get_stats\n"); | ||
261 | return &BRPRIV(dev)->stats; | ||
262 | } | ||
263 | |||
264 | /* | 258 | /* |
265 | * We remember when the MAC gets set, so we don't override it later with | 259 | * We remember when the MAC gets set, so we don't override it later with |
266 | * the ESI of the ATM card of the first VC | 260 | * the ESI of the ATM card of the first VC |
267 | */ | 261 | */ |
268 | static int (*my_eth_mac_addr) (struct net_device *, void *); | ||
269 | static int br2684_mac_addr(struct net_device *dev, void *p) | 262 | static int br2684_mac_addr(struct net_device *dev, void *p) |
270 | { | 263 | { |
271 | int err = my_eth_mac_addr(dev, p); | 264 | int err = eth_mac_addr(dev, p); |
272 | if (!err) | 265 | if (!err) |
273 | BRPRIV(dev)->mac_was_set = 1; | 266 | BRPRIV(dev)->mac_was_set = 1; |
274 | return err; | 267 | return err; |
@@ -430,17 +423,17 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb) | |||
430 | /* sigh, interface is down? */ | 423 | /* sigh, interface is down? */ |
431 | if (unlikely(!(net_dev->flags & IFF_UP))) | 424 | if (unlikely(!(net_dev->flags & IFF_UP))) |
432 | goto dropped; | 425 | goto dropped; |
433 | brdev->stats.rx_packets++; | 426 | net_dev->stats.rx_packets++; |
434 | brdev->stats.rx_bytes += skb->len; | 427 | net_dev->stats.rx_bytes += skb->len; |
435 | memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data)); | 428 | memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data)); |
436 | netif_rx(skb); | 429 | netif_rx(skb); |
437 | return; | 430 | return; |
438 | 431 | ||
439 | dropped: | 432 | dropped: |
440 | brdev->stats.rx_dropped++; | 433 | net_dev->stats.rx_dropped++; |
441 | goto free_skb; | 434 | goto free_skb; |
442 | error: | 435 | error: |
443 | brdev->stats.rx_errors++; | 436 | net_dev->stats.rx_errors++; |
444 | free_skb: | 437 | free_skb: |
445 | dev_kfree_skb(skb); | 438 | dev_kfree_skb(skb); |
446 | return; | 439 | return; |
@@ -531,8 +524,8 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg) | |||
531 | 524 | ||
532 | skb->next = skb->prev = NULL; | 525 | skb->next = skb->prev = NULL; |
533 | br2684_push(atmvcc, skb); | 526 | br2684_push(atmvcc, skb); |
534 | BRPRIV(skb->dev)->stats.rx_bytes -= skb->len; | 527 | skb->dev->stats.rx_bytes -= skb->len; |
535 | BRPRIV(skb->dev)->stats.rx_packets--; | 528 | skb->dev->stats.rx_packets--; |
536 | 529 | ||
537 | skb = next; | 530 | skb = next; |
538 | } | 531 | } |
@@ -544,17 +537,20 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg) | |||
544 | return err; | 537 | return err; |
545 | } | 538 | } |
546 | 539 | ||
540 | static const struct net_device_ops br2684_netdev_ops = { | ||
541 | .ndo_start_xmit = br2684_start_xmit, | ||
542 | .ndo_set_mac_address = br2684_mac_addr, | ||
543 | .ndo_change_mtu = eth_change_mtu, | ||
544 | .ndo_validate_addr = eth_validate_addr, | ||
545 | }; | ||
546 | |||
547 | static void br2684_setup(struct net_device *netdev) | 547 | static void br2684_setup(struct net_device *netdev) |
548 | { | 548 | { |
549 | struct br2684_dev *brdev = BRPRIV(netdev); | 549 | struct br2684_dev *brdev = BRPRIV(netdev); |
550 | 550 | ||
551 | ether_setup(netdev); | 551 | ether_setup(netdev); |
552 | brdev->net_dev = netdev; | ||
553 | 552 | ||
554 | my_eth_mac_addr = netdev->set_mac_address; | 553 | netdev->netdev_ops = &br2684_netdev_ops; |
555 | netdev->set_mac_address = br2684_mac_addr; | ||
556 | netdev->hard_start_xmit = br2684_start_xmit; | ||
557 | netdev->get_stats = br2684_get_stats; | ||
558 | 554 | ||
559 | INIT_LIST_HEAD(&brdev->brvccs); | 555 | INIT_LIST_HEAD(&brdev->brvccs); |
560 | } | 556 | } |
@@ -565,10 +561,8 @@ static void br2684_setup_routed(struct net_device *netdev) | |||
565 | brdev->net_dev = netdev; | 561 | brdev->net_dev = netdev; |
566 | 562 | ||
567 | netdev->hard_header_len = 0; | 563 | netdev->hard_header_len = 0; |
568 | my_eth_mac_addr = netdev->set_mac_address; | 564 | |
569 | netdev->set_mac_address = br2684_mac_addr; | 565 | netdev->netdev_ops = &br2684_netdev_ops; |
570 | netdev->hard_start_xmit = br2684_start_xmit; | ||
571 | netdev->get_stats = br2684_get_stats; | ||
572 | netdev->addr_len = 0; | 566 | netdev->addr_len = 0; |
573 | netdev->mtu = 1500; | 567 | netdev->mtu = 1500; |
574 | netdev->type = ARPHRD_PPP; | 568 | netdev->type = ARPHRD_PPP; |
diff --git a/net/atm/clip.c b/net/atm/clip.c index 2d33a83be799..da42fd06b61f 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c | |||
@@ -214,15 +214,15 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb) | |||
214 | skb->protocol = ((__be16 *) skb->data)[3]; | 214 | skb->protocol = ((__be16 *) skb->data)[3]; |
215 | skb_pull(skb, RFC1483LLC_LEN); | 215 | skb_pull(skb, RFC1483LLC_LEN); |
216 | if (skb->protocol == htons(ETH_P_ARP)) { | 216 | if (skb->protocol == htons(ETH_P_ARP)) { |
217 | PRIV(skb->dev)->stats.rx_packets++; | 217 | skb->dev->stats.rx_packets++; |
218 | PRIV(skb->dev)->stats.rx_bytes += skb->len; | 218 | skb->dev->stats.rx_bytes += skb->len; |
219 | clip_arp_rcv(skb); | 219 | clip_arp_rcv(skb); |
220 | return; | 220 | return; |
221 | } | 221 | } |
222 | } | 222 | } |
223 | clip_vcc->last_use = jiffies; | 223 | clip_vcc->last_use = jiffies; |
224 | PRIV(skb->dev)->stats.rx_packets++; | 224 | skb->dev->stats.rx_packets++; |
225 | PRIV(skb->dev)->stats.rx_bytes += skb->len; | 225 | skb->dev->stats.rx_bytes += skb->len; |
226 | memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data)); | 226 | memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data)); |
227 | netif_rx(skb); | 227 | netif_rx(skb); |
228 | } | 228 | } |
@@ -372,7 +372,7 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
372 | if (!skb->dst) { | 372 | if (!skb->dst) { |
373 | printk(KERN_ERR "clip_start_xmit: skb->dst == NULL\n"); | 373 | printk(KERN_ERR "clip_start_xmit: skb->dst == NULL\n"); |
374 | dev_kfree_skb(skb); | 374 | dev_kfree_skb(skb); |
375 | clip_priv->stats.tx_dropped++; | 375 | dev->stats.tx_dropped++; |
376 | return 0; | 376 | return 0; |
377 | } | 377 | } |
378 | if (!skb->dst->neighbour) { | 378 | if (!skb->dst->neighbour) { |
@@ -380,13 +380,13 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
380 | skb->dst->neighbour = clip_find_neighbour(skb->dst, 1); | 380 | skb->dst->neighbour = clip_find_neighbour(skb->dst, 1); |
381 | if (!skb->dst->neighbour) { | 381 | if (!skb->dst->neighbour) { |
382 | dev_kfree_skb(skb); /* lost that one */ | 382 | dev_kfree_skb(skb); /* lost that one */ |
383 | clip_priv->stats.tx_dropped++; | 383 | dev->stats.tx_dropped++; |
384 | return 0; | 384 | return 0; |
385 | } | 385 | } |
386 | #endif | 386 | #endif |
387 | printk(KERN_ERR "clip_start_xmit: NO NEIGHBOUR !\n"); | 387 | printk(KERN_ERR "clip_start_xmit: NO NEIGHBOUR !\n"); |
388 | dev_kfree_skb(skb); | 388 | dev_kfree_skb(skb); |
389 | clip_priv->stats.tx_dropped++; | 389 | dev->stats.tx_dropped++; |
390 | return 0; | 390 | return 0; |
391 | } | 391 | } |
392 | entry = NEIGH2ENTRY(skb->dst->neighbour); | 392 | entry = NEIGH2ENTRY(skb->dst->neighbour); |
@@ -400,7 +400,7 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
400 | skb_queue_tail(&entry->neigh->arp_queue, skb); | 400 | skb_queue_tail(&entry->neigh->arp_queue, skb); |
401 | else { | 401 | else { |
402 | dev_kfree_skb(skb); | 402 | dev_kfree_skb(skb); |
403 | clip_priv->stats.tx_dropped++; | 403 | dev->stats.tx_dropped++; |
404 | } | 404 | } |
405 | return 0; | 405 | return 0; |
406 | } | 406 | } |
@@ -423,8 +423,8 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
423 | printk(KERN_WARNING "clip_start_xmit: XOFF->XOFF transition\n"); | 423 | printk(KERN_WARNING "clip_start_xmit: XOFF->XOFF transition\n"); |
424 | return 0; | 424 | return 0; |
425 | } | 425 | } |
426 | clip_priv->stats.tx_packets++; | 426 | dev->stats.tx_packets++; |
427 | clip_priv->stats.tx_bytes += skb->len; | 427 | dev->stats.tx_bytes += skb->len; |
428 | vcc->send(vcc, skb); | 428 | vcc->send(vcc, skb); |
429 | if (atm_may_send(vcc, 0)) { | 429 | if (atm_may_send(vcc, 0)) { |
430 | entry->vccs->xoff = 0; | 430 | entry->vccs->xoff = 0; |
@@ -443,11 +443,6 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
443 | return 0; | 443 | return 0; |
444 | } | 444 | } |
445 | 445 | ||
446 | static struct net_device_stats *clip_get_stats(struct net_device *dev) | ||
447 | { | ||
448 | return &PRIV(dev)->stats; | ||
449 | } | ||
450 | |||
451 | static int clip_mkip(struct atm_vcc *vcc, int timeout) | 446 | static int clip_mkip(struct atm_vcc *vcc, int timeout) |
452 | { | 447 | { |
453 | struct clip_vcc *clip_vcc; | 448 | struct clip_vcc *clip_vcc; |
@@ -501,8 +496,8 @@ static int clip_mkip(struct atm_vcc *vcc, int timeout) | |||
501 | 496 | ||
502 | skb_get(skb); | 497 | skb_get(skb); |
503 | clip_push(vcc, skb); | 498 | clip_push(vcc, skb); |
504 | PRIV(skb->dev)->stats.rx_packets--; | 499 | skb->dev->stats.rx_packets--; |
505 | PRIV(skb->dev)->stats.rx_bytes -= len; | 500 | skb->dev->stats.rx_bytes -= len; |
506 | kfree_skb(skb); | 501 | kfree_skb(skb); |
507 | } | 502 | } |
508 | 503 | ||
@@ -561,7 +556,6 @@ static void clip_setup(struct net_device *dev) | |||
561 | { | 556 | { |
562 | dev->hard_start_xmit = clip_start_xmit; | 557 | dev->hard_start_xmit = clip_start_xmit; |
563 | /* sg_xmit ... */ | 558 | /* sg_xmit ... */ |
564 | dev->get_stats = clip_get_stats; | ||
565 | dev->type = ARPHRD_ATM; | 559 | dev->type = ARPHRD_ATM; |
566 | dev->hard_header_len = RFC1483LLC_LEN; | 560 | dev->hard_header_len = RFC1483LLC_LEN; |
567 | dev->mtu = RFC1626_MTU; | 561 | dev->mtu = RFC1626_MTU; |
diff --git a/net/atm/lec.c b/net/atm/lec.c index e5e301550e8a..c0cba9a037e8 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c | |||
@@ -62,7 +62,6 @@ static unsigned char bridge_ula_lec[] = { 0x01, 0x80, 0xc2, 0x00, 0x00 }; | |||
62 | static int lec_open(struct net_device *dev); | 62 | static int lec_open(struct net_device *dev); |
63 | static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev); | 63 | static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev); |
64 | static int lec_close(struct net_device *dev); | 64 | static int lec_close(struct net_device *dev); |
65 | static struct net_device_stats *lec_get_stats(struct net_device *dev); | ||
66 | static void lec_init(struct net_device *dev); | 65 | static void lec_init(struct net_device *dev); |
67 | static struct lec_arp_table *lec_arp_find(struct lec_priv *priv, | 66 | static struct lec_arp_table *lec_arp_find(struct lec_priv *priv, |
68 | const unsigned char *mac_addr); | 67 | const unsigned char *mac_addr); |
@@ -218,28 +217,28 @@ static unsigned char *get_tr_dst(unsigned char *packet, unsigned char *rdesc) | |||
218 | 217 | ||
219 | static int lec_open(struct net_device *dev) | 218 | static int lec_open(struct net_device *dev) |
220 | { | 219 | { |
221 | struct lec_priv *priv = netdev_priv(dev); | ||
222 | |||
223 | netif_start_queue(dev); | 220 | netif_start_queue(dev); |
224 | memset(&priv->stats, 0, sizeof(struct net_device_stats)); | 221 | memset(&dev->stats, 0, sizeof(struct net_device_stats)); |
225 | 222 | ||
226 | return 0; | 223 | return 0; |
227 | } | 224 | } |
228 | 225 | ||
229 | static __inline__ void | 226 | static void |
230 | lec_send(struct atm_vcc *vcc, struct sk_buff *skb, struct lec_priv *priv) | 227 | lec_send(struct atm_vcc *vcc, struct sk_buff *skb) |
231 | { | 228 | { |
229 | struct net_device *dev = skb->dev; | ||
230 | |||
232 | ATM_SKB(skb)->vcc = vcc; | 231 | ATM_SKB(skb)->vcc = vcc; |
233 | ATM_SKB(skb)->atm_options = vcc->atm_options; | 232 | ATM_SKB(skb)->atm_options = vcc->atm_options; |
234 | 233 | ||
235 | atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); | 234 | atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); |
236 | if (vcc->send(vcc, skb) < 0) { | 235 | if (vcc->send(vcc, skb) < 0) { |
237 | priv->stats.tx_dropped++; | 236 | dev->stats.tx_dropped++; |
238 | return; | 237 | return; |
239 | } | 238 | } |
240 | 239 | ||
241 | priv->stats.tx_packets++; | 240 | dev->stats.tx_packets++; |
242 | priv->stats.tx_bytes += skb->len; | 241 | dev->stats.tx_bytes += skb->len; |
243 | } | 242 | } |
244 | 243 | ||
245 | static void lec_tx_timeout(struct net_device *dev) | 244 | static void lec_tx_timeout(struct net_device *dev) |
@@ -270,7 +269,7 @@ static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
270 | pr_debug("lec_start_xmit called\n"); | 269 | pr_debug("lec_start_xmit called\n"); |
271 | if (!priv->lecd) { | 270 | if (!priv->lecd) { |
272 | printk("%s:No lecd attached\n", dev->name); | 271 | printk("%s:No lecd attached\n", dev->name); |
273 | priv->stats.tx_errors++; | 272 | dev->stats.tx_errors++; |
274 | netif_stop_queue(dev); | 273 | netif_stop_queue(dev); |
275 | return -EUNATCH; | 274 | return -EUNATCH; |
276 | } | 275 | } |
@@ -345,7 +344,7 @@ static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
345 | GFP_ATOMIC); | 344 | GFP_ATOMIC); |
346 | dev_kfree_skb(skb); | 345 | dev_kfree_skb(skb); |
347 | if (skb2 == NULL) { | 346 | if (skb2 == NULL) { |
348 | priv->stats.tx_dropped++; | 347 | dev->stats.tx_dropped++; |
349 | return 0; | 348 | return 0; |
350 | } | 349 | } |
351 | skb = skb2; | 350 | skb = skb2; |
@@ -380,7 +379,7 @@ static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
380 | ("%s:lec_start_xmit: tx queue full or no arp entry, dropping, ", | 379 | ("%s:lec_start_xmit: tx queue full or no arp entry, dropping, ", |
381 | dev->name); | 380 | dev->name); |
382 | pr_debug("MAC address %pM\n", lec_h->h_dest); | 381 | pr_debug("MAC address %pM\n", lec_h->h_dest); |
383 | priv->stats.tx_dropped++; | 382 | dev->stats.tx_dropped++; |
384 | dev_kfree_skb(skb); | 383 | dev_kfree_skb(skb); |
385 | } | 384 | } |
386 | goto out; | 385 | goto out; |
@@ -392,10 +391,10 @@ static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
392 | while (entry && (skb2 = skb_dequeue(&entry->tx_wait))) { | 391 | while (entry && (skb2 = skb_dequeue(&entry->tx_wait))) { |
393 | pr_debug("lec.c: emptying tx queue, "); | 392 | pr_debug("lec.c: emptying tx queue, "); |
394 | pr_debug("MAC address %pM\n", lec_h->h_dest); | 393 | pr_debug("MAC address %pM\n", lec_h->h_dest); |
395 | lec_send(vcc, skb2, priv); | 394 | lec_send(vcc, skb2); |
396 | } | 395 | } |
397 | 396 | ||
398 | lec_send(vcc, skb, priv); | 397 | lec_send(vcc, skb); |
399 | 398 | ||
400 | if (!atm_may_send(vcc, 0)) { | 399 | if (!atm_may_send(vcc, 0)) { |
401 | struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc); | 400 | struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc); |
@@ -427,15 +426,6 @@ static int lec_close(struct net_device *dev) | |||
427 | return 0; | 426 | return 0; |
428 | } | 427 | } |
429 | 428 | ||
430 | /* | ||
431 | * Get the current statistics. | ||
432 | * This may be called with the card open or closed. | ||
433 | */ | ||
434 | static struct net_device_stats *lec_get_stats(struct net_device *dev) | ||
435 | { | ||
436 | return &((struct lec_priv *)netdev_priv(dev))->stats; | ||
437 | } | ||
438 | |||
439 | static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb) | 429 | static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb) |
440 | { | 430 | { |
441 | unsigned long flags; | 431 | unsigned long flags; |
@@ -677,17 +667,19 @@ static void lec_set_multicast_list(struct net_device *dev) | |||
677 | return; | 667 | return; |
678 | } | 668 | } |
679 | 669 | ||
670 | static const struct net_device_ops lec_netdev_ops = { | ||
671 | .ndo_open = lec_open, | ||
672 | .ndo_stop = lec_close, | ||
673 | .ndo_start_xmit = lec_start_xmit, | ||
674 | .ndo_change_mtu = lec_change_mtu, | ||
675 | .ndo_tx_timeout = lec_tx_timeout, | ||
676 | .ndo_set_multicast_list = lec_set_multicast_list, | ||
677 | }; | ||
678 | |||
679 | |||
680 | static void lec_init(struct net_device *dev) | 680 | static void lec_init(struct net_device *dev) |
681 | { | 681 | { |
682 | dev->change_mtu = lec_change_mtu; | 682 | dev->netdev_ops = &lec_netdev_ops; |
683 | dev->open = lec_open; | ||
684 | dev->stop = lec_close; | ||
685 | dev->hard_start_xmit = lec_start_xmit; | ||
686 | dev->tx_timeout = lec_tx_timeout; | ||
687 | |||
688 | dev->get_stats = lec_get_stats; | ||
689 | dev->set_multicast_list = lec_set_multicast_list; | ||
690 | dev->do_ioctl = NULL; | ||
691 | printk("%s: Initialized!\n", dev->name); | 683 | printk("%s: Initialized!\n", dev->name); |
692 | } | 684 | } |
693 | 685 | ||
@@ -810,8 +802,8 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb) | |||
810 | else | 802 | else |
811 | #endif | 803 | #endif |
812 | skb->protocol = eth_type_trans(skb, dev); | 804 | skb->protocol = eth_type_trans(skb, dev); |
813 | priv->stats.rx_packets++; | 805 | dev->stats.rx_packets++; |
814 | priv->stats.rx_bytes += skb->len; | 806 | dev->stats.rx_bytes += skb->len; |
815 | memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data)); | 807 | memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data)); |
816 | netif_rx(skb); | 808 | netif_rx(skb); |
817 | } | 809 | } |
@@ -1887,7 +1879,7 @@ restart: | |||
1887 | lec_arp_hold(entry); | 1879 | lec_arp_hold(entry); |
1888 | spin_unlock_irqrestore(&priv->lec_arp_lock, flags); | 1880 | spin_unlock_irqrestore(&priv->lec_arp_lock, flags); |
1889 | while ((skb = skb_dequeue(&entry->tx_wait)) != NULL) | 1881 | while ((skb = skb_dequeue(&entry->tx_wait)) != NULL) |
1890 | lec_send(vcc, skb, entry->priv); | 1882 | lec_send(vcc, skb); |
1891 | entry->last_used = jiffies; | 1883 | entry->last_used = jiffies; |
1892 | entry->status = ESI_FORWARD_DIRECT; | 1884 | entry->status = ESI_FORWARD_DIRECT; |
1893 | lec_arp_put(entry); | 1885 | lec_arp_put(entry); |
@@ -2305,7 +2297,7 @@ restart: | |||
2305 | lec_arp_hold(entry); | 2297 | lec_arp_hold(entry); |
2306 | spin_unlock_irqrestore(&priv->lec_arp_lock, flags); | 2298 | spin_unlock_irqrestore(&priv->lec_arp_lock, flags); |
2307 | while ((skb = skb_dequeue(&entry->tx_wait)) != NULL) | 2299 | while ((skb = skb_dequeue(&entry->tx_wait)) != NULL) |
2308 | lec_send(vcc, skb, entry->priv); | 2300 | lec_send(vcc, skb); |
2309 | entry->last_used = jiffies; | 2301 | entry->last_used = jiffies; |
2310 | entry->status = ESI_FORWARD_DIRECT; | 2302 | entry->status = ESI_FORWARD_DIRECT; |
2311 | lec_arp_put(entry); | 2303 | lec_arp_put(entry); |
diff --git a/net/atm/lec.h b/net/atm/lec.h index 0d376682c1a3..9d14d196cc1d 100644 --- a/net/atm/lec.h +++ b/net/atm/lec.h | |||
@@ -69,7 +69,6 @@ struct lane2_ops { | |||
69 | #define LEC_ARP_TABLE_SIZE 16 | 69 | #define LEC_ARP_TABLE_SIZE 16 |
70 | 70 | ||
71 | struct lec_priv { | 71 | struct lec_priv { |
72 | struct net_device_stats stats; | ||
73 | unsigned short lecid; /* Lecid of this client */ | 72 | unsigned short lecid; /* Lecid of this client */ |
74 | struct hlist_head lec_arp_empty_ones; | 73 | struct hlist_head lec_arp_empty_ones; |
75 | /* Used for storing VCC's that don't have a MAC address attached yet */ | 74 | /* Used for storing VCC's that don't have a MAC address attached yet */ |
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 00d9e5e13158..d127fd3ba5c6 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c | |||
@@ -1986,7 +1986,7 @@ static const struct proto_ops ax25_proto_ops = { | |||
1986 | * Called by socket.c on kernel start up | 1986 | * Called by socket.c on kernel start up |
1987 | */ | 1987 | */ |
1988 | static struct packet_type ax25_packet_type = { | 1988 | static struct packet_type ax25_packet_type = { |
1989 | .type = __constant_htons(ETH_P_AX25), | 1989 | .type = cpu_to_be16(ETH_P_AX25), |
1990 | .dev = NULL, /* All devices */ | 1990 | .dev = NULL, /* All devices */ |
1991 | .func = ax25_kiss_rcv, | 1991 | .func = ax25_kiss_rcv, |
1992 | }; | 1992 | }; |
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index cf754ace0b75..3953ac4214c8 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c | |||
@@ -107,7 +107,7 @@ static void fake_update_pmtu(struct dst_entry *dst, u32 mtu) | |||
107 | 107 | ||
108 | static struct dst_ops fake_dst_ops = { | 108 | static struct dst_ops fake_dst_ops = { |
109 | .family = AF_INET, | 109 | .family = AF_INET, |
110 | .protocol = __constant_htons(ETH_P_IP), | 110 | .protocol = cpu_to_be16(ETH_P_IP), |
111 | .update_pmtu = fake_update_pmtu, | 111 | .update_pmtu = fake_update_pmtu, |
112 | .entries = ATOMIC_INIT(0), | 112 | .entries = ATOMIC_INIT(0), |
113 | }; | 113 | }; |
diff --git a/net/can/af_can.c b/net/can/af_can.c index fa417ca6cbe6..d90e8dd975fc 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c | |||
@@ -828,7 +828,7 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg, | |||
828 | */ | 828 | */ |
829 | 829 | ||
830 | static struct packet_type can_packet __read_mostly = { | 830 | static struct packet_type can_packet __read_mostly = { |
831 | .type = __constant_htons(ETH_P_CAN), | 831 | .type = cpu_to_be16(ETH_P_CAN), |
832 | .dev = NULL, | 832 | .dev = NULL, |
833 | .func = can_rcv, | 833 | .func = can_rcv, |
834 | }; | 834 | }; |
diff --git a/net/core/dev.c b/net/core/dev.c index 5379b0c1190a..220f52a1001e 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -135,6 +135,14 @@ | |||
135 | /* This should be increased if a protocol with a bigger head is added. */ | 135 | /* This should be increased if a protocol with a bigger head is added. */ |
136 | #define GRO_MAX_HEAD (MAX_HEADER + 128) | 136 | #define GRO_MAX_HEAD (MAX_HEADER + 128) |
137 | 137 | ||
138 | enum { | ||
139 | GRO_MERGED, | ||
140 | GRO_MERGED_FREE, | ||
141 | GRO_HELD, | ||
142 | GRO_NORMAL, | ||
143 | GRO_DROP, | ||
144 | }; | ||
145 | |||
138 | /* | 146 | /* |
139 | * The list of packet types we will receive (as opposed to discard) | 147 | * The list of packet types we will receive (as opposed to discard) |
140 | * and the routines to invoke. | 148 | * and the routines to invoke. |
@@ -207,6 +215,13 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) | |||
207 | return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)]; | 215 | return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)]; |
208 | } | 216 | } |
209 | 217 | ||
218 | static inline void *skb_gro_mac_header(struct sk_buff *skb) | ||
219 | { | ||
220 | return skb_mac_header(skb) < skb->data ? skb_mac_header(skb) : | ||
221 | page_address(skb_shinfo(skb)->frags[0].page) + | ||
222 | skb_shinfo(skb)->frags[0].page_offset; | ||
223 | } | ||
224 | |||
210 | /* Device list insertion */ | 225 | /* Device list insertion */ |
211 | static int list_netdevice(struct net_device *dev) | 226 | static int list_netdevice(struct net_device *dev) |
212 | { | 227 | { |
@@ -1708,56 +1723,26 @@ out_kfree_skb: | |||
1708 | return 0; | 1723 | return 0; |
1709 | } | 1724 | } |
1710 | 1725 | ||
1711 | static u32 simple_tx_hashrnd; | 1726 | static u32 skb_tx_hashrnd; |
1712 | static int simple_tx_hashrnd_initialized = 0; | 1727 | static int skb_tx_hashrnd_initialized = 0; |
1713 | 1728 | ||
1714 | static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb) | 1729 | static u16 skb_tx_hash(struct net_device *dev, struct sk_buff *skb) |
1715 | { | 1730 | { |
1716 | u32 addr1, addr2, ports; | 1731 | u32 hash; |
1717 | u32 hash, ihl; | ||
1718 | u8 ip_proto = 0; | ||
1719 | 1732 | ||
1720 | if (unlikely(!simple_tx_hashrnd_initialized)) { | 1733 | if (unlikely(!skb_tx_hashrnd_initialized)) { |
1721 | get_random_bytes(&simple_tx_hashrnd, 4); | 1734 | get_random_bytes(&skb_tx_hashrnd, 4); |
1722 | simple_tx_hashrnd_initialized = 1; | 1735 | skb_tx_hashrnd_initialized = 1; |
1723 | } | 1736 | } |
1724 | 1737 | ||
1725 | switch (skb->protocol) { | 1738 | if (skb_rx_queue_recorded(skb)) { |
1726 | case htons(ETH_P_IP): | 1739 | hash = skb_get_rx_queue(skb); |
1727 | if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET))) | 1740 | } else if (skb->sk && skb->sk->sk_hash) { |
1728 | ip_proto = ip_hdr(skb)->protocol; | 1741 | hash = skb->sk->sk_hash; |
1729 | addr1 = ip_hdr(skb)->saddr; | 1742 | } else |
1730 | addr2 = ip_hdr(skb)->daddr; | 1743 | hash = skb->protocol; |
1731 | ihl = ip_hdr(skb)->ihl; | ||
1732 | break; | ||
1733 | case htons(ETH_P_IPV6): | ||
1734 | ip_proto = ipv6_hdr(skb)->nexthdr; | ||
1735 | addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3]; | ||
1736 | addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3]; | ||
1737 | ihl = (40 >> 2); | ||
1738 | break; | ||
1739 | default: | ||
1740 | return 0; | ||
1741 | } | ||
1742 | |||
1743 | |||
1744 | switch (ip_proto) { | ||
1745 | case IPPROTO_TCP: | ||
1746 | case IPPROTO_UDP: | ||
1747 | case IPPROTO_DCCP: | ||
1748 | case IPPROTO_ESP: | ||
1749 | case IPPROTO_AH: | ||
1750 | case IPPROTO_SCTP: | ||
1751 | case IPPROTO_UDPLITE: | ||
1752 | ports = *((u32 *) (skb_network_header(skb) + (ihl * 4))); | ||
1753 | break; | ||
1754 | |||
1755 | default: | ||
1756 | ports = 0; | ||
1757 | break; | ||
1758 | } | ||
1759 | 1744 | ||
1760 | hash = jhash_3words(addr1, addr2, ports, simple_tx_hashrnd); | 1745 | hash = jhash_1word(hash, skb_tx_hashrnd); |
1761 | 1746 | ||
1762 | return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32); | 1747 | return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32); |
1763 | } | 1748 | } |
@@ -1771,7 +1756,7 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev, | |||
1771 | if (ops->ndo_select_queue) | 1756 | if (ops->ndo_select_queue) |
1772 | queue_index = ops->ndo_select_queue(dev, skb); | 1757 | queue_index = ops->ndo_select_queue(dev, skb); |
1773 | else if (dev->real_num_tx_queues > 1) | 1758 | else if (dev->real_num_tx_queues > 1) |
1774 | queue_index = simple_tx_hash(dev, skb); | 1759 | queue_index = skb_tx_hash(dev, skb); |
1775 | 1760 | ||
1776 | skb_set_queue_mapping(skb, queue_index); | 1761 | skb_set_queue_mapping(skb, queue_index); |
1777 | return netdev_get_tx_queue(dev, queue_index); | 1762 | return netdev_get_tx_queue(dev, queue_index); |
@@ -2372,7 +2357,6 @@ static int napi_gro_complete(struct sk_buff *skb) | |||
2372 | 2357 | ||
2373 | out: | 2358 | out: |
2374 | skb_shinfo(skb)->gso_size = 0; | 2359 | skb_shinfo(skb)->gso_size = 0; |
2375 | __skb_push(skb, -skb_network_offset(skb)); | ||
2376 | return netif_receive_skb(skb); | 2360 | return netif_receive_skb(skb); |
2377 | } | 2361 | } |
2378 | 2362 | ||
@@ -2390,6 +2374,25 @@ void napi_gro_flush(struct napi_struct *napi) | |||
2390 | } | 2374 | } |
2391 | EXPORT_SYMBOL(napi_gro_flush); | 2375 | EXPORT_SYMBOL(napi_gro_flush); |
2392 | 2376 | ||
2377 | void *skb_gro_header(struct sk_buff *skb, unsigned int hlen) | ||
2378 | { | ||
2379 | unsigned int offset = skb_gro_offset(skb); | ||
2380 | |||
2381 | hlen += offset; | ||
2382 | if (hlen <= skb_headlen(skb)) | ||
2383 | return skb->data + offset; | ||
2384 | |||
2385 | if (unlikely(!skb_shinfo(skb)->nr_frags || | ||
2386 | skb_shinfo(skb)->frags[0].size <= | ||
2387 | hlen - skb_headlen(skb) || | ||
2388 | PageHighMem(skb_shinfo(skb)->frags[0].page))) | ||
2389 | return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL; | ||
2390 | |||
2391 | return page_address(skb_shinfo(skb)->frags[0].page) + | ||
2392 | skb_shinfo(skb)->frags[0].page_offset + offset; | ||
2393 | } | ||
2394 | EXPORT_SYMBOL(skb_gro_header); | ||
2395 | |||
2393 | int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | 2396 | int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) |
2394 | { | 2397 | { |
2395 | struct sk_buff **pp = NULL; | 2398 | struct sk_buff **pp = NULL; |
@@ -2399,7 +2402,7 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | |||
2399 | int count = 0; | 2402 | int count = 0; |
2400 | int same_flow; | 2403 | int same_flow; |
2401 | int mac_len; | 2404 | int mac_len; |
2402 | int free; | 2405 | int ret; |
2403 | 2406 | ||
2404 | if (!(skb->dev->features & NETIF_F_GRO)) | 2407 | if (!(skb->dev->features & NETIF_F_GRO)) |
2405 | goto normal; | 2408 | goto normal; |
@@ -2410,11 +2413,13 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | |||
2410 | rcu_read_lock(); | 2413 | rcu_read_lock(); |
2411 | list_for_each_entry_rcu(ptype, head, list) { | 2414 | list_for_each_entry_rcu(ptype, head, list) { |
2412 | struct sk_buff *p; | 2415 | struct sk_buff *p; |
2416 | void *mac; | ||
2413 | 2417 | ||
2414 | if (ptype->type != type || ptype->dev || !ptype->gro_receive) | 2418 | if (ptype->type != type || ptype->dev || !ptype->gro_receive) |
2415 | continue; | 2419 | continue; |
2416 | 2420 | ||
2417 | skb_reset_network_header(skb); | 2421 | skb_set_network_header(skb, skb_gro_offset(skb)); |
2422 | mac = skb_gro_mac_header(skb); | ||
2418 | mac_len = skb->network_header - skb->mac_header; | 2423 | mac_len = skb->network_header - skb->mac_header; |
2419 | skb->mac_len = mac_len; | 2424 | skb->mac_len = mac_len; |
2420 | NAPI_GRO_CB(skb)->same_flow = 0; | 2425 | NAPI_GRO_CB(skb)->same_flow = 0; |
@@ -2428,8 +2433,7 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | |||
2428 | continue; | 2433 | continue; |
2429 | 2434 | ||
2430 | if (p->mac_len != mac_len || | 2435 | if (p->mac_len != mac_len || |
2431 | memcmp(skb_mac_header(p), skb_mac_header(skb), | 2436 | memcmp(skb_mac_header(p), mac, mac_len)) |
2432 | mac_len)) | ||
2433 | NAPI_GRO_CB(p)->same_flow = 0; | 2437 | NAPI_GRO_CB(p)->same_flow = 0; |
2434 | } | 2438 | } |
2435 | 2439 | ||
@@ -2442,7 +2446,7 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | |||
2442 | goto normal; | 2446 | goto normal; |
2443 | 2447 | ||
2444 | same_flow = NAPI_GRO_CB(skb)->same_flow; | 2448 | same_flow = NAPI_GRO_CB(skb)->same_flow; |
2445 | free = NAPI_GRO_CB(skb)->free; | 2449 | ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED; |
2446 | 2450 | ||
2447 | if (pp) { | 2451 | if (pp) { |
2448 | struct sk_buff *nskb = *pp; | 2452 | struct sk_buff *nskb = *pp; |
@@ -2456,21 +2460,28 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | |||
2456 | if (same_flow) | 2460 | if (same_flow) |
2457 | goto ok; | 2461 | goto ok; |
2458 | 2462 | ||
2459 | if (NAPI_GRO_CB(skb)->flush || count >= MAX_GRO_SKBS) { | 2463 | if (NAPI_GRO_CB(skb)->flush || count >= MAX_GRO_SKBS) |
2460 | __skb_push(skb, -skb_network_offset(skb)); | ||
2461 | goto normal; | 2464 | goto normal; |
2462 | } | ||
2463 | 2465 | ||
2464 | NAPI_GRO_CB(skb)->count = 1; | 2466 | NAPI_GRO_CB(skb)->count = 1; |
2465 | skb_shinfo(skb)->gso_size = skb->len; | 2467 | skb_shinfo(skb)->gso_size = skb_gro_len(skb); |
2466 | skb->next = napi->gro_list; | 2468 | skb->next = napi->gro_list; |
2467 | napi->gro_list = skb; | 2469 | napi->gro_list = skb; |
2470 | ret = GRO_HELD; | ||
2471 | |||
2472 | pull: | ||
2473 | if (unlikely(!pskb_may_pull(skb, skb_gro_offset(skb)))) { | ||
2474 | if (napi->gro_list == skb) | ||
2475 | napi->gro_list = skb->next; | ||
2476 | ret = GRO_DROP; | ||
2477 | } | ||
2468 | 2478 | ||
2469 | ok: | 2479 | ok: |
2470 | return free; | 2480 | return ret; |
2471 | 2481 | ||
2472 | normal: | 2482 | normal: |
2473 | return -1; | 2483 | ret = GRO_NORMAL; |
2484 | goto pull; | ||
2474 | } | 2485 | } |
2475 | EXPORT_SYMBOL(dev_gro_receive); | 2486 | EXPORT_SYMBOL(dev_gro_receive); |
2476 | 2487 | ||
@@ -2486,18 +2497,32 @@ static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | |||
2486 | return dev_gro_receive(napi, skb); | 2497 | return dev_gro_receive(napi, skb); |
2487 | } | 2498 | } |
2488 | 2499 | ||
2489 | int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | 2500 | int napi_skb_finish(int ret, struct sk_buff *skb) |
2490 | { | 2501 | { |
2491 | switch (__napi_gro_receive(napi, skb)) { | 2502 | int err = NET_RX_SUCCESS; |
2492 | case -1: | 2503 | |
2504 | switch (ret) { | ||
2505 | case GRO_NORMAL: | ||
2493 | return netif_receive_skb(skb); | 2506 | return netif_receive_skb(skb); |
2494 | 2507 | ||
2495 | case 1: | 2508 | case GRO_DROP: |
2509 | err = NET_RX_DROP; | ||
2510 | /* fall through */ | ||
2511 | |||
2512 | case GRO_MERGED_FREE: | ||
2496 | kfree_skb(skb); | 2513 | kfree_skb(skb); |
2497 | break; | 2514 | break; |
2498 | } | 2515 | } |
2499 | 2516 | ||
2500 | return NET_RX_SUCCESS; | 2517 | return err; |
2518 | } | ||
2519 | EXPORT_SYMBOL(napi_skb_finish); | ||
2520 | |||
2521 | int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | ||
2522 | { | ||
2523 | skb_gro_reset_offset(skb); | ||
2524 | |||
2525 | return napi_skb_finish(__napi_gro_receive(napi, skb), skb); | ||
2501 | } | 2526 | } |
2502 | EXPORT_SYMBOL(napi_gro_receive); | 2527 | EXPORT_SYMBOL(napi_gro_receive); |
2503 | 2528 | ||
@@ -2515,6 +2540,9 @@ struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi, | |||
2515 | { | 2540 | { |
2516 | struct net_device *dev = napi->dev; | 2541 | struct net_device *dev = napi->dev; |
2517 | struct sk_buff *skb = napi->skb; | 2542 | struct sk_buff *skb = napi->skb; |
2543 | struct ethhdr *eth; | ||
2544 | skb_frag_t *frag; | ||
2545 | int i; | ||
2518 | 2546 | ||
2519 | napi->skb = NULL; | 2547 | napi->skb = NULL; |
2520 | 2548 | ||
@@ -2527,20 +2555,36 @@ struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi, | |||
2527 | } | 2555 | } |
2528 | 2556 | ||
2529 | BUG_ON(info->nr_frags > MAX_SKB_FRAGS); | 2557 | BUG_ON(info->nr_frags > MAX_SKB_FRAGS); |
2558 | frag = &info->frags[info->nr_frags - 1]; | ||
2559 | |||
2560 | for (i = skb_shinfo(skb)->nr_frags; i < info->nr_frags; i++) { | ||
2561 | skb_fill_page_desc(skb, i, frag->page, frag->page_offset, | ||
2562 | frag->size); | ||
2563 | frag++; | ||
2564 | } | ||
2530 | skb_shinfo(skb)->nr_frags = info->nr_frags; | 2565 | skb_shinfo(skb)->nr_frags = info->nr_frags; |
2531 | memcpy(skb_shinfo(skb)->frags, info->frags, sizeof(info->frags)); | ||
2532 | 2566 | ||
2533 | skb->data_len = info->len; | 2567 | skb->data_len = info->len; |
2534 | skb->len += info->len; | 2568 | skb->len += info->len; |
2535 | skb->truesize += info->len; | 2569 | skb->truesize += info->len; |
2536 | 2570 | ||
2537 | if (!pskb_may_pull(skb, ETH_HLEN)) { | 2571 | skb_reset_mac_header(skb); |
2572 | skb_gro_reset_offset(skb); | ||
2573 | |||
2574 | eth = skb_gro_header(skb, sizeof(*eth)); | ||
2575 | if (!eth) { | ||
2538 | napi_reuse_skb(napi, skb); | 2576 | napi_reuse_skb(napi, skb); |
2539 | skb = NULL; | 2577 | skb = NULL; |
2540 | goto out; | 2578 | goto out; |
2541 | } | 2579 | } |
2542 | 2580 | ||
2543 | skb->protocol = eth_type_trans(skb, dev); | 2581 | skb_gro_pull(skb, sizeof(*eth)); |
2582 | |||
2583 | /* | ||
2584 | * This works because the only protocols we care about don't require | ||
2585 | * special handling. We'll fix it up properly at the end. | ||
2586 | */ | ||
2587 | skb->protocol = eth->h_proto; | ||
2544 | 2588 | ||
2545 | skb->ip_summed = info->ip_summed; | 2589 | skb->ip_summed = info->ip_summed; |
2546 | skb->csum = info->csum; | 2590 | skb->csum = info->csum; |
@@ -2550,29 +2594,43 @@ out: | |||
2550 | } | 2594 | } |
2551 | EXPORT_SYMBOL(napi_fraginfo_skb); | 2595 | EXPORT_SYMBOL(napi_fraginfo_skb); |
2552 | 2596 | ||
2553 | int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info) | 2597 | int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret) |
2554 | { | 2598 | { |
2555 | struct sk_buff *skb = napi_fraginfo_skb(napi, info); | 2599 | int err = NET_RX_SUCCESS; |
2556 | int err = NET_RX_DROP; | ||
2557 | 2600 | ||
2558 | if (!skb) | 2601 | switch (ret) { |
2559 | goto out; | 2602 | case GRO_NORMAL: |
2603 | case GRO_HELD: | ||
2604 | skb->protocol = eth_type_trans(skb, napi->dev); | ||
2560 | 2605 | ||
2561 | err = NET_RX_SUCCESS; | 2606 | if (ret == GRO_NORMAL) |
2607 | return netif_receive_skb(skb); | ||
2562 | 2608 | ||
2563 | switch (__napi_gro_receive(napi, skb)) { | 2609 | skb_gro_pull(skb, -ETH_HLEN); |
2564 | case -1: | 2610 | break; |
2565 | return netif_receive_skb(skb); | ||
2566 | 2611 | ||
2567 | case 0: | 2612 | case GRO_DROP: |
2568 | goto out; | 2613 | err = NET_RX_DROP; |
2569 | } | 2614 | /* fall through */ |
2570 | 2615 | ||
2571 | napi_reuse_skb(napi, skb); | 2616 | case GRO_MERGED_FREE: |
2617 | napi_reuse_skb(napi, skb); | ||
2618 | break; | ||
2619 | } | ||
2572 | 2620 | ||
2573 | out: | ||
2574 | return err; | 2621 | return err; |
2575 | } | 2622 | } |
2623 | EXPORT_SYMBOL(napi_frags_finish); | ||
2624 | |||
2625 | int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info) | ||
2626 | { | ||
2627 | struct sk_buff *skb = napi_fraginfo_skb(napi, info); | ||
2628 | |||
2629 | if (!skb) | ||
2630 | return NET_RX_DROP; | ||
2631 | |||
2632 | return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb)); | ||
2633 | } | ||
2576 | EXPORT_SYMBOL(napi_gro_frags); | 2634 | EXPORT_SYMBOL(napi_gro_frags); |
2577 | 2635 | ||
2578 | static int process_backlog(struct napi_struct *napi, int quota) | 2636 | static int process_backlog(struct napi_struct *napi, int quota) |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index da74b844f4ea..e55d1ef5690d 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -1333,14 +1333,39 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) | |||
1333 | put_page(spd->pages[i]); | 1333 | put_page(spd->pages[i]); |
1334 | } | 1334 | } |
1335 | 1335 | ||
1336 | static inline struct page *linear_to_page(struct page *page, unsigned int len, | 1336 | static inline struct page *linear_to_page(struct page *page, unsigned int *len, |
1337 | unsigned int offset) | 1337 | unsigned int *offset, |
1338 | { | 1338 | struct sk_buff *skb) |
1339 | struct page *p = alloc_pages(GFP_KERNEL, 0); | 1339 | { |
1340 | struct sock *sk = skb->sk; | ||
1341 | struct page *p = sk->sk_sndmsg_page; | ||
1342 | unsigned int off; | ||
1343 | |||
1344 | if (!p) { | ||
1345 | new_page: | ||
1346 | p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0); | ||
1347 | if (!p) | ||
1348 | return NULL; | ||
1340 | 1349 | ||
1341 | if (!p) | 1350 | off = sk->sk_sndmsg_off = 0; |
1342 | return NULL; | 1351 | /* hold one ref to this page until it's full */ |
1343 | memcpy(page_address(p) + offset, page_address(page) + offset, len); | 1352 | } else { |
1353 | unsigned int mlen; | ||
1354 | |||
1355 | off = sk->sk_sndmsg_off; | ||
1356 | mlen = PAGE_SIZE - off; | ||
1357 | if (mlen < 64 && mlen < *len) { | ||
1358 | put_page(p); | ||
1359 | goto new_page; | ||
1360 | } | ||
1361 | |||
1362 | *len = min_t(unsigned int, *len, mlen); | ||
1363 | } | ||
1364 | |||
1365 | memcpy(page_address(p) + off, page_address(page) + *offset, *len); | ||
1366 | sk->sk_sndmsg_off += *len; | ||
1367 | *offset = off; | ||
1368 | get_page(p); | ||
1344 | 1369 | ||
1345 | return p; | 1370 | return p; |
1346 | } | 1371 | } |
@@ -1349,21 +1374,21 @@ static inline struct page *linear_to_page(struct page *page, unsigned int len, | |||
1349 | * Fill page/offset/length into spd, if it can hold more pages. | 1374 | * Fill page/offset/length into spd, if it can hold more pages. |
1350 | */ | 1375 | */ |
1351 | static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page, | 1376 | static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page, |
1352 | unsigned int len, unsigned int offset, | 1377 | unsigned int *len, unsigned int offset, |
1353 | struct sk_buff *skb, int linear) | 1378 | struct sk_buff *skb, int linear) |
1354 | { | 1379 | { |
1355 | if (unlikely(spd->nr_pages == PIPE_BUFFERS)) | 1380 | if (unlikely(spd->nr_pages == PIPE_BUFFERS)) |
1356 | return 1; | 1381 | return 1; |
1357 | 1382 | ||
1358 | if (linear) { | 1383 | if (linear) { |
1359 | page = linear_to_page(page, len, offset); | 1384 | page = linear_to_page(page, len, &offset, skb); |
1360 | if (!page) | 1385 | if (!page) |
1361 | return 1; | 1386 | return 1; |
1362 | } else | 1387 | } else |
1363 | get_page(page); | 1388 | get_page(page); |
1364 | 1389 | ||
1365 | spd->pages[spd->nr_pages] = page; | 1390 | spd->pages[spd->nr_pages] = page; |
1366 | spd->partial[spd->nr_pages].len = len; | 1391 | spd->partial[spd->nr_pages].len = *len; |
1367 | spd->partial[spd->nr_pages].offset = offset; | 1392 | spd->partial[spd->nr_pages].offset = offset; |
1368 | spd->nr_pages++; | 1393 | spd->nr_pages++; |
1369 | 1394 | ||
@@ -1405,7 +1430,7 @@ static inline int __splice_segment(struct page *page, unsigned int poff, | |||
1405 | /* the linear region may spread across several pages */ | 1430 | /* the linear region may spread across several pages */ |
1406 | flen = min_t(unsigned int, flen, PAGE_SIZE - poff); | 1431 | flen = min_t(unsigned int, flen, PAGE_SIZE - poff); |
1407 | 1432 | ||
1408 | if (spd_fill_page(spd, page, flen, poff, skb, linear)) | 1433 | if (spd_fill_page(spd, page, &flen, poff, skb, linear)) |
1409 | return 1; | 1434 | return 1; |
1410 | 1435 | ||
1411 | __segment_seek(&page, &poff, &plen, flen); | 1436 | __segment_seek(&page, &poff, &plen, flen); |
@@ -2585,17 +2610,23 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) | |||
2585 | struct sk_buff *p = *head; | 2610 | struct sk_buff *p = *head; |
2586 | struct sk_buff *nskb; | 2611 | struct sk_buff *nskb; |
2587 | unsigned int headroom; | 2612 | unsigned int headroom; |
2588 | unsigned int hlen = p->data - skb_mac_header(p); | 2613 | unsigned int len = skb_gro_len(skb); |
2589 | unsigned int len = skb->len; | ||
2590 | 2614 | ||
2591 | if (hlen + p->len + len >= 65536) | 2615 | if (p->len + len >= 65536) |
2592 | return -E2BIG; | 2616 | return -E2BIG; |
2593 | 2617 | ||
2594 | if (skb_shinfo(p)->frag_list) | 2618 | if (skb_shinfo(p)->frag_list) |
2595 | goto merge; | 2619 | goto merge; |
2596 | else if (!skb_headlen(p) && !skb_headlen(skb) && | 2620 | else if (skb_headlen(skb) <= skb_gro_offset(skb)) { |
2597 | skb_shinfo(p)->nr_frags + skb_shinfo(skb)->nr_frags < | 2621 | if (skb_shinfo(p)->nr_frags + skb_shinfo(skb)->nr_frags > |
2598 | MAX_SKB_FRAGS) { | 2622 | MAX_SKB_FRAGS) |
2623 | return -E2BIG; | ||
2624 | |||
2625 | skb_shinfo(skb)->frags[0].page_offset += | ||
2626 | skb_gro_offset(skb) - skb_headlen(skb); | ||
2627 | skb_shinfo(skb)->frags[0].size -= | ||
2628 | skb_gro_offset(skb) - skb_headlen(skb); | ||
2629 | |||
2599 | memcpy(skb_shinfo(p)->frags + skb_shinfo(p)->nr_frags, | 2630 | memcpy(skb_shinfo(p)->frags + skb_shinfo(p)->nr_frags, |
2600 | skb_shinfo(skb)->frags, | 2631 | skb_shinfo(skb)->frags, |
2601 | skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t)); | 2632 | skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t)); |
@@ -2612,7 +2643,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) | |||
2612 | } | 2643 | } |
2613 | 2644 | ||
2614 | headroom = skb_headroom(p); | 2645 | headroom = skb_headroom(p); |
2615 | nskb = netdev_alloc_skb(p->dev, headroom); | 2646 | nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p)); |
2616 | if (unlikely(!nskb)) | 2647 | if (unlikely(!nskb)) |
2617 | return -ENOMEM; | 2648 | return -ENOMEM; |
2618 | 2649 | ||
@@ -2620,12 +2651,15 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) | |||
2620 | nskb->mac_len = p->mac_len; | 2651 | nskb->mac_len = p->mac_len; |
2621 | 2652 | ||
2622 | skb_reserve(nskb, headroom); | 2653 | skb_reserve(nskb, headroom); |
2654 | __skb_put(nskb, skb_gro_offset(p)); | ||
2623 | 2655 | ||
2624 | skb_set_mac_header(nskb, -hlen); | 2656 | skb_set_mac_header(nskb, skb_mac_header(p) - p->data); |
2625 | skb_set_network_header(nskb, skb_network_offset(p)); | 2657 | skb_set_network_header(nskb, skb_network_offset(p)); |
2626 | skb_set_transport_header(nskb, skb_transport_offset(p)); | 2658 | skb_set_transport_header(nskb, skb_transport_offset(p)); |
2627 | 2659 | ||
2628 | memcpy(skb_mac_header(nskb), skb_mac_header(p), hlen); | 2660 | __skb_pull(p, skb_gro_offset(p)); |
2661 | memcpy(skb_mac_header(nskb), skb_mac_header(p), | ||
2662 | p->data - skb_mac_header(p)); | ||
2629 | 2663 | ||
2630 | *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); | 2664 | *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); |
2631 | skb_shinfo(nskb)->frag_list = p; | 2665 | skb_shinfo(nskb)->frag_list = p; |
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index f2230fc168e1..08a569ff02d1 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h | |||
@@ -42,9 +42,11 @@ | |||
42 | extern int dccp_debug; | 42 | extern int dccp_debug; |
43 | #define dccp_pr_debug(format, a...) DCCP_PR_DEBUG(dccp_debug, format, ##a) | 43 | #define dccp_pr_debug(format, a...) DCCP_PR_DEBUG(dccp_debug, format, ##a) |
44 | #define dccp_pr_debug_cat(format, a...) DCCP_PRINTK(dccp_debug, format, ##a) | 44 | #define dccp_pr_debug_cat(format, a...) DCCP_PRINTK(dccp_debug, format, ##a) |
45 | #define dccp_debug(fmt, a...) dccp_pr_debug_cat(KERN_DEBUG fmt, ##a) | ||
45 | #else | 46 | #else |
46 | #define dccp_pr_debug(format, a...) | 47 | #define dccp_pr_debug(format, a...) |
47 | #define dccp_pr_debug_cat(format, a...) | 48 | #define dccp_pr_debug_cat(format, a...) |
49 | #define dccp_debug(format, a...) | ||
48 | #endif | 50 | #endif |
49 | 51 | ||
50 | extern struct inet_hashinfo dccp_hashinfo; | 52 | extern struct inet_hashinfo dccp_hashinfo; |
@@ -95,9 +97,6 @@ extern void dccp_time_wait(struct sock *sk, int state, int timeo); | |||
95 | extern int sysctl_dccp_request_retries; | 97 | extern int sysctl_dccp_request_retries; |
96 | extern int sysctl_dccp_retries1; | 98 | extern int sysctl_dccp_retries1; |
97 | extern int sysctl_dccp_retries2; | 99 | extern int sysctl_dccp_retries2; |
98 | extern int sysctl_dccp_feat_sequence_window; | ||
99 | extern int sysctl_dccp_feat_rx_ccid; | ||
100 | extern int sysctl_dccp_feat_tx_ccid; | ||
101 | extern int sysctl_dccp_tx_qlen; | 100 | extern int sysctl_dccp_tx_qlen; |
102 | extern int sysctl_dccp_sync_ratelimit; | 101 | extern int sysctl_dccp_sync_ratelimit; |
103 | 102 | ||
@@ -409,23 +408,21 @@ static inline void dccp_hdr_set_ack(struct dccp_hdr_ack_bits *dhack, | |||
409 | static inline void dccp_update_gsr(struct sock *sk, u64 seq) | 408 | static inline void dccp_update_gsr(struct sock *sk, u64 seq) |
410 | { | 409 | { |
411 | struct dccp_sock *dp = dccp_sk(sk); | 410 | struct dccp_sock *dp = dccp_sk(sk); |
412 | const struct dccp_minisock *dmsk = dccp_msk(sk); | ||
413 | 411 | ||
414 | dp->dccps_gsr = seq; | 412 | dp->dccps_gsr = seq; |
415 | dccp_set_seqno(&dp->dccps_swl, | 413 | /* Sequence validity window depends on remote Sequence Window (7.5.1) */ |
416 | dp->dccps_gsr + 1 - (dmsk->dccpms_sequence_window / 4)); | 414 | dp->dccps_swl = SUB48(ADD48(dp->dccps_gsr, 1), dp->dccps_r_seq_win / 4); |
417 | dccp_set_seqno(&dp->dccps_swh, | 415 | dp->dccps_swh = ADD48(dp->dccps_gsr, (3 * dp->dccps_r_seq_win) / 4); |
418 | dp->dccps_gsr + (3 * dmsk->dccpms_sequence_window) / 4); | ||
419 | } | 416 | } |
420 | 417 | ||
421 | static inline void dccp_update_gss(struct sock *sk, u64 seq) | 418 | static inline void dccp_update_gss(struct sock *sk, u64 seq) |
422 | { | 419 | { |
423 | struct dccp_sock *dp = dccp_sk(sk); | 420 | struct dccp_sock *dp = dccp_sk(sk); |
424 | 421 | ||
425 | dp->dccps_awh = dp->dccps_gss = seq; | 422 | dp->dccps_gss = seq; |
426 | dccp_set_seqno(&dp->dccps_awl, | 423 | /* Ack validity window depends on local Sequence Window value (7.5.1) */ |
427 | (dp->dccps_gss - | 424 | dp->dccps_awl = SUB48(ADD48(dp->dccps_gss, 1), dp->dccps_l_seq_win); |
428 | dccp_msk(sk)->dccpms_sequence_window + 1)); | 425 | dp->dccps_awh = dp->dccps_gss; |
429 | } | 426 | } |
430 | 427 | ||
431 | static inline int dccp_ack_pending(const struct sock *sk) | 428 | static inline int dccp_ack_pending(const struct sock *sk) |
diff --git a/net/dccp/feat.c b/net/dccp/feat.c index 4152308958ab..b04160a2eea5 100644 --- a/net/dccp/feat.c +++ b/net/dccp/feat.c | |||
@@ -25,6 +25,11 @@ | |||
25 | #include "ccid.h" | 25 | #include "ccid.h" |
26 | #include "feat.h" | 26 | #include "feat.h" |
27 | 27 | ||
28 | /* feature-specific sysctls - initialised to the defaults from RFC 4340, 6.4 */ | ||
29 | unsigned long sysctl_dccp_sequence_window __read_mostly = 100; | ||
30 | int sysctl_dccp_rx_ccid __read_mostly = 2, | ||
31 | sysctl_dccp_tx_ccid __read_mostly = 2; | ||
32 | |||
28 | /* | 33 | /* |
29 | * Feature activation handlers. | 34 | * Feature activation handlers. |
30 | * | 35 | * |
@@ -51,8 +56,17 @@ static int dccp_hdlr_ccid(struct sock *sk, u64 ccid, bool rx) | |||
51 | 56 | ||
52 | static int dccp_hdlr_seq_win(struct sock *sk, u64 seq_win, bool rx) | 57 | static int dccp_hdlr_seq_win(struct sock *sk, u64 seq_win, bool rx) |
53 | { | 58 | { |
54 | if (!rx) | 59 | struct dccp_sock *dp = dccp_sk(sk); |
55 | dccp_msk(sk)->dccpms_sequence_window = seq_win; | 60 | |
61 | if (rx) { | ||
62 | dp->dccps_r_seq_win = seq_win; | ||
63 | /* propagate changes to update SWL/SWH */ | ||
64 | dccp_update_gsr(sk, dp->dccps_gsr); | ||
65 | } else { | ||
66 | dp->dccps_l_seq_win = seq_win; | ||
67 | /* propagate changes to update AWL */ | ||
68 | dccp_update_gss(sk, dp->dccps_gss); | ||
69 | } | ||
56 | return 0; | 70 | return 0; |
57 | } | 71 | } |
58 | 72 | ||
@@ -194,6 +208,100 @@ static int dccp_feat_default_value(u8 feat_num) | |||
194 | return idx < 0 ? 0 : dccp_feat_table[idx].default_value; | 208 | return idx < 0 ? 0 : dccp_feat_table[idx].default_value; |
195 | } | 209 | } |
196 | 210 | ||
211 | /* | ||
212 | * Debugging and verbose-printing section | ||
213 | */ | ||
214 | static const char *dccp_feat_fname(const u8 feat) | ||
215 | { | ||
216 | static const char *feature_names[] = { | ||
217 | [DCCPF_RESERVED] = "Reserved", | ||
218 | [DCCPF_CCID] = "CCID", | ||
219 | [DCCPF_SHORT_SEQNOS] = "Allow Short Seqnos", | ||
220 | [DCCPF_SEQUENCE_WINDOW] = "Sequence Window", | ||
221 | [DCCPF_ECN_INCAPABLE] = "ECN Incapable", | ||
222 | [DCCPF_ACK_RATIO] = "Ack Ratio", | ||
223 | [DCCPF_SEND_ACK_VECTOR] = "Send ACK Vector", | ||
224 | [DCCPF_SEND_NDP_COUNT] = "Send NDP Count", | ||
225 | [DCCPF_MIN_CSUM_COVER] = "Min. Csum Coverage", | ||
226 | [DCCPF_DATA_CHECKSUM] = "Send Data Checksum", | ||
227 | }; | ||
228 | if (feat > DCCPF_DATA_CHECKSUM && feat < DCCPF_MIN_CCID_SPECIFIC) | ||
229 | return feature_names[DCCPF_RESERVED]; | ||
230 | |||
231 | if (feat == DCCPF_SEND_LEV_RATE) | ||
232 | return "Send Loss Event Rate"; | ||
233 | if (feat >= DCCPF_MIN_CCID_SPECIFIC) | ||
234 | return "CCID-specific"; | ||
235 | |||
236 | return feature_names[feat]; | ||
237 | } | ||
238 | |||
239 | static const char *dccp_feat_sname[] = { "DEFAULT", "INITIALISING", "CHANGING", | ||
240 | "UNSTABLE", "STABLE" }; | ||
241 | |||
242 | #ifdef CONFIG_IP_DCCP_DEBUG | ||
243 | static const char *dccp_feat_oname(const u8 opt) | ||
244 | { | ||
245 | switch (opt) { | ||
246 | case DCCPO_CHANGE_L: return "Change_L"; | ||
247 | case DCCPO_CONFIRM_L: return "Confirm_L"; | ||
248 | case DCCPO_CHANGE_R: return "Change_R"; | ||
249 | case DCCPO_CONFIRM_R: return "Confirm_R"; | ||
250 | } | ||
251 | return NULL; | ||
252 | } | ||
253 | |||
254 | static void dccp_feat_printval(u8 feat_num, dccp_feat_val const *val) | ||
255 | { | ||
256 | u8 i, type = dccp_feat_type(feat_num); | ||
257 | |||
258 | if (val == NULL || (type == FEAT_SP && val->sp.vec == NULL)) | ||
259 | dccp_pr_debug_cat("(NULL)"); | ||
260 | else if (type == FEAT_SP) | ||
261 | for (i = 0; i < val->sp.len; i++) | ||
262 | dccp_pr_debug_cat("%s%u", i ? " " : "", val->sp.vec[i]); | ||
263 | else if (type == FEAT_NN) | ||
264 | dccp_pr_debug_cat("%llu", (unsigned long long)val->nn); | ||
265 | else | ||
266 | dccp_pr_debug_cat("unknown type %u", type); | ||
267 | } | ||
268 | |||
269 | static void dccp_feat_printvals(u8 feat_num, u8 *list, u8 len) | ||
270 | { | ||
271 | u8 type = dccp_feat_type(feat_num); | ||
272 | dccp_feat_val fval = { .sp.vec = list, .sp.len = len }; | ||
273 | |||
274 | if (type == FEAT_NN) | ||
275 | fval.nn = dccp_decode_value_var(list, len); | ||
276 | dccp_feat_printval(feat_num, &fval); | ||
277 | } | ||
278 | |||
279 | static void dccp_feat_print_entry(struct dccp_feat_entry const *entry) | ||
280 | { | ||
281 | dccp_debug(" * %s %s = ", entry->is_local ? "local" : "remote", | ||
282 | dccp_feat_fname(entry->feat_num)); | ||
283 | dccp_feat_printval(entry->feat_num, &entry->val); | ||
284 | dccp_pr_debug_cat(", state=%s %s\n", dccp_feat_sname[entry->state], | ||
285 | entry->needs_confirm ? "(Confirm pending)" : ""); | ||
286 | } | ||
287 | |||
288 | #define dccp_feat_print_opt(opt, feat, val, len, mandatory) do { \ | ||
289 | dccp_pr_debug("%s(%s, ", dccp_feat_oname(opt), dccp_feat_fname(feat));\ | ||
290 | dccp_feat_printvals(feat, val, len); \ | ||
291 | dccp_pr_debug_cat(") %s\n", mandatory ? "!" : ""); } while (0) | ||
292 | |||
293 | #define dccp_feat_print_fnlist(fn_list) { \ | ||
294 | const struct dccp_feat_entry *___entry; \ | ||
295 | \ | ||
296 | dccp_pr_debug("List Dump:\n"); \ | ||
297 | list_for_each_entry(___entry, fn_list, node) \ | ||
298 | dccp_feat_print_entry(___entry); \ | ||
299 | } | ||
300 | #else /* ! CONFIG_IP_DCCP_DEBUG */ | ||
301 | #define dccp_feat_print_opt(opt, feat, val, len, mandatory) | ||
302 | #define dccp_feat_print_fnlist(fn_list) | ||
303 | #endif | ||
304 | |||
197 | static int __dccp_feat_activate(struct sock *sk, const int idx, | 305 | static int __dccp_feat_activate(struct sock *sk, const int idx, |
198 | const bool is_local, dccp_feat_val const *fval) | 306 | const bool is_local, dccp_feat_val const *fval) |
199 | { | 307 | { |
@@ -226,6 +334,10 @@ static int __dccp_feat_activate(struct sock *sk, const int idx, | |||
226 | /* Location is RX if this is a local-RX or remote-TX feature */ | 334 | /* Location is RX if this is a local-RX or remote-TX feature */ |
227 | rx = (is_local == (dccp_feat_table[idx].rxtx == FEAT_AT_RX)); | 335 | rx = (is_local == (dccp_feat_table[idx].rxtx == FEAT_AT_RX)); |
228 | 336 | ||
337 | dccp_debug(" -> activating %s %s, %sval=%llu\n", rx ? "RX" : "TX", | ||
338 | dccp_feat_fname(dccp_feat_table[idx].feat_num), | ||
339 | fval ? "" : "default ", (unsigned long long)val); | ||
340 | |||
229 | return dccp_feat_table[idx].activation_hdlr(sk, val, rx); | 341 | return dccp_feat_table[idx].activation_hdlr(sk, val, rx); |
230 | } | 342 | } |
231 | 343 | ||
@@ -530,6 +642,7 @@ int dccp_feat_insert_opts(struct dccp_sock *dp, struct dccp_request_sock *dreq, | |||
530 | return -1; | 642 | return -1; |
531 | } | 643 | } |
532 | } | 644 | } |
645 | dccp_feat_print_opt(opt, pos->feat_num, ptr, len, 0); | ||
533 | 646 | ||
534 | if (dccp_insert_fn_opt(skb, opt, pos->feat_num, ptr, len, rpt)) | 647 | if (dccp_insert_fn_opt(skb, opt, pos->feat_num, ptr, len, rpt)) |
535 | return -1; | 648 | return -1; |
@@ -783,6 +896,7 @@ int dccp_feat_finalise_settings(struct dccp_sock *dp) | |||
783 | while (i--) | 896 | while (i--) |
784 | if (ccids[i] > 0 && dccp_feat_propagate_ccid(fn, ccids[i], i)) | 897 | if (ccids[i] > 0 && dccp_feat_propagate_ccid(fn, ccids[i], i)) |
785 | return -1; | 898 | return -1; |
899 | dccp_feat_print_fnlist(fn); | ||
786 | return 0; | 900 | return 0; |
787 | } | 901 | } |
788 | 902 | ||
@@ -901,6 +1015,8 @@ static u8 dccp_feat_change_recv(struct list_head *fn, u8 is_mandatory, u8 opt, | |||
901 | if (len == 0 || type == FEAT_UNKNOWN) /* 6.1 and 6.6.8 */ | 1015 | if (len == 0 || type == FEAT_UNKNOWN) /* 6.1 and 6.6.8 */ |
902 | goto unknown_feature_or_value; | 1016 | goto unknown_feature_or_value; |
903 | 1017 | ||
1018 | dccp_feat_print_opt(opt, feat, val, len, is_mandatory); | ||
1019 | |||
904 | /* | 1020 | /* |
905 | * Negotiation of NN features: Change R is invalid, so there is no | 1021 | * Negotiation of NN features: Change R is invalid, so there is no |
906 | * simultaneous negotiation; hence we do not look up in the list. | 1022 | * simultaneous negotiation; hence we do not look up in the list. |
@@ -1006,6 +1122,8 @@ static u8 dccp_feat_confirm_recv(struct list_head *fn, u8 is_mandatory, u8 opt, | |||
1006 | const bool local = (opt == DCCPO_CONFIRM_R); | 1122 | const bool local = (opt == DCCPO_CONFIRM_R); |
1007 | struct dccp_feat_entry *entry = dccp_feat_list_lookup(fn, feat, local); | 1123 | struct dccp_feat_entry *entry = dccp_feat_list_lookup(fn, feat, local); |
1008 | 1124 | ||
1125 | dccp_feat_print_opt(opt, feat, val, len, is_mandatory); | ||
1126 | |||
1009 | if (entry == NULL) { /* nothing queued: ignore or handle error */ | 1127 | if (entry == NULL) { /* nothing queued: ignore or handle error */ |
1010 | if (is_mandatory && type == FEAT_UNKNOWN) | 1128 | if (is_mandatory && type == FEAT_UNKNOWN) |
1011 | return DCCP_RESET_CODE_MANDATORY_ERROR; | 1129 | return DCCP_RESET_CODE_MANDATORY_ERROR; |
@@ -1115,23 +1233,70 @@ int dccp_feat_parse_options(struct sock *sk, struct dccp_request_sock *dreq, | |||
1115 | return 0; /* ignore FN options in all other states */ | 1233 | return 0; /* ignore FN options in all other states */ |
1116 | } | 1234 | } |
1117 | 1235 | ||
1236 | /** | ||
1237 | * dccp_feat_init - Seed feature negotiation with host-specific defaults | ||
1238 | * This initialises global defaults, depending on the value of the sysctls. | ||
1239 | * These can later be overridden by registering changes via setsockopt calls. | ||
1240 | * The last link in the chain is finalise_settings, to make sure that between | ||
1241 | * here and the start of actual feature negotiation no inconsistencies enter. | ||
1242 | * | ||
1243 | * All features not appearing below use either defaults or are otherwise | ||
1244 | * later adjusted through dccp_feat_finalise_settings(). | ||
1245 | */ | ||
1118 | int dccp_feat_init(struct sock *sk) | 1246 | int dccp_feat_init(struct sock *sk) |
1119 | { | 1247 | { |
1120 | struct dccp_sock *dp = dccp_sk(sk); | 1248 | struct list_head *fn = &dccp_sk(sk)->dccps_featneg; |
1121 | struct dccp_minisock *dmsk = dccp_msk(sk); | 1249 | u8 on = 1, off = 0; |
1122 | int rc; | 1250 | int rc; |
1251 | struct { | ||
1252 | u8 *val; | ||
1253 | u8 len; | ||
1254 | } tx, rx; | ||
1255 | |||
1256 | /* Non-negotiable (NN) features */ | ||
1257 | rc = __feat_register_nn(fn, DCCPF_SEQUENCE_WINDOW, 0, | ||
1258 | sysctl_dccp_sequence_window); | ||
1259 | if (rc) | ||
1260 | return rc; | ||
1261 | |||
1262 | /* Server-priority (SP) features */ | ||
1263 | |||
1264 | /* Advertise that short seqnos are not supported (7.6.1) */ | ||
1265 | rc = __feat_register_sp(fn, DCCPF_SHORT_SEQNOS, true, true, &off, 1); | ||
1266 | if (rc) | ||
1267 | return rc; | ||
1123 | 1268 | ||
1124 | INIT_LIST_HEAD(&dmsk->dccpms_pending); /* XXX no longer used */ | 1269 | /* RFC 4340 12.1: "If a DCCP is not ECN capable, ..." */ |
1125 | INIT_LIST_HEAD(&dmsk->dccpms_conf); /* XXX no longer used */ | 1270 | rc = __feat_register_sp(fn, DCCPF_ECN_INCAPABLE, true, true, &on, 1); |
1271 | if (rc) | ||
1272 | return rc; | ||
1273 | |||
1274 | /* | ||
1275 | * We advertise the available list of CCIDs and reorder according to | ||
1276 | * preferences, to avoid failure resulting from negotiating different | ||
1277 | * singleton values (which always leads to failure). | ||
1278 | * These settings can still (later) be overridden via sockopts. | ||
1279 | */ | ||
1280 | if (ccid_get_builtin_ccids(&tx.val, &tx.len) || | ||
1281 | ccid_get_builtin_ccids(&rx.val, &rx.len)) | ||
1282 | return -ENOBUFS; | ||
1283 | |||
1284 | if (!dccp_feat_prefer(sysctl_dccp_tx_ccid, tx.val, tx.len) || | ||
1285 | !dccp_feat_prefer(sysctl_dccp_rx_ccid, rx.val, rx.len)) | ||
1286 | goto free_ccid_lists; | ||
1287 | |||
1288 | rc = __feat_register_sp(fn, DCCPF_CCID, true, false, tx.val, tx.len); | ||
1289 | if (rc) | ||
1290 | goto free_ccid_lists; | ||
1291 | |||
1292 | rc = __feat_register_sp(fn, DCCPF_CCID, false, false, rx.val, rx.len); | ||
1126 | 1293 | ||
1127 | /* Ack ratio */ | 1294 | free_ccid_lists: |
1128 | rc = __feat_register_nn(&dp->dccps_featneg, DCCPF_ACK_RATIO, 0, | 1295 | kfree(tx.val); |
1129 | dp->dccps_l_ack_ratio); | 1296 | kfree(rx.val); |
1130 | return rc; | 1297 | return rc; |
1131 | } | 1298 | } |
1132 | 1299 | ||
1133 | EXPORT_SYMBOL_GPL(dccp_feat_init); | ||
1134 | |||
1135 | int dccp_feat_activate_values(struct sock *sk, struct list_head *fn_list) | 1300 | int dccp_feat_activate_values(struct sock *sk, struct list_head *fn_list) |
1136 | { | 1301 | { |
1137 | struct dccp_sock *dp = dccp_sk(sk); | 1302 | struct dccp_sock *dp = dccp_sk(sk); |
@@ -1156,9 +1321,10 @@ int dccp_feat_activate_values(struct sock *sk, struct list_head *fn_list) | |||
1156 | goto activation_failed; | 1321 | goto activation_failed; |
1157 | } | 1322 | } |
1158 | if (cur->state != FEAT_STABLE) { | 1323 | if (cur->state != FEAT_STABLE) { |
1159 | DCCP_CRIT("Negotiation of %s %u failed in state %u", | 1324 | DCCP_CRIT("Negotiation of %s %s failed in state %s", |
1160 | cur->is_local ? "local" : "remote", | 1325 | cur->is_local ? "local" : "remote", |
1161 | cur->feat_num, cur->state); | 1326 | dccp_feat_fname(cur->feat_num), |
1327 | dccp_feat_sname[cur->state]); | ||
1162 | goto activation_failed; | 1328 | goto activation_failed; |
1163 | } | 1329 | } |
1164 | fvals[idx][cur->is_local] = &cur->val; | 1330 | fvals[idx][cur->is_local] = &cur->val; |
@@ -1199,43 +1365,3 @@ activation_failed: | |||
1199 | dp->dccps_hc_rx_ackvec = NULL; | 1365 | dp->dccps_hc_rx_ackvec = NULL; |
1200 | return -1; | 1366 | return -1; |
1201 | } | 1367 | } |
1202 | |||
1203 | #ifdef CONFIG_IP_DCCP_DEBUG | ||
1204 | const char *dccp_feat_typename(const u8 type) | ||
1205 | { | ||
1206 | switch(type) { | ||
1207 | case DCCPO_CHANGE_L: return("ChangeL"); | ||
1208 | case DCCPO_CONFIRM_L: return("ConfirmL"); | ||
1209 | case DCCPO_CHANGE_R: return("ChangeR"); | ||
1210 | case DCCPO_CONFIRM_R: return("ConfirmR"); | ||
1211 | /* the following case must not appear in feature negotation */ | ||
1212 | default: dccp_pr_debug("unknown type %d [BUG!]\n", type); | ||
1213 | } | ||
1214 | return NULL; | ||
1215 | } | ||
1216 | |||
1217 | const char *dccp_feat_name(const u8 feat) | ||
1218 | { | ||
1219 | static const char *feature_names[] = { | ||
1220 | [DCCPF_RESERVED] = "Reserved", | ||
1221 | [DCCPF_CCID] = "CCID", | ||
1222 | [DCCPF_SHORT_SEQNOS] = "Allow Short Seqnos", | ||
1223 | [DCCPF_SEQUENCE_WINDOW] = "Sequence Window", | ||
1224 | [DCCPF_ECN_INCAPABLE] = "ECN Incapable", | ||
1225 | [DCCPF_ACK_RATIO] = "Ack Ratio", | ||
1226 | [DCCPF_SEND_ACK_VECTOR] = "Send ACK Vector", | ||
1227 | [DCCPF_SEND_NDP_COUNT] = "Send NDP Count", | ||
1228 | [DCCPF_MIN_CSUM_COVER] = "Min. Csum Coverage", | ||
1229 | [DCCPF_DATA_CHECKSUM] = "Send Data Checksum", | ||
1230 | }; | ||
1231 | if (feat > DCCPF_DATA_CHECKSUM && feat < DCCPF_MIN_CCID_SPECIFIC) | ||
1232 | return feature_names[DCCPF_RESERVED]; | ||
1233 | |||
1234 | if (feat == DCCPF_SEND_LEV_RATE) | ||
1235 | return "Send Loss Event Rate"; | ||
1236 | if (feat >= DCCPF_MIN_CCID_SPECIFIC) | ||
1237 | return "CCID-specific"; | ||
1238 | |||
1239 | return feature_names[feat]; | ||
1240 | } | ||
1241 | #endif /* CONFIG_IP_DCCP_DEBUG */ | ||
diff --git a/net/dccp/feat.h b/net/dccp/feat.h index 9b46e2a7866e..f96721619def 100644 --- a/net/dccp/feat.h +++ b/net/dccp/feat.h | |||
@@ -100,26 +100,21 @@ struct ccid_dependency { | |||
100 | u8 val; | 100 | u8 val; |
101 | }; | 101 | }; |
102 | 102 | ||
103 | #ifdef CONFIG_IP_DCCP_DEBUG | 103 | /* |
104 | extern const char *dccp_feat_typename(const u8 type); | 104 | * Sysctls to seed defaults for feature negotiation |
105 | extern const char *dccp_feat_name(const u8 feat); | 105 | */ |
106 | 106 | extern unsigned long sysctl_dccp_sequence_window; | |
107 | static inline void dccp_feat_debug(const u8 type, const u8 feat, const u8 val) | 107 | extern int sysctl_dccp_rx_ccid; |
108 | { | 108 | extern int sysctl_dccp_tx_ccid; |
109 | dccp_pr_debug("%s(%s (%d), %d)\n", dccp_feat_typename(type), | ||
110 | dccp_feat_name(feat), feat, val); | ||
111 | } | ||
112 | #else | ||
113 | #define dccp_feat_debug(type, feat, val) | ||
114 | #endif /* CONFIG_IP_DCCP_DEBUG */ | ||
115 | 109 | ||
110 | extern int dccp_feat_init(struct sock *sk); | ||
111 | extern void dccp_feat_initialise_sysctls(void); | ||
116 | extern int dccp_feat_register_sp(struct sock *sk, u8 feat, u8 is_local, | 112 | extern int dccp_feat_register_sp(struct sock *sk, u8 feat, u8 is_local, |
117 | u8 const *list, u8 len); | 113 | u8 const *list, u8 len); |
118 | extern int dccp_feat_register_nn(struct sock *sk, u8 feat, u64 val); | 114 | extern int dccp_feat_register_nn(struct sock *sk, u8 feat, u64 val); |
119 | extern int dccp_feat_parse_options(struct sock *, struct dccp_request_sock *, | 115 | extern int dccp_feat_parse_options(struct sock *, struct dccp_request_sock *, |
120 | u8 mand, u8 opt, u8 feat, u8 *val, u8 len); | 116 | u8 mand, u8 opt, u8 feat, u8 *val, u8 len); |
121 | extern int dccp_feat_clone_list(struct list_head const *, struct list_head *); | 117 | extern int dccp_feat_clone_list(struct list_head const *, struct list_head *); |
122 | extern int dccp_feat_init(struct sock *sk); | ||
123 | 118 | ||
124 | /* | 119 | /* |
125 | * Encoding variable-length options and their maximum length. | 120 | * Encoding variable-length options and their maximum length. |
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index 6821ae33dd37..5ca49cec95f5 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c | |||
@@ -42,11 +42,6 @@ struct inet_timewait_death_row dccp_death_row = { | |||
42 | 42 | ||
43 | EXPORT_SYMBOL_GPL(dccp_death_row); | 43 | EXPORT_SYMBOL_GPL(dccp_death_row); |
44 | 44 | ||
45 | void dccp_minisock_init(struct dccp_minisock *dmsk) | ||
46 | { | ||
47 | dmsk->dccpms_sequence_window = sysctl_dccp_feat_sequence_window; | ||
48 | } | ||
49 | |||
50 | void dccp_time_wait(struct sock *sk, int state, int timeo) | 45 | void dccp_time_wait(struct sock *sk, int state, int timeo) |
51 | { | 46 | { |
52 | struct inet_timewait_sock *tw = NULL; | 47 | struct inet_timewait_sock *tw = NULL; |
@@ -110,7 +105,6 @@ struct sock *dccp_create_openreq_child(struct sock *sk, | |||
110 | struct dccp_request_sock *dreq = dccp_rsk(req); | 105 | struct dccp_request_sock *dreq = dccp_rsk(req); |
111 | struct inet_connection_sock *newicsk = inet_csk(newsk); | 106 | struct inet_connection_sock *newicsk = inet_csk(newsk); |
112 | struct dccp_sock *newdp = dccp_sk(newsk); | 107 | struct dccp_sock *newdp = dccp_sk(newsk); |
113 | struct dccp_minisock *newdmsk = dccp_msk(newsk); | ||
114 | 108 | ||
115 | newdp->dccps_role = DCCP_ROLE_SERVER; | 109 | newdp->dccps_role = DCCP_ROLE_SERVER; |
116 | newdp->dccps_hc_rx_ackvec = NULL; | 110 | newdp->dccps_hc_rx_ackvec = NULL; |
@@ -128,10 +122,6 @@ struct sock *dccp_create_openreq_child(struct sock *sk, | |||
128 | * Initialize S.GAR := S.ISS | 122 | * Initialize S.GAR := S.ISS |
129 | * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies | 123 | * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies |
130 | */ | 124 | */ |
131 | |||
132 | /* See dccp_v4_conn_request */ | ||
133 | newdmsk->dccpms_sequence_window = req->rcv_wnd; | ||
134 | |||
135 | newdp->dccps_gar = newdp->dccps_iss = dreq->dreq_iss; | 125 | newdp->dccps_gar = newdp->dccps_iss = dreq->dreq_iss; |
136 | dccp_update_gss(newsk, dreq->dreq_iss); | 126 | dccp_update_gss(newsk, dreq->dreq_iss); |
137 | 127 | ||
@@ -290,7 +280,6 @@ int dccp_reqsk_init(struct request_sock *req, | |||
290 | inet_rsk(req)->rmt_port = dccp_hdr(skb)->dccph_sport; | 280 | inet_rsk(req)->rmt_port = dccp_hdr(skb)->dccph_sport; |
291 | inet_rsk(req)->loc_port = dccp_hdr(skb)->dccph_dport; | 281 | inet_rsk(req)->loc_port = dccp_hdr(skb)->dccph_dport; |
292 | inet_rsk(req)->acked = 0; | 282 | inet_rsk(req)->acked = 0; |
293 | req->rcv_wnd = sysctl_dccp_feat_sequence_window; | ||
294 | dreq->dreq_timestamp_echo = 0; | 283 | dreq->dreq_timestamp_echo = 0; |
295 | 284 | ||
296 | /* inherit feature negotiation options from listening socket */ | 285 | /* inherit feature negotiation options from listening socket */ |
diff --git a/net/dccp/options.c b/net/dccp/options.c index 7b1165c21f51..1b08cae9c65b 100644 --- a/net/dccp/options.c +++ b/net/dccp/options.c | |||
@@ -23,10 +23,6 @@ | |||
23 | #include "dccp.h" | 23 | #include "dccp.h" |
24 | #include "feat.h" | 24 | #include "feat.h" |
25 | 25 | ||
26 | int sysctl_dccp_feat_sequence_window = DCCPF_INITIAL_SEQUENCE_WINDOW; | ||
27 | int sysctl_dccp_feat_rx_ccid = DCCPF_INITIAL_CCID; | ||
28 | int sysctl_dccp_feat_tx_ccid = DCCPF_INITIAL_CCID; | ||
29 | |||
30 | u64 dccp_decode_value_var(const u8 *bf, const u8 len) | 26 | u64 dccp_decode_value_var(const u8 *bf, const u8 len) |
31 | { | 27 | { |
32 | u64 value = 0; | 28 | u64 value = 0; |
@@ -502,10 +498,6 @@ int dccp_insert_fn_opt(struct sk_buff *skb, u8 type, u8 feat, | |||
502 | *to++ = *val; | 498 | *to++ = *val; |
503 | if (len) | 499 | if (len) |
504 | memcpy(to, val, len); | 500 | memcpy(to, val, len); |
505 | |||
506 | dccp_pr_debug("%s(%s (%d), ...), length %d\n", | ||
507 | dccp_feat_typename(type), | ||
508 | dccp_feat_name(feat), feat, len); | ||
509 | return 0; | 501 | return 0; |
510 | } | 502 | } |
511 | 503 | ||
diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 945b4d5d23b3..314a1b5c033c 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c | |||
@@ -174,8 +174,6 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized) | |||
174 | struct dccp_sock *dp = dccp_sk(sk); | 174 | struct dccp_sock *dp = dccp_sk(sk); |
175 | struct inet_connection_sock *icsk = inet_csk(sk); | 175 | struct inet_connection_sock *icsk = inet_csk(sk); |
176 | 176 | ||
177 | dccp_minisock_init(&dp->dccps_minisock); | ||
178 | |||
179 | icsk->icsk_rto = DCCP_TIMEOUT_INIT; | 177 | icsk->icsk_rto = DCCP_TIMEOUT_INIT; |
180 | icsk->icsk_syn_retries = sysctl_dccp_request_retries; | 178 | icsk->icsk_syn_retries = sysctl_dccp_request_retries; |
181 | sk->sk_state = DCCP_CLOSED; | 179 | sk->sk_state = DCCP_CLOSED; |
diff --git a/net/dccp/sysctl.c b/net/dccp/sysctl.c index 018e210875e1..a5a1856234e7 100644 --- a/net/dccp/sysctl.c +++ b/net/dccp/sysctl.c | |||
@@ -18,55 +18,72 @@ | |||
18 | #error This file should not be compiled without CONFIG_SYSCTL defined | 18 | #error This file should not be compiled without CONFIG_SYSCTL defined |
19 | #endif | 19 | #endif |
20 | 20 | ||
21 | /* Boundary values */ | ||
22 | static int zero = 0, | ||
23 | u8_max = 0xFF; | ||
24 | static unsigned long seqw_min = 32; | ||
25 | |||
21 | static struct ctl_table dccp_default_table[] = { | 26 | static struct ctl_table dccp_default_table[] = { |
22 | { | 27 | { |
23 | .procname = "seq_window", | 28 | .procname = "seq_window", |
24 | .data = &sysctl_dccp_feat_sequence_window, | 29 | .data = &sysctl_dccp_sequence_window, |
25 | .maxlen = sizeof(sysctl_dccp_feat_sequence_window), | 30 | .maxlen = sizeof(sysctl_dccp_sequence_window), |
26 | .mode = 0644, | 31 | .mode = 0644, |
27 | .proc_handler = proc_dointvec, | 32 | .proc_handler = proc_doulongvec_minmax, |
33 | .extra1 = &seqw_min, /* RFC 4340, 7.5.2 */ | ||
28 | }, | 34 | }, |
29 | { | 35 | { |
30 | .procname = "rx_ccid", | 36 | .procname = "rx_ccid", |
31 | .data = &sysctl_dccp_feat_rx_ccid, | 37 | .data = &sysctl_dccp_rx_ccid, |
32 | .maxlen = sizeof(sysctl_dccp_feat_rx_ccid), | 38 | .maxlen = sizeof(sysctl_dccp_rx_ccid), |
33 | .mode = 0644, | 39 | .mode = 0644, |
34 | .proc_handler = proc_dointvec, | 40 | .proc_handler = proc_dointvec_minmax, |
41 | .extra1 = &zero, | ||
42 | .extra2 = &u8_max, /* RFC 4340, 10. */ | ||
35 | }, | 43 | }, |
36 | { | 44 | { |
37 | .procname = "tx_ccid", | 45 | .procname = "tx_ccid", |
38 | .data = &sysctl_dccp_feat_tx_ccid, | 46 | .data = &sysctl_dccp_tx_ccid, |
39 | .maxlen = sizeof(sysctl_dccp_feat_tx_ccid), | 47 | .maxlen = sizeof(sysctl_dccp_tx_ccid), |
40 | .mode = 0644, | 48 | .mode = 0644, |
41 | .proc_handler = proc_dointvec, | 49 | .proc_handler = proc_dointvec_minmax, |
50 | .extra1 = &zero, | ||
51 | .extra2 = &u8_max, /* RFC 4340, 10. */ | ||
42 | }, | 52 | }, |
43 | { | 53 | { |
44 | .procname = "request_retries", | 54 | .procname = "request_retries", |
45 | .data = &sysctl_dccp_request_retries, | 55 | .data = &sysctl_dccp_request_retries, |
46 | .maxlen = sizeof(sysctl_dccp_request_retries), | 56 | .maxlen = sizeof(sysctl_dccp_request_retries), |
47 | .mode = 0644, | 57 | .mode = 0644, |
48 | .proc_handler = proc_dointvec, | 58 | .proc_handler = proc_dointvec_minmax, |
59 | .extra1 = &zero, | ||
60 | .extra2 = &u8_max, | ||
49 | }, | 61 | }, |
50 | { | 62 | { |
51 | .procname = "retries1", | 63 | .procname = "retries1", |
52 | .data = &sysctl_dccp_retries1, | 64 | .data = &sysctl_dccp_retries1, |
53 | .maxlen = sizeof(sysctl_dccp_retries1), | 65 | .maxlen = sizeof(sysctl_dccp_retries1), |
54 | .mode = 0644, | 66 | .mode = 0644, |
55 | .proc_handler = proc_dointvec, | 67 | .proc_handler = proc_dointvec_minmax, |
68 | .extra1 = &zero, | ||
69 | .extra2 = &u8_max, | ||
56 | }, | 70 | }, |
57 | { | 71 | { |
58 | .procname = "retries2", | 72 | .procname = "retries2", |
59 | .data = &sysctl_dccp_retries2, | 73 | .data = &sysctl_dccp_retries2, |
60 | .maxlen = sizeof(sysctl_dccp_retries2), | 74 | .maxlen = sizeof(sysctl_dccp_retries2), |
61 | .mode = 0644, | 75 | .mode = 0644, |
62 | .proc_handler = proc_dointvec, | 76 | .proc_handler = proc_dointvec_minmax, |
77 | .extra1 = &zero, | ||
78 | .extra2 = &u8_max, | ||
63 | }, | 79 | }, |
64 | { | 80 | { |
65 | .procname = "tx_qlen", | 81 | .procname = "tx_qlen", |
66 | .data = &sysctl_dccp_tx_qlen, | 82 | .data = &sysctl_dccp_tx_qlen, |
67 | .maxlen = sizeof(sysctl_dccp_tx_qlen), | 83 | .maxlen = sizeof(sysctl_dccp_tx_qlen), |
68 | .mode = 0644, | 84 | .mode = 0644, |
69 | .proc_handler = proc_dointvec, | 85 | .proc_handler = proc_dointvec_minmax, |
86 | .extra1 = &zero, | ||
70 | }, | 87 | }, |
71 | { | 88 | { |
72 | .procname = "sync_ratelimit", | 89 | .procname = "sync_ratelimit", |
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index cf0e18499297..12bf7d4c16c6 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c | |||
@@ -2113,7 +2113,7 @@ static struct notifier_block dn_dev_notifier = { | |||
2113 | extern int dn_route_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); | 2113 | extern int dn_route_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); |
2114 | 2114 | ||
2115 | static struct packet_type dn_dix_packet_type = { | 2115 | static struct packet_type dn_dix_packet_type = { |
2116 | .type = __constant_htons(ETH_P_DNA_RT), | 2116 | .type = cpu_to_be16(ETH_P_DNA_RT), |
2117 | .dev = NULL, /* All devices */ | 2117 | .dev = NULL, /* All devices */ |
2118 | .func = dn_route_rcv, | 2118 | .func = dn_route_rcv, |
2119 | }; | 2119 | }; |
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index c754670b7fca..5130dee0b384 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c | |||
@@ -124,7 +124,7 @@ int decnet_dst_gc_interval = 2; | |||
124 | 124 | ||
125 | static struct dst_ops dn_dst_ops = { | 125 | static struct dst_ops dn_dst_ops = { |
126 | .family = PF_DECnet, | 126 | .family = PF_DECnet, |
127 | .protocol = __constant_htons(ETH_P_DNA_RT), | 127 | .protocol = cpu_to_be16(ETH_P_DNA_RT), |
128 | .gc_thresh = 128, | 128 | .gc_thresh = 128, |
129 | .gc = dn_dst_gc, | 129 | .gc = dn_dst_gc, |
130 | .check = dn_dst_check, | 130 | .check = dn_dst_check, |
diff --git a/net/dsa/mv88e6123_61_65.c b/net/dsa/mv88e6123_61_65.c index ec8c6a0482d3..100318722214 100644 --- a/net/dsa/mv88e6123_61_65.c +++ b/net/dsa/mv88e6123_61_65.c | |||
@@ -394,7 +394,7 @@ static int mv88e6123_61_65_get_sset_count(struct dsa_switch *ds) | |||
394 | } | 394 | } |
395 | 395 | ||
396 | static struct dsa_switch_driver mv88e6123_61_65_switch_driver = { | 396 | static struct dsa_switch_driver mv88e6123_61_65_switch_driver = { |
397 | .tag_protocol = __constant_htons(ETH_P_EDSA), | 397 | .tag_protocol = cpu_to_be16(ETH_P_EDSA), |
398 | .priv_size = sizeof(struct mv88e6xxx_priv_state), | 398 | .priv_size = sizeof(struct mv88e6xxx_priv_state), |
399 | .probe = mv88e6123_61_65_probe, | 399 | .probe = mv88e6123_61_65_probe, |
400 | .setup = mv88e6123_61_65_setup, | 400 | .setup = mv88e6123_61_65_setup, |
diff --git a/net/dsa/mv88e6131.c b/net/dsa/mv88e6131.c index 374d46a01265..70fae2444cb6 100644 --- a/net/dsa/mv88e6131.c +++ b/net/dsa/mv88e6131.c | |||
@@ -353,7 +353,7 @@ static int mv88e6131_get_sset_count(struct dsa_switch *ds) | |||
353 | } | 353 | } |
354 | 354 | ||
355 | static struct dsa_switch_driver mv88e6131_switch_driver = { | 355 | static struct dsa_switch_driver mv88e6131_switch_driver = { |
356 | .tag_protocol = __constant_htons(ETH_P_DSA), | 356 | .tag_protocol = cpu_to_be16(ETH_P_DSA), |
357 | .priv_size = sizeof(struct mv88e6xxx_priv_state), | 357 | .priv_size = sizeof(struct mv88e6xxx_priv_state), |
358 | .probe = mv88e6131_probe, | 358 | .probe = mv88e6131_probe, |
359 | .setup = mv88e6131_setup, | 359 | .setup = mv88e6131_setup, |
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c index f99a019b939e..63e532a69fdb 100644 --- a/net/dsa/tag_dsa.c +++ b/net/dsa/tag_dsa.c | |||
@@ -176,7 +176,7 @@ out: | |||
176 | } | 176 | } |
177 | 177 | ||
178 | static struct packet_type dsa_packet_type = { | 178 | static struct packet_type dsa_packet_type = { |
179 | .type = __constant_htons(ETH_P_DSA), | 179 | .type = cpu_to_be16(ETH_P_DSA), |
180 | .func = dsa_rcv, | 180 | .func = dsa_rcv, |
181 | }; | 181 | }; |
182 | 182 | ||
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c index 328ec957f786..6197f9a7ef42 100644 --- a/net/dsa/tag_edsa.c +++ b/net/dsa/tag_edsa.c | |||
@@ -195,7 +195,7 @@ out: | |||
195 | } | 195 | } |
196 | 196 | ||
197 | static struct packet_type edsa_packet_type = { | 197 | static struct packet_type edsa_packet_type = { |
198 | .type = __constant_htons(ETH_P_EDSA), | 198 | .type = cpu_to_be16(ETH_P_EDSA), |
199 | .func = edsa_rcv, | 199 | .func = edsa_rcv, |
200 | }; | 200 | }; |
201 | 201 | ||
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c index b59132878ad1..d7e7f424ff0c 100644 --- a/net/dsa/tag_trailer.c +++ b/net/dsa/tag_trailer.c | |||
@@ -112,7 +112,7 @@ out: | |||
112 | } | 112 | } |
113 | 113 | ||
114 | static struct packet_type trailer_packet_type = { | 114 | static struct packet_type trailer_packet_type = { |
115 | .type = __constant_htons(ETH_P_TRAILER), | 115 | .type = cpu_to_be16(ETH_P_TRAILER), |
116 | .func = trailer_rcv, | 116 | .func = trailer_rcv, |
117 | }; | 117 | }; |
118 | 118 | ||
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c index 8789d2bb1b06..7bf35582f656 100644 --- a/net/econet/af_econet.c +++ b/net/econet/af_econet.c | |||
@@ -1103,7 +1103,7 @@ drop: | |||
1103 | } | 1103 | } |
1104 | 1104 | ||
1105 | static struct packet_type econet_packet_type = { | 1105 | static struct packet_type econet_packet_type = { |
1106 | .type = __constant_htons(ETH_P_ECONET), | 1106 | .type = cpu_to_be16(ETH_P_ECONET), |
1107 | .func = econet_rcv, | 1107 | .func = econet_rcv, |
1108 | }; | 1108 | }; |
1109 | 1109 | ||
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 743f5542d65a..c79087719df0 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
@@ -369,7 +369,6 @@ lookup_protocol: | |||
369 | sock_init_data(sock, sk); | 369 | sock_init_data(sock, sk); |
370 | 370 | ||
371 | sk->sk_destruct = inet_sock_destruct; | 371 | sk->sk_destruct = inet_sock_destruct; |
372 | sk->sk_family = PF_INET; | ||
373 | sk->sk_protocol = protocol; | 372 | sk->sk_protocol = protocol; |
374 | sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; | 373 | sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; |
375 | 374 | ||
@@ -1253,10 +1252,10 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head, | |||
1253 | int proto; | 1252 | int proto; |
1254 | int id; | 1253 | int id; |
1255 | 1254 | ||
1256 | if (unlikely(!pskb_may_pull(skb, sizeof(*iph)))) | 1255 | iph = skb_gro_header(skb, sizeof(*iph)); |
1256 | if (unlikely(!iph)) | ||
1257 | goto out; | 1257 | goto out; |
1258 | 1258 | ||
1259 | iph = ip_hdr(skb); | ||
1260 | proto = iph->protocol & (MAX_INET_PROTOS - 1); | 1259 | proto = iph->protocol & (MAX_INET_PROTOS - 1); |
1261 | 1260 | ||
1262 | rcu_read_lock(); | 1261 | rcu_read_lock(); |
@@ -1270,7 +1269,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head, | |||
1270 | if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) | 1269 | if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) |
1271 | goto out_unlock; | 1270 | goto out_unlock; |
1272 | 1271 | ||
1273 | flush = ntohs(iph->tot_len) != skb->len || | 1272 | flush = ntohs(iph->tot_len) != skb_gro_len(skb) || |
1274 | iph->frag_off != htons(IP_DF); | 1273 | iph->frag_off != htons(IP_DF); |
1275 | id = ntohs(iph->id); | 1274 | id = ntohs(iph->id); |
1276 | 1275 | ||
@@ -1298,8 +1297,8 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head, | |||
1298 | } | 1297 | } |
1299 | 1298 | ||
1300 | NAPI_GRO_CB(skb)->flush |= flush; | 1299 | NAPI_GRO_CB(skb)->flush |= flush; |
1301 | __skb_pull(skb, sizeof(*iph)); | 1300 | skb_gro_pull(skb, sizeof(*iph)); |
1302 | skb_reset_transport_header(skb); | 1301 | skb_set_transport_header(skb, skb_gro_offset(skb)); |
1303 | 1302 | ||
1304 | pp = ops->gro_receive(head, skb); | 1303 | pp = ops->gro_receive(head, skb); |
1305 | 1304 | ||
@@ -1501,7 +1500,7 @@ static int ipv4_proc_init(void); | |||
1501 | */ | 1500 | */ |
1502 | 1501 | ||
1503 | static struct packet_type ip_packet_type = { | 1502 | static struct packet_type ip_packet_type = { |
1504 | .type = __constant_htons(ETH_P_IP), | 1503 | .type = cpu_to_be16(ETH_P_IP), |
1505 | .func = ip_rcv, | 1504 | .func = ip_rcv, |
1506 | .gso_send_check = inet_gso_send_check, | 1505 | .gso_send_check = inet_gso_send_check, |
1507 | .gso_segment = inet_gso_segment, | 1506 | .gso_segment = inet_gso_segment, |
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 29a74c01d8de..3f6b7354699b 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c | |||
@@ -1226,7 +1226,7 @@ void arp_ifdown(struct net_device *dev) | |||
1226 | */ | 1226 | */ |
1227 | 1227 | ||
1228 | static struct packet_type arp_packet_type = { | 1228 | static struct packet_type arp_packet_type = { |
1229 | .type = __constant_htons(ETH_P_ARP), | 1229 | .type = cpu_to_be16(ETH_P_ARP), |
1230 | .func = arp_rcv, | 1230 | .func = arp_rcv, |
1231 | }; | 1231 | }; |
1232 | 1232 | ||
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 309997edc8a5..d519a6a66726 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
@@ -1075,6 +1075,14 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, | |||
1075 | } | 1075 | } |
1076 | } | 1076 | } |
1077 | ip_mc_up(in_dev); | 1077 | ip_mc_up(in_dev); |
1078 | /* fall through */ | ||
1079 | case NETDEV_CHANGEADDR: | ||
1080 | if (IN_DEV_ARP_NOTIFY(in_dev)) | ||
1081 | arp_send(ARPOP_REQUEST, ETH_P_ARP, | ||
1082 | in_dev->ifa_list->ifa_address, | ||
1083 | dev, | ||
1084 | in_dev->ifa_list->ifa_address, | ||
1085 | NULL, dev->dev_addr, NULL); | ||
1078 | break; | 1086 | break; |
1079 | case NETDEV_DOWN: | 1087 | case NETDEV_DOWN: |
1080 | ip_mc_down(in_dev); | 1088 | ip_mc_down(in_dev); |
@@ -1439,6 +1447,7 @@ static struct devinet_sysctl_table { | |||
1439 | DEVINET_SYSCTL_RW_ENTRY(ARP_ANNOUNCE, "arp_announce"), | 1447 | DEVINET_SYSCTL_RW_ENTRY(ARP_ANNOUNCE, "arp_announce"), |
1440 | DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"), | 1448 | DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"), |
1441 | DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"), | 1449 | DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"), |
1450 | DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"), | ||
1442 | 1451 | ||
1443 | DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"), | 1452 | DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"), |
1444 | DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"), | 1453 | DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"), |
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index f26ab38680de..22cd19ee44e5 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
@@ -93,24 +93,40 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum) | |||
93 | struct inet_bind_hashbucket *head; | 93 | struct inet_bind_hashbucket *head; |
94 | struct hlist_node *node; | 94 | struct hlist_node *node; |
95 | struct inet_bind_bucket *tb; | 95 | struct inet_bind_bucket *tb; |
96 | int ret; | 96 | int ret, attempts = 5; |
97 | struct net *net = sock_net(sk); | 97 | struct net *net = sock_net(sk); |
98 | int smallest_size = -1, smallest_rover; | ||
98 | 99 | ||
99 | local_bh_disable(); | 100 | local_bh_disable(); |
100 | if (!snum) { | 101 | if (!snum) { |
101 | int remaining, rover, low, high; | 102 | int remaining, rover, low, high; |
102 | 103 | ||
104 | again: | ||
103 | inet_get_local_port_range(&low, &high); | 105 | inet_get_local_port_range(&low, &high); |
104 | remaining = (high - low) + 1; | 106 | remaining = (high - low) + 1; |
105 | rover = net_random() % remaining + low; | 107 | smallest_rover = rover = net_random() % remaining + low; |
106 | 108 | ||
109 | smallest_size = -1; | ||
107 | do { | 110 | do { |
108 | head = &hashinfo->bhash[inet_bhashfn(net, rover, | 111 | head = &hashinfo->bhash[inet_bhashfn(net, rover, |
109 | hashinfo->bhash_size)]; | 112 | hashinfo->bhash_size)]; |
110 | spin_lock(&head->lock); | 113 | spin_lock(&head->lock); |
111 | inet_bind_bucket_for_each(tb, node, &head->chain) | 114 | inet_bind_bucket_for_each(tb, node, &head->chain) |
112 | if (ib_net(tb) == net && tb->port == rover) | 115 | if (ib_net(tb) == net && tb->port == rover) { |
116 | if (tb->fastreuse > 0 && | ||
117 | sk->sk_reuse && | ||
118 | sk->sk_state != TCP_LISTEN && | ||
119 | (tb->num_owners < smallest_size || smallest_size == -1)) { | ||
120 | smallest_size = tb->num_owners; | ||
121 | smallest_rover = rover; | ||
122 | if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) { | ||
123 | spin_unlock(&head->lock); | ||
124 | snum = smallest_rover; | ||
125 | goto have_snum; | ||
126 | } | ||
127 | } | ||
113 | goto next; | 128 | goto next; |
129 | } | ||
114 | break; | 130 | break; |
115 | next: | 131 | next: |
116 | spin_unlock(&head->lock); | 132 | spin_unlock(&head->lock); |
@@ -125,14 +141,19 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum) | |||
125 | * the top level, not from the 'break;' statement. | 141 | * the top level, not from the 'break;' statement. |
126 | */ | 142 | */ |
127 | ret = 1; | 143 | ret = 1; |
128 | if (remaining <= 0) | 144 | if (remaining <= 0) { |
145 | if (smallest_size != -1) { | ||
146 | snum = smallest_rover; | ||
147 | goto have_snum; | ||
148 | } | ||
129 | goto fail; | 149 | goto fail; |
130 | 150 | } | |
131 | /* OK, here is the one we will use. HEAD is | 151 | /* OK, here is the one we will use. HEAD is |
132 | * non-NULL and we hold it's mutex. | 152 | * non-NULL and we hold it's mutex. |
133 | */ | 153 | */ |
134 | snum = rover; | 154 | snum = rover; |
135 | } else { | 155 | } else { |
156 | have_snum: | ||
136 | head = &hashinfo->bhash[inet_bhashfn(net, snum, | 157 | head = &hashinfo->bhash[inet_bhashfn(net, snum, |
137 | hashinfo->bhash_size)]; | 158 | hashinfo->bhash_size)]; |
138 | spin_lock(&head->lock); | 159 | spin_lock(&head->lock); |
@@ -145,12 +166,19 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum) | |||
145 | tb_found: | 166 | tb_found: |
146 | if (!hlist_empty(&tb->owners)) { | 167 | if (!hlist_empty(&tb->owners)) { |
147 | if (tb->fastreuse > 0 && | 168 | if (tb->fastreuse > 0 && |
148 | sk->sk_reuse && sk->sk_state != TCP_LISTEN) { | 169 | sk->sk_reuse && sk->sk_state != TCP_LISTEN && |
170 | smallest_size == -1) { | ||
149 | goto success; | 171 | goto success; |
150 | } else { | 172 | } else { |
151 | ret = 1; | 173 | ret = 1; |
152 | if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) | 174 | if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) { |
175 | if (sk->sk_reuse && sk->sk_state != TCP_LISTEN && | ||
176 | smallest_size != -1 && --attempts >= 0) { | ||
177 | spin_unlock(&head->lock); | ||
178 | goto again; | ||
179 | } | ||
153 | goto fail_unlock; | 180 | goto fail_unlock; |
181 | } | ||
154 | } | 182 | } |
155 | } | 183 | } |
156 | tb_not_found: | 184 | tb_not_found: |
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 6a1045da48d2..625cc5f64c94 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c | |||
@@ -38,6 +38,7 @@ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep, | |||
38 | write_pnet(&tb->ib_net, hold_net(net)); | 38 | write_pnet(&tb->ib_net, hold_net(net)); |
39 | tb->port = snum; | 39 | tb->port = snum; |
40 | tb->fastreuse = 0; | 40 | tb->fastreuse = 0; |
41 | tb->num_owners = 0; | ||
41 | INIT_HLIST_HEAD(&tb->owners); | 42 | INIT_HLIST_HEAD(&tb->owners); |
42 | hlist_add_head(&tb->node, &head->chain); | 43 | hlist_add_head(&tb->node, &head->chain); |
43 | } | 44 | } |
@@ -59,8 +60,13 @@ void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket | |||
59 | void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, | 60 | void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, |
60 | const unsigned short snum) | 61 | const unsigned short snum) |
61 | { | 62 | { |
63 | struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; | ||
64 | |||
65 | atomic_inc(&hashinfo->bsockets); | ||
66 | |||
62 | inet_sk(sk)->num = snum; | 67 | inet_sk(sk)->num = snum; |
63 | sk_add_bind_node(sk, &tb->owners); | 68 | sk_add_bind_node(sk, &tb->owners); |
69 | tb->num_owners++; | ||
64 | inet_csk(sk)->icsk_bind_hash = tb; | 70 | inet_csk(sk)->icsk_bind_hash = tb; |
65 | } | 71 | } |
66 | 72 | ||
@@ -75,9 +81,12 @@ static void __inet_put_port(struct sock *sk) | |||
75 | struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash]; | 81 | struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash]; |
76 | struct inet_bind_bucket *tb; | 82 | struct inet_bind_bucket *tb; |
77 | 83 | ||
84 | atomic_dec(&hashinfo->bsockets); | ||
85 | |||
78 | spin_lock(&head->lock); | 86 | spin_lock(&head->lock); |
79 | tb = inet_csk(sk)->icsk_bind_hash; | 87 | tb = inet_csk(sk)->icsk_bind_hash; |
80 | __sk_del_bind_node(sk); | 88 | __sk_del_bind_node(sk); |
89 | tb->num_owners--; | ||
81 | inet_csk(sk)->icsk_bind_hash = NULL; | 90 | inet_csk(sk)->icsk_bind_hash = NULL; |
82 | inet_sk(sk)->num = 0; | 91 | inet_sk(sk)->num = 0; |
83 | inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb); | 92 | inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb); |
@@ -444,9 +453,9 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, | |||
444 | */ | 453 | */ |
445 | inet_bind_bucket_for_each(tb, node, &head->chain) { | 454 | inet_bind_bucket_for_each(tb, node, &head->chain) { |
446 | if (ib_net(tb) == net && tb->port == port) { | 455 | if (ib_net(tb) == net && tb->port == port) { |
447 | WARN_ON(hlist_empty(&tb->owners)); | ||
448 | if (tb->fastreuse >= 0) | 456 | if (tb->fastreuse >= 0) |
449 | goto next_port; | 457 | goto next_port; |
458 | WARN_ON(hlist_empty(&tb->owners)); | ||
450 | if (!check_established(death_row, sk, | 459 | if (!check_established(death_row, sk, |
451 | port, &tw)) | 460 | port, &tw)) |
452 | goto ok; | 461 | goto ok; |
@@ -523,6 +532,7 @@ void inet_hashinfo_init(struct inet_hashinfo *h) | |||
523 | { | 532 | { |
524 | int i; | 533 | int i; |
525 | 534 | ||
535 | atomic_set(&h->bsockets, 0); | ||
526 | for (i = 0; i < INET_LHTABLE_SIZE; i++) { | 536 | for (i = 0; i < INET_LHTABLE_SIZE; i++) { |
527 | spin_lock_init(&h->listening_hash[i].lock); | 537 | spin_lock_init(&h->listening_hash[i].lock); |
528 | INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].head, | 538 | INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].head, |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 0101521f366b..07a188afb3ac 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -164,67 +164,124 @@ static DEFINE_RWLOCK(ipgre_lock); | |||
164 | 164 | ||
165 | /* Given src, dst and key, find appropriate for input tunnel. */ | 165 | /* Given src, dst and key, find appropriate for input tunnel. */ |
166 | 166 | ||
167 | static struct ip_tunnel * ipgre_tunnel_lookup(struct net *net, | 167 | static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev, |
168 | __be32 remote, __be32 local, | 168 | __be32 remote, __be32 local, |
169 | __be32 key, __be16 gre_proto) | 169 | __be32 key, __be16 gre_proto) |
170 | { | 170 | { |
171 | struct net *net = dev_net(dev); | ||
172 | int link = dev->ifindex; | ||
171 | unsigned h0 = HASH(remote); | 173 | unsigned h0 = HASH(remote); |
172 | unsigned h1 = HASH(key); | 174 | unsigned h1 = HASH(key); |
173 | struct ip_tunnel *t; | 175 | struct ip_tunnel *t, *cand = NULL; |
174 | struct ip_tunnel *t2 = NULL; | ||
175 | struct ipgre_net *ign = net_generic(net, ipgre_net_id); | 176 | struct ipgre_net *ign = net_generic(net, ipgre_net_id); |
176 | int dev_type = (gre_proto == htons(ETH_P_TEB)) ? | 177 | int dev_type = (gre_proto == htons(ETH_P_TEB)) ? |
177 | ARPHRD_ETHER : ARPHRD_IPGRE; | 178 | ARPHRD_ETHER : ARPHRD_IPGRE; |
179 | int score, cand_score = 4; | ||
178 | 180 | ||
179 | for (t = ign->tunnels_r_l[h0^h1]; t; t = t->next) { | 181 | for (t = ign->tunnels_r_l[h0^h1]; t; t = t->next) { |
180 | if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr) { | 182 | if (local != t->parms.iph.saddr || |
181 | if (t->parms.i_key == key && t->dev->flags & IFF_UP) { | 183 | remote != t->parms.iph.daddr || |
182 | if (t->dev->type == dev_type) | 184 | key != t->parms.i_key || |
183 | return t; | 185 | !(t->dev->flags & IFF_UP)) |
184 | if (t->dev->type == ARPHRD_IPGRE && !t2) | 186 | continue; |
185 | t2 = t; | 187 | |
186 | } | 188 | if (t->dev->type != ARPHRD_IPGRE && |
189 | t->dev->type != dev_type) | ||
190 | continue; | ||
191 | |||
192 | score = 0; | ||
193 | if (t->parms.link != link) | ||
194 | score |= 1; | ||
195 | if (t->dev->type != dev_type) | ||
196 | score |= 2; | ||
197 | if (score == 0) | ||
198 | return t; | ||
199 | |||
200 | if (score < cand_score) { | ||
201 | cand = t; | ||
202 | cand_score = score; | ||
187 | } | 203 | } |
188 | } | 204 | } |
189 | 205 | ||
190 | for (t = ign->tunnels_r[h0^h1]; t; t = t->next) { | 206 | for (t = ign->tunnels_r[h0^h1]; t; t = t->next) { |
191 | if (remote == t->parms.iph.daddr) { | 207 | if (remote != t->parms.iph.daddr || |
192 | if (t->parms.i_key == key && t->dev->flags & IFF_UP) { | 208 | key != t->parms.i_key || |
193 | if (t->dev->type == dev_type) | 209 | !(t->dev->flags & IFF_UP)) |
194 | return t; | 210 | continue; |
195 | if (t->dev->type == ARPHRD_IPGRE && !t2) | 211 | |
196 | t2 = t; | 212 | if (t->dev->type != ARPHRD_IPGRE && |
197 | } | 213 | t->dev->type != dev_type) |
214 | continue; | ||
215 | |||
216 | score = 0; | ||
217 | if (t->parms.link != link) | ||
218 | score |= 1; | ||
219 | if (t->dev->type != dev_type) | ||
220 | score |= 2; | ||
221 | if (score == 0) | ||
222 | return t; | ||
223 | |||
224 | if (score < cand_score) { | ||
225 | cand = t; | ||
226 | cand_score = score; | ||
198 | } | 227 | } |
199 | } | 228 | } |
200 | 229 | ||
201 | for (t = ign->tunnels_l[h1]; t; t = t->next) { | 230 | for (t = ign->tunnels_l[h1]; t; t = t->next) { |
202 | if (local == t->parms.iph.saddr || | 231 | if ((local != t->parms.iph.saddr && |
203 | (local == t->parms.iph.daddr && | 232 | (local != t->parms.iph.daddr || |
204 | ipv4_is_multicast(local))) { | 233 | !ipv4_is_multicast(local))) || |
205 | if (t->parms.i_key == key && t->dev->flags & IFF_UP) { | 234 | key != t->parms.i_key || |
206 | if (t->dev->type == dev_type) | 235 | !(t->dev->flags & IFF_UP)) |
207 | return t; | 236 | continue; |
208 | if (t->dev->type == ARPHRD_IPGRE && !t2) | 237 | |
209 | t2 = t; | 238 | if (t->dev->type != ARPHRD_IPGRE && |
210 | } | 239 | t->dev->type != dev_type) |
240 | continue; | ||
241 | |||
242 | score = 0; | ||
243 | if (t->parms.link != link) | ||
244 | score |= 1; | ||
245 | if (t->dev->type != dev_type) | ||
246 | score |= 2; | ||
247 | if (score == 0) | ||
248 | return t; | ||
249 | |||
250 | if (score < cand_score) { | ||
251 | cand = t; | ||
252 | cand_score = score; | ||
211 | } | 253 | } |
212 | } | 254 | } |
213 | 255 | ||
214 | for (t = ign->tunnels_wc[h1]; t; t = t->next) { | 256 | for (t = ign->tunnels_wc[h1]; t; t = t->next) { |
215 | if (t->parms.i_key == key && t->dev->flags & IFF_UP) { | 257 | if (t->parms.i_key != key || |
216 | if (t->dev->type == dev_type) | 258 | !(t->dev->flags & IFF_UP)) |
217 | return t; | 259 | continue; |
218 | if (t->dev->type == ARPHRD_IPGRE && !t2) | 260 | |
219 | t2 = t; | 261 | if (t->dev->type != ARPHRD_IPGRE && |
262 | t->dev->type != dev_type) | ||
263 | continue; | ||
264 | |||
265 | score = 0; | ||
266 | if (t->parms.link != link) | ||
267 | score |= 1; | ||
268 | if (t->dev->type != dev_type) | ||
269 | score |= 2; | ||
270 | if (score == 0) | ||
271 | return t; | ||
272 | |||
273 | if (score < cand_score) { | ||
274 | cand = t; | ||
275 | cand_score = score; | ||
220 | } | 276 | } |
221 | } | 277 | } |
222 | 278 | ||
223 | if (t2) | 279 | if (cand != NULL) |
224 | return t2; | 280 | return cand; |
225 | 281 | ||
226 | if (ign->fb_tunnel_dev->flags&IFF_UP) | 282 | if (ign->fb_tunnel_dev->flags & IFF_UP) |
227 | return netdev_priv(ign->fb_tunnel_dev); | 283 | return netdev_priv(ign->fb_tunnel_dev); |
284 | |||
228 | return NULL; | 285 | return NULL; |
229 | } | 286 | } |
230 | 287 | ||
@@ -284,6 +341,7 @@ static struct ip_tunnel *ipgre_tunnel_find(struct net *net, | |||
284 | __be32 remote = parms->iph.daddr; | 341 | __be32 remote = parms->iph.daddr; |
285 | __be32 local = parms->iph.saddr; | 342 | __be32 local = parms->iph.saddr; |
286 | __be32 key = parms->i_key; | 343 | __be32 key = parms->i_key; |
344 | int link = parms->link; | ||
287 | struct ip_tunnel *t, **tp; | 345 | struct ip_tunnel *t, **tp; |
288 | struct ipgre_net *ign = net_generic(net, ipgre_net_id); | 346 | struct ipgre_net *ign = net_generic(net, ipgre_net_id); |
289 | 347 | ||
@@ -291,6 +349,7 @@ static struct ip_tunnel *ipgre_tunnel_find(struct net *net, | |||
291 | if (local == t->parms.iph.saddr && | 349 | if (local == t->parms.iph.saddr && |
292 | remote == t->parms.iph.daddr && | 350 | remote == t->parms.iph.daddr && |
293 | key == t->parms.i_key && | 351 | key == t->parms.i_key && |
352 | link == t->parms.link && | ||
294 | type == t->dev->type) | 353 | type == t->dev->type) |
295 | break; | 354 | break; |
296 | 355 | ||
@@ -421,7 +480,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info) | |||
421 | } | 480 | } |
422 | 481 | ||
423 | read_lock(&ipgre_lock); | 482 | read_lock(&ipgre_lock); |
424 | t = ipgre_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr, | 483 | t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr, |
425 | flags & GRE_KEY ? | 484 | flags & GRE_KEY ? |
426 | *(((__be32 *)p) + (grehlen / 4) - 1) : 0, | 485 | *(((__be32 *)p) + (grehlen / 4) - 1) : 0, |
427 | p[1]); | 486 | p[1]); |
@@ -518,7 +577,7 @@ static int ipgre_rcv(struct sk_buff *skb) | |||
518 | gre_proto = *(__be16 *)(h + 2); | 577 | gre_proto = *(__be16 *)(h + 2); |
519 | 578 | ||
520 | read_lock(&ipgre_lock); | 579 | read_lock(&ipgre_lock); |
521 | if ((tunnel = ipgre_tunnel_lookup(dev_net(skb->dev), | 580 | if ((tunnel = ipgre_tunnel_lookup(skb->dev, |
522 | iph->saddr, iph->daddr, key, | 581 | iph->saddr, iph->daddr, key, |
523 | gre_proto))) { | 582 | gre_proto))) { |
524 | struct net_device_stats *stats = &tunnel->dev->stats; | 583 | struct net_device_stats *stats = &tunnel->dev->stats; |
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index d722013c1cae..90d22ae0a419 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c | |||
@@ -100,8 +100,8 @@ | |||
100 | #define CONF_NAMESERVERS_MAX 3 /* Maximum number of nameservers | 100 | #define CONF_NAMESERVERS_MAX 3 /* Maximum number of nameservers |
101 | - '3' from resolv.h */ | 101 | - '3' from resolv.h */ |
102 | 102 | ||
103 | #define NONE __constant_htonl(INADDR_NONE) | 103 | #define NONE cpu_to_be32(INADDR_NONE) |
104 | #define ANY __constant_htonl(INADDR_ANY) | 104 | #define ANY cpu_to_be32(INADDR_ANY) |
105 | 105 | ||
106 | /* | 106 | /* |
107 | * Public IP configuration | 107 | * Public IP configuration |
@@ -406,7 +406,7 @@ static int __init ic_defaults(void) | |||
406 | static int ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev); | 406 | static int ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev); |
407 | 407 | ||
408 | static struct packet_type rarp_packet_type __initdata = { | 408 | static struct packet_type rarp_packet_type __initdata = { |
409 | .type = __constant_htons(ETH_P_RARP), | 409 | .type = cpu_to_be16(ETH_P_RARP), |
410 | .func = ic_rarp_recv, | 410 | .func = ic_rarp_recv, |
411 | }; | 411 | }; |
412 | 412 | ||
@@ -568,7 +568,7 @@ struct bootp_pkt { /* BOOTP packet format */ | |||
568 | static int ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev); | 568 | static int ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev); |
569 | 569 | ||
570 | static struct packet_type bootp_packet_type __initdata = { | 570 | static struct packet_type bootp_packet_type __initdata = { |
571 | .type = __constant_htons(ETH_P_IP), | 571 | .type = cpu_to_be16(ETH_P_IP), |
572 | .func = ic_bootp_recv, | 572 | .func = ic_bootp_recv, |
573 | }; | 573 | }; |
574 | 574 | ||
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 14666449dc1c..21a6dc710f20 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -67,9 +67,6 @@ | |||
67 | #define CONFIG_IP_PIMSM 1 | 67 | #define CONFIG_IP_PIMSM 1 |
68 | #endif | 68 | #endif |
69 | 69 | ||
70 | static struct sock *mroute_socket; | ||
71 | |||
72 | |||
73 | /* Big lock, protecting vif table, mrt cache and mroute socket state. | 70 | /* Big lock, protecting vif table, mrt cache and mroute socket state. |
74 | Note that the changes are semaphored via rtnl_lock. | 71 | Note that the changes are semaphored via rtnl_lock. |
75 | */ | 72 | */ |
@@ -80,18 +77,9 @@ static DEFINE_RWLOCK(mrt_lock); | |||
80 | * Multicast router control variables | 77 | * Multicast router control variables |
81 | */ | 78 | */ |
82 | 79 | ||
83 | static struct vif_device vif_table[MAXVIFS]; /* Devices */ | 80 | #define VIF_EXISTS(_net, _idx) ((_net)->ipv4.vif_table[_idx].dev != NULL) |
84 | static int maxvif; | ||
85 | |||
86 | #define VIF_EXISTS(idx) (vif_table[idx].dev != NULL) | ||
87 | |||
88 | static int mroute_do_assert; /* Set in PIM assert */ | ||
89 | static int mroute_do_pim; | ||
90 | |||
91 | static struct mfc_cache *mfc_cache_array[MFC_LINES]; /* Forwarding cache */ | ||
92 | 81 | ||
93 | static struct mfc_cache *mfc_unres_queue; /* Queue of unresolved entries */ | 82 | static struct mfc_cache *mfc_unres_queue; /* Queue of unresolved entries */ |
94 | static atomic_t cache_resolve_queue_len; /* Size of unresolved */ | ||
95 | 83 | ||
96 | /* Special spinlock for queue of unresolved entries */ | 84 | /* Special spinlock for queue of unresolved entries */ |
97 | static DEFINE_SPINLOCK(mfc_unres_lock); | 85 | static DEFINE_SPINLOCK(mfc_unres_lock); |
@@ -107,7 +95,8 @@ static DEFINE_SPINLOCK(mfc_unres_lock); | |||
107 | static struct kmem_cache *mrt_cachep __read_mostly; | 95 | static struct kmem_cache *mrt_cachep __read_mostly; |
108 | 96 | ||
109 | static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local); | 97 | static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local); |
110 | static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert); | 98 | static int ipmr_cache_report(struct net *net, |
99 | struct sk_buff *pkt, vifi_t vifi, int assert); | ||
111 | static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm); | 100 | static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm); |
112 | 101 | ||
113 | #ifdef CONFIG_IP_PIMSM_V2 | 102 | #ifdef CONFIG_IP_PIMSM_V2 |
@@ -120,9 +109,11 @@ static struct timer_list ipmr_expire_timer; | |||
120 | 109 | ||
121 | static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v) | 110 | static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v) |
122 | { | 111 | { |
112 | struct net *net = dev_net(dev); | ||
113 | |||
123 | dev_close(dev); | 114 | dev_close(dev); |
124 | 115 | ||
125 | dev = __dev_get_by_name(&init_net, "tunl0"); | 116 | dev = __dev_get_by_name(net, "tunl0"); |
126 | if (dev) { | 117 | if (dev) { |
127 | const struct net_device_ops *ops = dev->netdev_ops; | 118 | const struct net_device_ops *ops = dev->netdev_ops; |
128 | struct ifreq ifr; | 119 | struct ifreq ifr; |
@@ -148,11 +139,11 @@ static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v) | |||
148 | } | 139 | } |
149 | 140 | ||
150 | static | 141 | static |
151 | struct net_device *ipmr_new_tunnel(struct vifctl *v) | 142 | struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v) |
152 | { | 143 | { |
153 | struct net_device *dev; | 144 | struct net_device *dev; |
154 | 145 | ||
155 | dev = __dev_get_by_name(&init_net, "tunl0"); | 146 | dev = __dev_get_by_name(net, "tunl0"); |
156 | 147 | ||
157 | if (dev) { | 148 | if (dev) { |
158 | const struct net_device_ops *ops = dev->netdev_ops; | 149 | const struct net_device_ops *ops = dev->netdev_ops; |
@@ -181,7 +172,8 @@ struct net_device *ipmr_new_tunnel(struct vifctl *v) | |||
181 | 172 | ||
182 | dev = NULL; | 173 | dev = NULL; |
183 | 174 | ||
184 | if (err == 0 && (dev = __dev_get_by_name(&init_net, p.name)) != NULL) { | 175 | if (err == 0 && |
176 | (dev = __dev_get_by_name(net, p.name)) != NULL) { | ||
185 | dev->flags |= IFF_MULTICAST; | 177 | dev->flags |= IFF_MULTICAST; |
186 | 178 | ||
187 | in_dev = __in_dev_get_rtnl(dev); | 179 | in_dev = __in_dev_get_rtnl(dev); |
@@ -209,14 +201,15 @@ failure: | |||
209 | 201 | ||
210 | #ifdef CONFIG_IP_PIMSM | 202 | #ifdef CONFIG_IP_PIMSM |
211 | 203 | ||
212 | static int reg_vif_num = -1; | ||
213 | |||
214 | static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) | 204 | static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) |
215 | { | 205 | { |
206 | struct net *net = dev_net(dev); | ||
207 | |||
216 | read_lock(&mrt_lock); | 208 | read_lock(&mrt_lock); |
217 | dev->stats.tx_bytes += skb->len; | 209 | dev->stats.tx_bytes += skb->len; |
218 | dev->stats.tx_packets++; | 210 | dev->stats.tx_packets++; |
219 | ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT); | 211 | ipmr_cache_report(net, skb, net->ipv4.mroute_reg_vif_num, |
212 | IGMPMSG_WHOLEPKT); | ||
220 | read_unlock(&mrt_lock); | 213 | read_unlock(&mrt_lock); |
221 | kfree_skb(skb); | 214 | kfree_skb(skb); |
222 | return 0; | 215 | return 0; |
@@ -283,16 +276,16 @@ failure: | |||
283 | * @notify: Set to 1, if the caller is a notifier_call | 276 | * @notify: Set to 1, if the caller is a notifier_call |
284 | */ | 277 | */ |
285 | 278 | ||
286 | static int vif_delete(int vifi, int notify) | 279 | static int vif_delete(struct net *net, int vifi, int notify) |
287 | { | 280 | { |
288 | struct vif_device *v; | 281 | struct vif_device *v; |
289 | struct net_device *dev; | 282 | struct net_device *dev; |
290 | struct in_device *in_dev; | 283 | struct in_device *in_dev; |
291 | 284 | ||
292 | if (vifi < 0 || vifi >= maxvif) | 285 | if (vifi < 0 || vifi >= net->ipv4.maxvif) |
293 | return -EADDRNOTAVAIL; | 286 | return -EADDRNOTAVAIL; |
294 | 287 | ||
295 | v = &vif_table[vifi]; | 288 | v = &net->ipv4.vif_table[vifi]; |
296 | 289 | ||
297 | write_lock_bh(&mrt_lock); | 290 | write_lock_bh(&mrt_lock); |
298 | dev = v->dev; | 291 | dev = v->dev; |
@@ -304,17 +297,17 @@ static int vif_delete(int vifi, int notify) | |||
304 | } | 297 | } |
305 | 298 | ||
306 | #ifdef CONFIG_IP_PIMSM | 299 | #ifdef CONFIG_IP_PIMSM |
307 | if (vifi == reg_vif_num) | 300 | if (vifi == net->ipv4.mroute_reg_vif_num) |
308 | reg_vif_num = -1; | 301 | net->ipv4.mroute_reg_vif_num = -1; |
309 | #endif | 302 | #endif |
310 | 303 | ||
311 | if (vifi+1 == maxvif) { | 304 | if (vifi+1 == net->ipv4.maxvif) { |
312 | int tmp; | 305 | int tmp; |
313 | for (tmp=vifi-1; tmp>=0; tmp--) { | 306 | for (tmp=vifi-1; tmp>=0; tmp--) { |
314 | if (VIF_EXISTS(tmp)) | 307 | if (VIF_EXISTS(net, tmp)) |
315 | break; | 308 | break; |
316 | } | 309 | } |
317 | maxvif = tmp+1; | 310 | net->ipv4.maxvif = tmp+1; |
318 | } | 311 | } |
319 | 312 | ||
320 | write_unlock_bh(&mrt_lock); | 313 | write_unlock_bh(&mrt_lock); |
@@ -333,6 +326,12 @@ static int vif_delete(int vifi, int notify) | |||
333 | return 0; | 326 | return 0; |
334 | } | 327 | } |
335 | 328 | ||
329 | static inline void ipmr_cache_free(struct mfc_cache *c) | ||
330 | { | ||
331 | release_net(mfc_net(c)); | ||
332 | kmem_cache_free(mrt_cachep, c); | ||
333 | } | ||
334 | |||
336 | /* Destroy an unresolved cache entry, killing queued skbs | 335 | /* Destroy an unresolved cache entry, killing queued skbs |
337 | and reporting error to netlink readers. | 336 | and reporting error to netlink readers. |
338 | */ | 337 | */ |
@@ -341,8 +340,9 @@ static void ipmr_destroy_unres(struct mfc_cache *c) | |||
341 | { | 340 | { |
342 | struct sk_buff *skb; | 341 | struct sk_buff *skb; |
343 | struct nlmsgerr *e; | 342 | struct nlmsgerr *e; |
343 | struct net *net = mfc_net(c); | ||
344 | 344 | ||
345 | atomic_dec(&cache_resolve_queue_len); | 345 | atomic_dec(&net->ipv4.cache_resolve_queue_len); |
346 | 346 | ||
347 | while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) { | 347 | while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) { |
348 | if (ip_hdr(skb)->version == 0) { | 348 | if (ip_hdr(skb)->version == 0) { |
@@ -354,12 +354,12 @@ static void ipmr_destroy_unres(struct mfc_cache *c) | |||
354 | e->error = -ETIMEDOUT; | 354 | e->error = -ETIMEDOUT; |
355 | memset(&e->msg, 0, sizeof(e->msg)); | 355 | memset(&e->msg, 0, sizeof(e->msg)); |
356 | 356 | ||
357 | rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid); | 357 | rtnl_unicast(skb, net, NETLINK_CB(skb).pid); |
358 | } else | 358 | } else |
359 | kfree_skb(skb); | 359 | kfree_skb(skb); |
360 | } | 360 | } |
361 | 361 | ||
362 | kmem_cache_free(mrt_cachep, c); | 362 | ipmr_cache_free(c); |
363 | } | 363 | } |
364 | 364 | ||
365 | 365 | ||
@@ -376,7 +376,7 @@ static void ipmr_expire_process(unsigned long dummy) | |||
376 | return; | 376 | return; |
377 | } | 377 | } |
378 | 378 | ||
379 | if (atomic_read(&cache_resolve_queue_len) == 0) | 379 | if (mfc_unres_queue == NULL) |
380 | goto out; | 380 | goto out; |
381 | 381 | ||
382 | now = jiffies; | 382 | now = jiffies; |
@@ -397,7 +397,7 @@ static void ipmr_expire_process(unsigned long dummy) | |||
397 | ipmr_destroy_unres(c); | 397 | ipmr_destroy_unres(c); |
398 | } | 398 | } |
399 | 399 | ||
400 | if (atomic_read(&cache_resolve_queue_len)) | 400 | if (mfc_unres_queue != NULL) |
401 | mod_timer(&ipmr_expire_timer, jiffies + expires); | 401 | mod_timer(&ipmr_expire_timer, jiffies + expires); |
402 | 402 | ||
403 | out: | 403 | out: |
@@ -409,13 +409,15 @@ out: | |||
409 | static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls) | 409 | static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls) |
410 | { | 410 | { |
411 | int vifi; | 411 | int vifi; |
412 | struct net *net = mfc_net(cache); | ||
412 | 413 | ||
413 | cache->mfc_un.res.minvif = MAXVIFS; | 414 | cache->mfc_un.res.minvif = MAXVIFS; |
414 | cache->mfc_un.res.maxvif = 0; | 415 | cache->mfc_un.res.maxvif = 0; |
415 | memset(cache->mfc_un.res.ttls, 255, MAXVIFS); | 416 | memset(cache->mfc_un.res.ttls, 255, MAXVIFS); |
416 | 417 | ||
417 | for (vifi=0; vifi<maxvif; vifi++) { | 418 | for (vifi = 0; vifi < net->ipv4.maxvif; vifi++) { |
418 | if (VIF_EXISTS(vifi) && ttls[vifi] && ttls[vifi] < 255) { | 419 | if (VIF_EXISTS(net, vifi) && |
420 | ttls[vifi] && ttls[vifi] < 255) { | ||
419 | cache->mfc_un.res.ttls[vifi] = ttls[vifi]; | 421 | cache->mfc_un.res.ttls[vifi] = ttls[vifi]; |
420 | if (cache->mfc_un.res.minvif > vifi) | 422 | if (cache->mfc_un.res.minvif > vifi) |
421 | cache->mfc_un.res.minvif = vifi; | 423 | cache->mfc_un.res.minvif = vifi; |
@@ -425,16 +427,16 @@ static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls) | |||
425 | } | 427 | } |
426 | } | 428 | } |
427 | 429 | ||
428 | static int vif_add(struct vifctl *vifc, int mrtsock) | 430 | static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock) |
429 | { | 431 | { |
430 | int vifi = vifc->vifc_vifi; | 432 | int vifi = vifc->vifc_vifi; |
431 | struct vif_device *v = &vif_table[vifi]; | 433 | struct vif_device *v = &net->ipv4.vif_table[vifi]; |
432 | struct net_device *dev; | 434 | struct net_device *dev; |
433 | struct in_device *in_dev; | 435 | struct in_device *in_dev; |
434 | int err; | 436 | int err; |
435 | 437 | ||
436 | /* Is vif busy ? */ | 438 | /* Is vif busy ? */ |
437 | if (VIF_EXISTS(vifi)) | 439 | if (VIF_EXISTS(net, vifi)) |
438 | return -EADDRINUSE; | 440 | return -EADDRINUSE; |
439 | 441 | ||
440 | switch (vifc->vifc_flags) { | 442 | switch (vifc->vifc_flags) { |
@@ -444,7 +446,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock) | |||
444 | * Special Purpose VIF in PIM | 446 | * Special Purpose VIF in PIM |
445 | * All the packets will be sent to the daemon | 447 | * All the packets will be sent to the daemon |
446 | */ | 448 | */ |
447 | if (reg_vif_num >= 0) | 449 | if (net->ipv4.mroute_reg_vif_num >= 0) |
448 | return -EADDRINUSE; | 450 | return -EADDRINUSE; |
449 | dev = ipmr_reg_vif(); | 451 | dev = ipmr_reg_vif(); |
450 | if (!dev) | 452 | if (!dev) |
@@ -458,7 +460,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock) | |||
458 | break; | 460 | break; |
459 | #endif | 461 | #endif |
460 | case VIFF_TUNNEL: | 462 | case VIFF_TUNNEL: |
461 | dev = ipmr_new_tunnel(vifc); | 463 | dev = ipmr_new_tunnel(net, vifc); |
462 | if (!dev) | 464 | if (!dev) |
463 | return -ENOBUFS; | 465 | return -ENOBUFS; |
464 | err = dev_set_allmulti(dev, 1); | 466 | err = dev_set_allmulti(dev, 1); |
@@ -469,7 +471,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock) | |||
469 | } | 471 | } |
470 | break; | 472 | break; |
471 | case 0: | 473 | case 0: |
472 | dev = ip_dev_find(&init_net, vifc->vifc_lcl_addr.s_addr); | 474 | dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr); |
473 | if (!dev) | 475 | if (!dev) |
474 | return -EADDRNOTAVAIL; | 476 | return -EADDRNOTAVAIL; |
475 | err = dev_set_allmulti(dev, 1); | 477 | err = dev_set_allmulti(dev, 1); |
@@ -510,20 +512,22 @@ static int vif_add(struct vifctl *vifc, int mrtsock) | |||
510 | v->dev = dev; | 512 | v->dev = dev; |
511 | #ifdef CONFIG_IP_PIMSM | 513 | #ifdef CONFIG_IP_PIMSM |
512 | if (v->flags&VIFF_REGISTER) | 514 | if (v->flags&VIFF_REGISTER) |
513 | reg_vif_num = vifi; | 515 | net->ipv4.mroute_reg_vif_num = vifi; |
514 | #endif | 516 | #endif |
515 | if (vifi+1 > maxvif) | 517 | if (vifi+1 > net->ipv4.maxvif) |
516 | maxvif = vifi+1; | 518 | net->ipv4.maxvif = vifi+1; |
517 | write_unlock_bh(&mrt_lock); | 519 | write_unlock_bh(&mrt_lock); |
518 | return 0; | 520 | return 0; |
519 | } | 521 | } |
520 | 522 | ||
521 | static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp) | 523 | static struct mfc_cache *ipmr_cache_find(struct net *net, |
524 | __be32 origin, | ||
525 | __be32 mcastgrp) | ||
522 | { | 526 | { |
523 | int line = MFC_HASH(mcastgrp, origin); | 527 | int line = MFC_HASH(mcastgrp, origin); |
524 | struct mfc_cache *c; | 528 | struct mfc_cache *c; |
525 | 529 | ||
526 | for (c=mfc_cache_array[line]; c; c = c->next) { | 530 | for (c = net->ipv4.mfc_cache_array[line]; c; c = c->next) { |
527 | if (c->mfc_origin==origin && c->mfc_mcastgrp==mcastgrp) | 531 | if (c->mfc_origin==origin && c->mfc_mcastgrp==mcastgrp) |
528 | break; | 532 | break; |
529 | } | 533 | } |
@@ -533,22 +537,24 @@ static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp) | |||
533 | /* | 537 | /* |
534 | * Allocate a multicast cache entry | 538 | * Allocate a multicast cache entry |
535 | */ | 539 | */ |
536 | static struct mfc_cache *ipmr_cache_alloc(void) | 540 | static struct mfc_cache *ipmr_cache_alloc(struct net *net) |
537 | { | 541 | { |
538 | struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); | 542 | struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); |
539 | if (c == NULL) | 543 | if (c == NULL) |
540 | return NULL; | 544 | return NULL; |
541 | c->mfc_un.res.minvif = MAXVIFS; | 545 | c->mfc_un.res.minvif = MAXVIFS; |
546 | mfc_net_set(c, net); | ||
542 | return c; | 547 | return c; |
543 | } | 548 | } |
544 | 549 | ||
545 | static struct mfc_cache *ipmr_cache_alloc_unres(void) | 550 | static struct mfc_cache *ipmr_cache_alloc_unres(struct net *net) |
546 | { | 551 | { |
547 | struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); | 552 | struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); |
548 | if (c == NULL) | 553 | if (c == NULL) |
549 | return NULL; | 554 | return NULL; |
550 | skb_queue_head_init(&c->mfc_un.unres.unresolved); | 555 | skb_queue_head_init(&c->mfc_un.unres.unresolved); |
551 | c->mfc_un.unres.expires = jiffies + 10*HZ; | 556 | c->mfc_un.unres.expires = jiffies + 10*HZ; |
557 | mfc_net_set(c, net); | ||
552 | return c; | 558 | return c; |
553 | } | 559 | } |
554 | 560 | ||
@@ -581,7 +587,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c) | |||
581 | memset(&e->msg, 0, sizeof(e->msg)); | 587 | memset(&e->msg, 0, sizeof(e->msg)); |
582 | } | 588 | } |
583 | 589 | ||
584 | rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid); | 590 | rtnl_unicast(skb, mfc_net(c), NETLINK_CB(skb).pid); |
585 | } else | 591 | } else |
586 | ip_mr_forward(skb, c, 0); | 592 | ip_mr_forward(skb, c, 0); |
587 | } | 593 | } |
@@ -594,7 +600,8 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c) | |||
594 | * Called under mrt_lock. | 600 | * Called under mrt_lock. |
595 | */ | 601 | */ |
596 | 602 | ||
597 | static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert) | 603 | static int ipmr_cache_report(struct net *net, |
604 | struct sk_buff *pkt, vifi_t vifi, int assert) | ||
598 | { | 605 | { |
599 | struct sk_buff *skb; | 606 | struct sk_buff *skb; |
600 | const int ihl = ip_hdrlen(pkt); | 607 | const int ihl = ip_hdrlen(pkt); |
@@ -626,7 +633,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert) | |||
626 | memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr)); | 633 | memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr)); |
627 | msg->im_msgtype = IGMPMSG_WHOLEPKT; | 634 | msg->im_msgtype = IGMPMSG_WHOLEPKT; |
628 | msg->im_mbz = 0; | 635 | msg->im_mbz = 0; |
629 | msg->im_vif = reg_vif_num; | 636 | msg->im_vif = net->ipv4.mroute_reg_vif_num; |
630 | ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2; | 637 | ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2; |
631 | ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) + | 638 | ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) + |
632 | sizeof(struct iphdr)); | 639 | sizeof(struct iphdr)); |
@@ -658,7 +665,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert) | |||
658 | skb->transport_header = skb->network_header; | 665 | skb->transport_header = skb->network_header; |
659 | } | 666 | } |
660 | 667 | ||
661 | if (mroute_socket == NULL) { | 668 | if (net->ipv4.mroute_sk == NULL) { |
662 | kfree_skb(skb); | 669 | kfree_skb(skb); |
663 | return -EINVAL; | 670 | return -EINVAL; |
664 | } | 671 | } |
@@ -666,7 +673,8 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert) | |||
666 | /* | 673 | /* |
667 | * Deliver to mrouted | 674 | * Deliver to mrouted |
668 | */ | 675 | */ |
669 | if ((ret = sock_queue_rcv_skb(mroute_socket, skb))<0) { | 676 | ret = sock_queue_rcv_skb(net->ipv4.mroute_sk, skb); |
677 | if (ret < 0) { | ||
670 | if (net_ratelimit()) | 678 | if (net_ratelimit()) |
671 | printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n"); | 679 | printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n"); |
672 | kfree_skb(skb); | 680 | kfree_skb(skb); |
@@ -680,7 +688,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert) | |||
680 | */ | 688 | */ |
681 | 689 | ||
682 | static int | 690 | static int |
683 | ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb) | 691 | ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb) |
684 | { | 692 | { |
685 | int err; | 693 | int err; |
686 | struct mfc_cache *c; | 694 | struct mfc_cache *c; |
@@ -688,7 +696,8 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb) | |||
688 | 696 | ||
689 | spin_lock_bh(&mfc_unres_lock); | 697 | spin_lock_bh(&mfc_unres_lock); |
690 | for (c=mfc_unres_queue; c; c=c->next) { | 698 | for (c=mfc_unres_queue; c; c=c->next) { |
691 | if (c->mfc_mcastgrp == iph->daddr && | 699 | if (net_eq(mfc_net(c), net) && |
700 | c->mfc_mcastgrp == iph->daddr && | ||
692 | c->mfc_origin == iph->saddr) | 701 | c->mfc_origin == iph->saddr) |
693 | break; | 702 | break; |
694 | } | 703 | } |
@@ -698,8 +707,8 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb) | |||
698 | * Create a new entry if allowable | 707 | * Create a new entry if allowable |
699 | */ | 708 | */ |
700 | 709 | ||
701 | if (atomic_read(&cache_resolve_queue_len) >= 10 || | 710 | if (atomic_read(&net->ipv4.cache_resolve_queue_len) >= 10 || |
702 | (c=ipmr_cache_alloc_unres())==NULL) { | 711 | (c = ipmr_cache_alloc_unres(net)) == NULL) { |
703 | spin_unlock_bh(&mfc_unres_lock); | 712 | spin_unlock_bh(&mfc_unres_lock); |
704 | 713 | ||
705 | kfree_skb(skb); | 714 | kfree_skb(skb); |
@@ -716,18 +725,19 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb) | |||
716 | /* | 725 | /* |
717 | * Reflect first query at mrouted. | 726 | * Reflect first query at mrouted. |
718 | */ | 727 | */ |
719 | if ((err = ipmr_cache_report(skb, vifi, IGMPMSG_NOCACHE))<0) { | 728 | err = ipmr_cache_report(net, skb, vifi, IGMPMSG_NOCACHE); |
729 | if (err < 0) { | ||
720 | /* If the report failed throw the cache entry | 730 | /* If the report failed throw the cache entry |
721 | out - Brad Parker | 731 | out - Brad Parker |
722 | */ | 732 | */ |
723 | spin_unlock_bh(&mfc_unres_lock); | 733 | spin_unlock_bh(&mfc_unres_lock); |
724 | 734 | ||
725 | kmem_cache_free(mrt_cachep, c); | 735 | ipmr_cache_free(c); |
726 | kfree_skb(skb); | 736 | kfree_skb(skb); |
727 | return err; | 737 | return err; |
728 | } | 738 | } |
729 | 739 | ||
730 | atomic_inc(&cache_resolve_queue_len); | 740 | atomic_inc(&net->ipv4.cache_resolve_queue_len); |
731 | c->next = mfc_unres_queue; | 741 | c->next = mfc_unres_queue; |
732 | mfc_unres_queue = c; | 742 | mfc_unres_queue = c; |
733 | 743 | ||
@@ -753,35 +763,37 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb) | |||
753 | * MFC cache manipulation by user space mroute daemon | 763 | * MFC cache manipulation by user space mroute daemon |
754 | */ | 764 | */ |
755 | 765 | ||
756 | static int ipmr_mfc_delete(struct mfcctl *mfc) | 766 | static int ipmr_mfc_delete(struct net *net, struct mfcctl *mfc) |
757 | { | 767 | { |
758 | int line; | 768 | int line; |
759 | struct mfc_cache *c, **cp; | 769 | struct mfc_cache *c, **cp; |
760 | 770 | ||
761 | line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); | 771 | line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); |
762 | 772 | ||
763 | for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) { | 773 | for (cp = &net->ipv4.mfc_cache_array[line]; |
774 | (c = *cp) != NULL; cp = &c->next) { | ||
764 | if (c->mfc_origin == mfc->mfcc_origin.s_addr && | 775 | if (c->mfc_origin == mfc->mfcc_origin.s_addr && |
765 | c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) { | 776 | c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) { |
766 | write_lock_bh(&mrt_lock); | 777 | write_lock_bh(&mrt_lock); |
767 | *cp = c->next; | 778 | *cp = c->next; |
768 | write_unlock_bh(&mrt_lock); | 779 | write_unlock_bh(&mrt_lock); |
769 | 780 | ||
770 | kmem_cache_free(mrt_cachep, c); | 781 | ipmr_cache_free(c); |
771 | return 0; | 782 | return 0; |
772 | } | 783 | } |
773 | } | 784 | } |
774 | return -ENOENT; | 785 | return -ENOENT; |
775 | } | 786 | } |
776 | 787 | ||
777 | static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock) | 788 | static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock) |
778 | { | 789 | { |
779 | int line; | 790 | int line; |
780 | struct mfc_cache *uc, *c, **cp; | 791 | struct mfc_cache *uc, *c, **cp; |
781 | 792 | ||
782 | line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); | 793 | line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); |
783 | 794 | ||
784 | for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) { | 795 | for (cp = &net->ipv4.mfc_cache_array[line]; |
796 | (c = *cp) != NULL; cp = &c->next) { | ||
785 | if (c->mfc_origin == mfc->mfcc_origin.s_addr && | 797 | if (c->mfc_origin == mfc->mfcc_origin.s_addr && |
786 | c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) | 798 | c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) |
787 | break; | 799 | break; |
@@ -800,7 +812,7 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock) | |||
800 | if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr)) | 812 | if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr)) |
801 | return -EINVAL; | 813 | return -EINVAL; |
802 | 814 | ||
803 | c = ipmr_cache_alloc(); | 815 | c = ipmr_cache_alloc(net); |
804 | if (c == NULL) | 816 | if (c == NULL) |
805 | return -ENOMEM; | 817 | return -ENOMEM; |
806 | 818 | ||
@@ -812,8 +824,8 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock) | |||
812 | c->mfc_flags |= MFC_STATIC; | 824 | c->mfc_flags |= MFC_STATIC; |
813 | 825 | ||
814 | write_lock_bh(&mrt_lock); | 826 | write_lock_bh(&mrt_lock); |
815 | c->next = mfc_cache_array[line]; | 827 | c->next = net->ipv4.mfc_cache_array[line]; |
816 | mfc_cache_array[line] = c; | 828 | net->ipv4.mfc_cache_array[line] = c; |
817 | write_unlock_bh(&mrt_lock); | 829 | write_unlock_bh(&mrt_lock); |
818 | 830 | ||
819 | /* | 831 | /* |
@@ -823,19 +835,21 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock) | |||
823 | spin_lock_bh(&mfc_unres_lock); | 835 | spin_lock_bh(&mfc_unres_lock); |
824 | for (cp = &mfc_unres_queue; (uc=*cp) != NULL; | 836 | for (cp = &mfc_unres_queue; (uc=*cp) != NULL; |
825 | cp = &uc->next) { | 837 | cp = &uc->next) { |
826 | if (uc->mfc_origin == c->mfc_origin && | 838 | if (net_eq(mfc_net(uc), net) && |
839 | uc->mfc_origin == c->mfc_origin && | ||
827 | uc->mfc_mcastgrp == c->mfc_mcastgrp) { | 840 | uc->mfc_mcastgrp == c->mfc_mcastgrp) { |
828 | *cp = uc->next; | 841 | *cp = uc->next; |
829 | if (atomic_dec_and_test(&cache_resolve_queue_len)) | 842 | atomic_dec(&net->ipv4.cache_resolve_queue_len); |
830 | del_timer(&ipmr_expire_timer); | ||
831 | break; | 843 | break; |
832 | } | 844 | } |
833 | } | 845 | } |
846 | if (mfc_unres_queue == NULL) | ||
847 | del_timer(&ipmr_expire_timer); | ||
834 | spin_unlock_bh(&mfc_unres_lock); | 848 | spin_unlock_bh(&mfc_unres_lock); |
835 | 849 | ||
836 | if (uc) { | 850 | if (uc) { |
837 | ipmr_cache_resolve(uc, c); | 851 | ipmr_cache_resolve(uc, c); |
838 | kmem_cache_free(mrt_cachep, uc); | 852 | ipmr_cache_free(uc); |
839 | } | 853 | } |
840 | return 0; | 854 | return 0; |
841 | } | 855 | } |
@@ -844,16 +858,16 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock) | |||
844 | * Close the multicast socket, and clear the vif tables etc | 858 | * Close the multicast socket, and clear the vif tables etc |
845 | */ | 859 | */ |
846 | 860 | ||
847 | static void mroute_clean_tables(struct sock *sk) | 861 | static void mroute_clean_tables(struct net *net) |
848 | { | 862 | { |
849 | int i; | 863 | int i; |
850 | 864 | ||
851 | /* | 865 | /* |
852 | * Shut down all active vif entries | 866 | * Shut down all active vif entries |
853 | */ | 867 | */ |
854 | for (i=0; i<maxvif; i++) { | 868 | for (i = 0; i < net->ipv4.maxvif; i++) { |
855 | if (!(vif_table[i].flags&VIFF_STATIC)) | 869 | if (!(net->ipv4.vif_table[i].flags&VIFF_STATIC)) |
856 | vif_delete(i, 0); | 870 | vif_delete(net, i, 0); |
857 | } | 871 | } |
858 | 872 | ||
859 | /* | 873 | /* |
@@ -862,7 +876,7 @@ static void mroute_clean_tables(struct sock *sk) | |||
862 | for (i=0; i<MFC_LINES; i++) { | 876 | for (i=0; i<MFC_LINES; i++) { |
863 | struct mfc_cache *c, **cp; | 877 | struct mfc_cache *c, **cp; |
864 | 878 | ||
865 | cp = &mfc_cache_array[i]; | 879 | cp = &net->ipv4.mfc_cache_array[i]; |
866 | while ((c = *cp) != NULL) { | 880 | while ((c = *cp) != NULL) { |
867 | if (c->mfc_flags&MFC_STATIC) { | 881 | if (c->mfc_flags&MFC_STATIC) { |
868 | cp = &c->next; | 882 | cp = &c->next; |
@@ -872,22 +886,23 @@ static void mroute_clean_tables(struct sock *sk) | |||
872 | *cp = c->next; | 886 | *cp = c->next; |
873 | write_unlock_bh(&mrt_lock); | 887 | write_unlock_bh(&mrt_lock); |
874 | 888 | ||
875 | kmem_cache_free(mrt_cachep, c); | 889 | ipmr_cache_free(c); |
876 | } | 890 | } |
877 | } | 891 | } |
878 | 892 | ||
879 | if (atomic_read(&cache_resolve_queue_len) != 0) { | 893 | if (atomic_read(&net->ipv4.cache_resolve_queue_len) != 0) { |
880 | struct mfc_cache *c; | 894 | struct mfc_cache *c, **cp; |
881 | 895 | ||
882 | spin_lock_bh(&mfc_unres_lock); | 896 | spin_lock_bh(&mfc_unres_lock); |
883 | while (mfc_unres_queue != NULL) { | 897 | cp = &mfc_unres_queue; |
884 | c = mfc_unres_queue; | 898 | while ((c = *cp) != NULL) { |
885 | mfc_unres_queue = c->next; | 899 | if (!net_eq(mfc_net(c), net)) { |
886 | spin_unlock_bh(&mfc_unres_lock); | 900 | cp = &c->next; |
901 | continue; | ||
902 | } | ||
903 | *cp = c->next; | ||
887 | 904 | ||
888 | ipmr_destroy_unres(c); | 905 | ipmr_destroy_unres(c); |
889 | |||
890 | spin_lock_bh(&mfc_unres_lock); | ||
891 | } | 906 | } |
892 | spin_unlock_bh(&mfc_unres_lock); | 907 | spin_unlock_bh(&mfc_unres_lock); |
893 | } | 908 | } |
@@ -895,15 +910,17 @@ static void mroute_clean_tables(struct sock *sk) | |||
895 | 910 | ||
896 | static void mrtsock_destruct(struct sock *sk) | 911 | static void mrtsock_destruct(struct sock *sk) |
897 | { | 912 | { |
913 | struct net *net = sock_net(sk); | ||
914 | |||
898 | rtnl_lock(); | 915 | rtnl_lock(); |
899 | if (sk == mroute_socket) { | 916 | if (sk == net->ipv4.mroute_sk) { |
900 | IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)--; | 917 | IPV4_DEVCONF_ALL(net, MC_FORWARDING)--; |
901 | 918 | ||
902 | write_lock_bh(&mrt_lock); | 919 | write_lock_bh(&mrt_lock); |
903 | mroute_socket = NULL; | 920 | net->ipv4.mroute_sk = NULL; |
904 | write_unlock_bh(&mrt_lock); | 921 | write_unlock_bh(&mrt_lock); |
905 | 922 | ||
906 | mroute_clean_tables(sk); | 923 | mroute_clean_tables(net); |
907 | } | 924 | } |
908 | rtnl_unlock(); | 925 | rtnl_unlock(); |
909 | } | 926 | } |
@@ -920,9 +937,10 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int | |||
920 | int ret; | 937 | int ret; |
921 | struct vifctl vif; | 938 | struct vifctl vif; |
922 | struct mfcctl mfc; | 939 | struct mfcctl mfc; |
940 | struct net *net = sock_net(sk); | ||
923 | 941 | ||
924 | if (optname != MRT_INIT) { | 942 | if (optname != MRT_INIT) { |
925 | if (sk != mroute_socket && !capable(CAP_NET_ADMIN)) | 943 | if (sk != net->ipv4.mroute_sk && !capable(CAP_NET_ADMIN)) |
926 | return -EACCES; | 944 | return -EACCES; |
927 | } | 945 | } |
928 | 946 | ||
@@ -935,7 +953,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int | |||
935 | return -ENOPROTOOPT; | 953 | return -ENOPROTOOPT; |
936 | 954 | ||
937 | rtnl_lock(); | 955 | rtnl_lock(); |
938 | if (mroute_socket) { | 956 | if (net->ipv4.mroute_sk) { |
939 | rtnl_unlock(); | 957 | rtnl_unlock(); |
940 | return -EADDRINUSE; | 958 | return -EADDRINUSE; |
941 | } | 959 | } |
@@ -943,15 +961,15 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int | |||
943 | ret = ip_ra_control(sk, 1, mrtsock_destruct); | 961 | ret = ip_ra_control(sk, 1, mrtsock_destruct); |
944 | if (ret == 0) { | 962 | if (ret == 0) { |
945 | write_lock_bh(&mrt_lock); | 963 | write_lock_bh(&mrt_lock); |
946 | mroute_socket = sk; | 964 | net->ipv4.mroute_sk = sk; |
947 | write_unlock_bh(&mrt_lock); | 965 | write_unlock_bh(&mrt_lock); |
948 | 966 | ||
949 | IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)++; | 967 | IPV4_DEVCONF_ALL(net, MC_FORWARDING)++; |
950 | } | 968 | } |
951 | rtnl_unlock(); | 969 | rtnl_unlock(); |
952 | return ret; | 970 | return ret; |
953 | case MRT_DONE: | 971 | case MRT_DONE: |
954 | if (sk != mroute_socket) | 972 | if (sk != net->ipv4.mroute_sk) |
955 | return -EACCES; | 973 | return -EACCES; |
956 | return ip_ra_control(sk, 0, NULL); | 974 | return ip_ra_control(sk, 0, NULL); |
957 | case MRT_ADD_VIF: | 975 | case MRT_ADD_VIF: |
@@ -964,9 +982,9 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int | |||
964 | return -ENFILE; | 982 | return -ENFILE; |
965 | rtnl_lock(); | 983 | rtnl_lock(); |
966 | if (optname == MRT_ADD_VIF) { | 984 | if (optname == MRT_ADD_VIF) { |
967 | ret = vif_add(&vif, sk==mroute_socket); | 985 | ret = vif_add(net, &vif, sk == net->ipv4.mroute_sk); |
968 | } else { | 986 | } else { |
969 | ret = vif_delete(vif.vifc_vifi, 0); | 987 | ret = vif_delete(net, vif.vifc_vifi, 0); |
970 | } | 988 | } |
971 | rtnl_unlock(); | 989 | rtnl_unlock(); |
972 | return ret; | 990 | return ret; |
@@ -983,9 +1001,9 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int | |||
983 | return -EFAULT; | 1001 | return -EFAULT; |
984 | rtnl_lock(); | 1002 | rtnl_lock(); |
985 | if (optname == MRT_DEL_MFC) | 1003 | if (optname == MRT_DEL_MFC) |
986 | ret = ipmr_mfc_delete(&mfc); | 1004 | ret = ipmr_mfc_delete(net, &mfc); |
987 | else | 1005 | else |
988 | ret = ipmr_mfc_add(&mfc, sk==mroute_socket); | 1006 | ret = ipmr_mfc_add(net, &mfc, sk == net->ipv4.mroute_sk); |
989 | rtnl_unlock(); | 1007 | rtnl_unlock(); |
990 | return ret; | 1008 | return ret; |
991 | /* | 1009 | /* |
@@ -996,7 +1014,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int | |||
996 | int v; | 1014 | int v; |
997 | if (get_user(v,(int __user *)optval)) | 1015 | if (get_user(v,(int __user *)optval)) |
998 | return -EFAULT; | 1016 | return -EFAULT; |
999 | mroute_do_assert=(v)?1:0; | 1017 | net->ipv4.mroute_do_assert = (v) ? 1 : 0; |
1000 | return 0; | 1018 | return 0; |
1001 | } | 1019 | } |
1002 | #ifdef CONFIG_IP_PIMSM | 1020 | #ifdef CONFIG_IP_PIMSM |
@@ -1010,11 +1028,11 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int | |||
1010 | 1028 | ||
1011 | rtnl_lock(); | 1029 | rtnl_lock(); |
1012 | ret = 0; | 1030 | ret = 0; |
1013 | if (v != mroute_do_pim) { | 1031 | if (v != net->ipv4.mroute_do_pim) { |
1014 | mroute_do_pim = v; | 1032 | net->ipv4.mroute_do_pim = v; |
1015 | mroute_do_assert = v; | 1033 | net->ipv4.mroute_do_assert = v; |
1016 | #ifdef CONFIG_IP_PIMSM_V2 | 1034 | #ifdef CONFIG_IP_PIMSM_V2 |
1017 | if (mroute_do_pim) | 1035 | if (net->ipv4.mroute_do_pim) |
1018 | ret = inet_add_protocol(&pim_protocol, | 1036 | ret = inet_add_protocol(&pim_protocol, |
1019 | IPPROTO_PIM); | 1037 | IPPROTO_PIM); |
1020 | else | 1038 | else |
@@ -1045,6 +1063,7 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int | |||
1045 | { | 1063 | { |
1046 | int olr; | 1064 | int olr; |
1047 | int val; | 1065 | int val; |
1066 | struct net *net = sock_net(sk); | ||
1048 | 1067 | ||
1049 | if (optname != MRT_VERSION && | 1068 | if (optname != MRT_VERSION && |
1050 | #ifdef CONFIG_IP_PIMSM | 1069 | #ifdef CONFIG_IP_PIMSM |
@@ -1066,10 +1085,10 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int | |||
1066 | val = 0x0305; | 1085 | val = 0x0305; |
1067 | #ifdef CONFIG_IP_PIMSM | 1086 | #ifdef CONFIG_IP_PIMSM |
1068 | else if (optname == MRT_PIM) | 1087 | else if (optname == MRT_PIM) |
1069 | val = mroute_do_pim; | 1088 | val = net->ipv4.mroute_do_pim; |
1070 | #endif | 1089 | #endif |
1071 | else | 1090 | else |
1072 | val = mroute_do_assert; | 1091 | val = net->ipv4.mroute_do_assert; |
1073 | if (copy_to_user(optval, &val, olr)) | 1092 | if (copy_to_user(optval, &val, olr)) |
1074 | return -EFAULT; | 1093 | return -EFAULT; |
1075 | return 0; | 1094 | return 0; |
@@ -1085,16 +1104,17 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) | |||
1085 | struct sioc_vif_req vr; | 1104 | struct sioc_vif_req vr; |
1086 | struct vif_device *vif; | 1105 | struct vif_device *vif; |
1087 | struct mfc_cache *c; | 1106 | struct mfc_cache *c; |
1107 | struct net *net = sock_net(sk); | ||
1088 | 1108 | ||
1089 | switch (cmd) { | 1109 | switch (cmd) { |
1090 | case SIOCGETVIFCNT: | 1110 | case SIOCGETVIFCNT: |
1091 | if (copy_from_user(&vr, arg, sizeof(vr))) | 1111 | if (copy_from_user(&vr, arg, sizeof(vr))) |
1092 | return -EFAULT; | 1112 | return -EFAULT; |
1093 | if (vr.vifi >= maxvif) | 1113 | if (vr.vifi >= net->ipv4.maxvif) |
1094 | return -EINVAL; | 1114 | return -EINVAL; |
1095 | read_lock(&mrt_lock); | 1115 | read_lock(&mrt_lock); |
1096 | vif=&vif_table[vr.vifi]; | 1116 | vif = &net->ipv4.vif_table[vr.vifi]; |
1097 | if (VIF_EXISTS(vr.vifi)) { | 1117 | if (VIF_EXISTS(net, vr.vifi)) { |
1098 | vr.icount = vif->pkt_in; | 1118 | vr.icount = vif->pkt_in; |
1099 | vr.ocount = vif->pkt_out; | 1119 | vr.ocount = vif->pkt_out; |
1100 | vr.ibytes = vif->bytes_in; | 1120 | vr.ibytes = vif->bytes_in; |
@@ -1112,7 +1132,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) | |||
1112 | return -EFAULT; | 1132 | return -EFAULT; |
1113 | 1133 | ||
1114 | read_lock(&mrt_lock); | 1134 | read_lock(&mrt_lock); |
1115 | c = ipmr_cache_find(sr.src.s_addr, sr.grp.s_addr); | 1135 | c = ipmr_cache_find(net, sr.src.s_addr, sr.grp.s_addr); |
1116 | if (c) { | 1136 | if (c) { |
1117 | sr.pktcnt = c->mfc_un.res.pkt; | 1137 | sr.pktcnt = c->mfc_un.res.pkt; |
1118 | sr.bytecnt = c->mfc_un.res.bytes; | 1138 | sr.bytecnt = c->mfc_un.res.bytes; |
@@ -1134,18 +1154,19 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) | |||
1134 | static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr) | 1154 | static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr) |
1135 | { | 1155 | { |
1136 | struct net_device *dev = ptr; | 1156 | struct net_device *dev = ptr; |
1157 | struct net *net = dev_net(dev); | ||
1137 | struct vif_device *v; | 1158 | struct vif_device *v; |
1138 | int ct; | 1159 | int ct; |
1139 | 1160 | ||
1140 | if (!net_eq(dev_net(dev), &init_net)) | 1161 | if (!net_eq(dev_net(dev), net)) |
1141 | return NOTIFY_DONE; | 1162 | return NOTIFY_DONE; |
1142 | 1163 | ||
1143 | if (event != NETDEV_UNREGISTER) | 1164 | if (event != NETDEV_UNREGISTER) |
1144 | return NOTIFY_DONE; | 1165 | return NOTIFY_DONE; |
1145 | v=&vif_table[0]; | 1166 | v = &net->ipv4.vif_table[0]; |
1146 | for (ct=0; ct<maxvif; ct++,v++) { | 1167 | for (ct = 0; ct < net->ipv4.maxvif; ct++, v++) { |
1147 | if (v->dev == dev) | 1168 | if (v->dev == dev) |
1148 | vif_delete(ct, 1); | 1169 | vif_delete(net, ct, 1); |
1149 | } | 1170 | } |
1150 | return NOTIFY_DONE; | 1171 | return NOTIFY_DONE; |
1151 | } | 1172 | } |
@@ -1205,8 +1226,9 @@ static inline int ipmr_forward_finish(struct sk_buff *skb) | |||
1205 | 1226 | ||
1206 | static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) | 1227 | static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) |
1207 | { | 1228 | { |
1229 | struct net *net = mfc_net(c); | ||
1208 | const struct iphdr *iph = ip_hdr(skb); | 1230 | const struct iphdr *iph = ip_hdr(skb); |
1209 | struct vif_device *vif = &vif_table[vifi]; | 1231 | struct vif_device *vif = &net->ipv4.vif_table[vifi]; |
1210 | struct net_device *dev; | 1232 | struct net_device *dev; |
1211 | struct rtable *rt; | 1233 | struct rtable *rt; |
1212 | int encap = 0; | 1234 | int encap = 0; |
@@ -1220,7 +1242,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) | |||
1220 | vif->bytes_out += skb->len; | 1242 | vif->bytes_out += skb->len; |
1221 | vif->dev->stats.tx_bytes += skb->len; | 1243 | vif->dev->stats.tx_bytes += skb->len; |
1222 | vif->dev->stats.tx_packets++; | 1244 | vif->dev->stats.tx_packets++; |
1223 | ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT); | 1245 | ipmr_cache_report(net, skb, vifi, IGMPMSG_WHOLEPKT); |
1224 | kfree_skb(skb); | 1246 | kfree_skb(skb); |
1225 | return; | 1247 | return; |
1226 | } | 1248 | } |
@@ -1233,7 +1255,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) | |||
1233 | .saddr = vif->local, | 1255 | .saddr = vif->local, |
1234 | .tos = RT_TOS(iph->tos) } }, | 1256 | .tos = RT_TOS(iph->tos) } }, |
1235 | .proto = IPPROTO_IPIP }; | 1257 | .proto = IPPROTO_IPIP }; |
1236 | if (ip_route_output_key(&init_net, &rt, &fl)) | 1258 | if (ip_route_output_key(net, &rt, &fl)) |
1237 | goto out_free; | 1259 | goto out_free; |
1238 | encap = sizeof(struct iphdr); | 1260 | encap = sizeof(struct iphdr); |
1239 | } else { | 1261 | } else { |
@@ -1242,7 +1264,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) | |||
1242 | { .daddr = iph->daddr, | 1264 | { .daddr = iph->daddr, |
1243 | .tos = RT_TOS(iph->tos) } }, | 1265 | .tos = RT_TOS(iph->tos) } }, |
1244 | .proto = IPPROTO_IPIP }; | 1266 | .proto = IPPROTO_IPIP }; |
1245 | if (ip_route_output_key(&init_net, &rt, &fl)) | 1267 | if (ip_route_output_key(net, &rt, &fl)) |
1246 | goto out_free; | 1268 | goto out_free; |
1247 | } | 1269 | } |
1248 | 1270 | ||
@@ -1306,9 +1328,10 @@ out_free: | |||
1306 | 1328 | ||
1307 | static int ipmr_find_vif(struct net_device *dev) | 1329 | static int ipmr_find_vif(struct net_device *dev) |
1308 | { | 1330 | { |
1331 | struct net *net = dev_net(dev); | ||
1309 | int ct; | 1332 | int ct; |
1310 | for (ct=maxvif-1; ct>=0; ct--) { | 1333 | for (ct = net->ipv4.maxvif-1; ct >= 0; ct--) { |
1311 | if (vif_table[ct].dev == dev) | 1334 | if (net->ipv4.vif_table[ct].dev == dev) |
1312 | break; | 1335 | break; |
1313 | } | 1336 | } |
1314 | return ct; | 1337 | return ct; |
@@ -1320,6 +1343,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local | |||
1320 | { | 1343 | { |
1321 | int psend = -1; | 1344 | int psend = -1; |
1322 | int vif, ct; | 1345 | int vif, ct; |
1346 | struct net *net = mfc_net(cache); | ||
1323 | 1347 | ||
1324 | vif = cache->mfc_parent; | 1348 | vif = cache->mfc_parent; |
1325 | cache->mfc_un.res.pkt++; | 1349 | cache->mfc_un.res.pkt++; |
@@ -1328,7 +1352,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local | |||
1328 | /* | 1352 | /* |
1329 | * Wrong interface: drop packet and (maybe) send PIM assert. | 1353 | * Wrong interface: drop packet and (maybe) send PIM assert. |
1330 | */ | 1354 | */ |
1331 | if (vif_table[vif].dev != skb->dev) { | 1355 | if (net->ipv4.vif_table[vif].dev != skb->dev) { |
1332 | int true_vifi; | 1356 | int true_vifi; |
1333 | 1357 | ||
1334 | if (skb->rtable->fl.iif == 0) { | 1358 | if (skb->rtable->fl.iif == 0) { |
@@ -1349,23 +1373,24 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local | |||
1349 | cache->mfc_un.res.wrong_if++; | 1373 | cache->mfc_un.res.wrong_if++; |
1350 | true_vifi = ipmr_find_vif(skb->dev); | 1374 | true_vifi = ipmr_find_vif(skb->dev); |
1351 | 1375 | ||
1352 | if (true_vifi >= 0 && mroute_do_assert && | 1376 | if (true_vifi >= 0 && net->ipv4.mroute_do_assert && |
1353 | /* pimsm uses asserts, when switching from RPT to SPT, | 1377 | /* pimsm uses asserts, when switching from RPT to SPT, |
1354 | so that we cannot check that packet arrived on an oif. | 1378 | so that we cannot check that packet arrived on an oif. |
1355 | It is bad, but otherwise we would need to move pretty | 1379 | It is bad, but otherwise we would need to move pretty |
1356 | large chunk of pimd to kernel. Ough... --ANK | 1380 | large chunk of pimd to kernel. Ough... --ANK |
1357 | */ | 1381 | */ |
1358 | (mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) && | 1382 | (net->ipv4.mroute_do_pim || |
1383 | cache->mfc_un.res.ttls[true_vifi] < 255) && | ||
1359 | time_after(jiffies, | 1384 | time_after(jiffies, |
1360 | cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) { | 1385 | cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) { |
1361 | cache->mfc_un.res.last_assert = jiffies; | 1386 | cache->mfc_un.res.last_assert = jiffies; |
1362 | ipmr_cache_report(skb, true_vifi, IGMPMSG_WRONGVIF); | 1387 | ipmr_cache_report(net, skb, true_vifi, IGMPMSG_WRONGVIF); |
1363 | } | 1388 | } |
1364 | goto dont_forward; | 1389 | goto dont_forward; |
1365 | } | 1390 | } |
1366 | 1391 | ||
1367 | vif_table[vif].pkt_in++; | 1392 | net->ipv4.vif_table[vif].pkt_in++; |
1368 | vif_table[vif].bytes_in += skb->len; | 1393 | net->ipv4.vif_table[vif].bytes_in += skb->len; |
1369 | 1394 | ||
1370 | /* | 1395 | /* |
1371 | * Forward the frame | 1396 | * Forward the frame |
@@ -1405,6 +1430,7 @@ dont_forward: | |||
1405 | int ip_mr_input(struct sk_buff *skb) | 1430 | int ip_mr_input(struct sk_buff *skb) |
1406 | { | 1431 | { |
1407 | struct mfc_cache *cache; | 1432 | struct mfc_cache *cache; |
1433 | struct net *net = dev_net(skb->dev); | ||
1408 | int local = skb->rtable->rt_flags&RTCF_LOCAL; | 1434 | int local = skb->rtable->rt_flags&RTCF_LOCAL; |
1409 | 1435 | ||
1410 | /* Packet is looped back after forward, it should not be | 1436 | /* Packet is looped back after forward, it should not be |
@@ -1425,9 +1451,9 @@ int ip_mr_input(struct sk_buff *skb) | |||
1425 | that we can forward NO IGMP messages. | 1451 | that we can forward NO IGMP messages. |
1426 | */ | 1452 | */ |
1427 | read_lock(&mrt_lock); | 1453 | read_lock(&mrt_lock); |
1428 | if (mroute_socket) { | 1454 | if (net->ipv4.mroute_sk) { |
1429 | nf_reset(skb); | 1455 | nf_reset(skb); |
1430 | raw_rcv(mroute_socket, skb); | 1456 | raw_rcv(net->ipv4.mroute_sk, skb); |
1431 | read_unlock(&mrt_lock); | 1457 | read_unlock(&mrt_lock); |
1432 | return 0; | 1458 | return 0; |
1433 | } | 1459 | } |
@@ -1436,7 +1462,7 @@ int ip_mr_input(struct sk_buff *skb) | |||
1436 | } | 1462 | } |
1437 | 1463 | ||
1438 | read_lock(&mrt_lock); | 1464 | read_lock(&mrt_lock); |
1439 | cache = ipmr_cache_find(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); | 1465 | cache = ipmr_cache_find(net, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); |
1440 | 1466 | ||
1441 | /* | 1467 | /* |
1442 | * No usable cache entry | 1468 | * No usable cache entry |
@@ -1456,7 +1482,7 @@ int ip_mr_input(struct sk_buff *skb) | |||
1456 | 1482 | ||
1457 | vif = ipmr_find_vif(skb->dev); | 1483 | vif = ipmr_find_vif(skb->dev); |
1458 | if (vif >= 0) { | 1484 | if (vif >= 0) { |
1459 | int err = ipmr_cache_unresolved(vif, skb); | 1485 | int err = ipmr_cache_unresolved(net, vif, skb); |
1460 | read_unlock(&mrt_lock); | 1486 | read_unlock(&mrt_lock); |
1461 | 1487 | ||
1462 | return err; | 1488 | return err; |
@@ -1487,6 +1513,7 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen) | |||
1487 | { | 1513 | { |
1488 | struct net_device *reg_dev = NULL; | 1514 | struct net_device *reg_dev = NULL; |
1489 | struct iphdr *encap; | 1515 | struct iphdr *encap; |
1516 | struct net *net = dev_net(skb->dev); | ||
1490 | 1517 | ||
1491 | encap = (struct iphdr *)(skb_transport_header(skb) + pimlen); | 1518 | encap = (struct iphdr *)(skb_transport_header(skb) + pimlen); |
1492 | /* | 1519 | /* |
@@ -1501,8 +1528,8 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen) | |||
1501 | return 1; | 1528 | return 1; |
1502 | 1529 | ||
1503 | read_lock(&mrt_lock); | 1530 | read_lock(&mrt_lock); |
1504 | if (reg_vif_num >= 0) | 1531 | if (net->ipv4.mroute_reg_vif_num >= 0) |
1505 | reg_dev = vif_table[reg_vif_num].dev; | 1532 | reg_dev = net->ipv4.vif_table[net->ipv4.mroute_reg_vif_num].dev; |
1506 | if (reg_dev) | 1533 | if (reg_dev) |
1507 | dev_hold(reg_dev); | 1534 | dev_hold(reg_dev); |
1508 | read_unlock(&mrt_lock); | 1535 | read_unlock(&mrt_lock); |
@@ -1537,13 +1564,14 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen) | |||
1537 | int pim_rcv_v1(struct sk_buff * skb) | 1564 | int pim_rcv_v1(struct sk_buff * skb) |
1538 | { | 1565 | { |
1539 | struct igmphdr *pim; | 1566 | struct igmphdr *pim; |
1567 | struct net *net = dev_net(skb->dev); | ||
1540 | 1568 | ||
1541 | if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr))) | 1569 | if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr))) |
1542 | goto drop; | 1570 | goto drop; |
1543 | 1571 | ||
1544 | pim = igmp_hdr(skb); | 1572 | pim = igmp_hdr(skb); |
1545 | 1573 | ||
1546 | if (!mroute_do_pim || | 1574 | if (!net->ipv4.mroute_do_pim || |
1547 | pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER) | 1575 | pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER) |
1548 | goto drop; | 1576 | goto drop; |
1549 | 1577 | ||
@@ -1583,7 +1611,8 @@ ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm) | |||
1583 | { | 1611 | { |
1584 | int ct; | 1612 | int ct; |
1585 | struct rtnexthop *nhp; | 1613 | struct rtnexthop *nhp; |
1586 | struct net_device *dev = vif_table[c->mfc_parent].dev; | 1614 | struct net *net = mfc_net(c); |
1615 | struct net_device *dev = net->ipv4.vif_table[c->mfc_parent].dev; | ||
1587 | u8 *b = skb_tail_pointer(skb); | 1616 | u8 *b = skb_tail_pointer(skb); |
1588 | struct rtattr *mp_head; | 1617 | struct rtattr *mp_head; |
1589 | 1618 | ||
@@ -1599,7 +1628,7 @@ ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm) | |||
1599 | nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); | 1628 | nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); |
1600 | nhp->rtnh_flags = 0; | 1629 | nhp->rtnh_flags = 0; |
1601 | nhp->rtnh_hops = c->mfc_un.res.ttls[ct]; | 1630 | nhp->rtnh_hops = c->mfc_un.res.ttls[ct]; |
1602 | nhp->rtnh_ifindex = vif_table[ct].dev->ifindex; | 1631 | nhp->rtnh_ifindex = net->ipv4.vif_table[ct].dev->ifindex; |
1603 | nhp->rtnh_len = sizeof(*nhp); | 1632 | nhp->rtnh_len = sizeof(*nhp); |
1604 | } | 1633 | } |
1605 | } | 1634 | } |
@@ -1613,14 +1642,15 @@ rtattr_failure: | |||
1613 | return -EMSGSIZE; | 1642 | return -EMSGSIZE; |
1614 | } | 1643 | } |
1615 | 1644 | ||
1616 | int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait) | 1645 | int ipmr_get_route(struct net *net, |
1646 | struct sk_buff *skb, struct rtmsg *rtm, int nowait) | ||
1617 | { | 1647 | { |
1618 | int err; | 1648 | int err; |
1619 | struct mfc_cache *cache; | 1649 | struct mfc_cache *cache; |
1620 | struct rtable *rt = skb->rtable; | 1650 | struct rtable *rt = skb->rtable; |
1621 | 1651 | ||
1622 | read_lock(&mrt_lock); | 1652 | read_lock(&mrt_lock); |
1623 | cache = ipmr_cache_find(rt->rt_src, rt->rt_dst); | 1653 | cache = ipmr_cache_find(net, rt->rt_src, rt->rt_dst); |
1624 | 1654 | ||
1625 | if (cache == NULL) { | 1655 | if (cache == NULL) { |
1626 | struct sk_buff *skb2; | 1656 | struct sk_buff *skb2; |
@@ -1651,7 +1681,7 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait) | |||
1651 | iph->saddr = rt->rt_src; | 1681 | iph->saddr = rt->rt_src; |
1652 | iph->daddr = rt->rt_dst; | 1682 | iph->daddr = rt->rt_dst; |
1653 | iph->version = 0; | 1683 | iph->version = 0; |
1654 | err = ipmr_cache_unresolved(vif, skb2); | 1684 | err = ipmr_cache_unresolved(net, vif, skb2); |
1655 | read_unlock(&mrt_lock); | 1685 | read_unlock(&mrt_lock); |
1656 | return err; | 1686 | return err; |
1657 | } | 1687 | } |
@@ -1668,17 +1698,19 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait) | |||
1668 | * The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif | 1698 | * The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif |
1669 | */ | 1699 | */ |
1670 | struct ipmr_vif_iter { | 1700 | struct ipmr_vif_iter { |
1701 | struct seq_net_private p; | ||
1671 | int ct; | 1702 | int ct; |
1672 | }; | 1703 | }; |
1673 | 1704 | ||
1674 | static struct vif_device *ipmr_vif_seq_idx(struct ipmr_vif_iter *iter, | 1705 | static struct vif_device *ipmr_vif_seq_idx(struct net *net, |
1706 | struct ipmr_vif_iter *iter, | ||
1675 | loff_t pos) | 1707 | loff_t pos) |
1676 | { | 1708 | { |
1677 | for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) { | 1709 | for (iter->ct = 0; iter->ct < net->ipv4.maxvif; ++iter->ct) { |
1678 | if (!VIF_EXISTS(iter->ct)) | 1710 | if (!VIF_EXISTS(net, iter->ct)) |
1679 | continue; | 1711 | continue; |
1680 | if (pos-- == 0) | 1712 | if (pos-- == 0) |
1681 | return &vif_table[iter->ct]; | 1713 | return &net->ipv4.vif_table[iter->ct]; |
1682 | } | 1714 | } |
1683 | return NULL; | 1715 | return NULL; |
1684 | } | 1716 | } |
@@ -1686,23 +1718,26 @@ static struct vif_device *ipmr_vif_seq_idx(struct ipmr_vif_iter *iter, | |||
1686 | static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos) | 1718 | static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos) |
1687 | __acquires(mrt_lock) | 1719 | __acquires(mrt_lock) |
1688 | { | 1720 | { |
1721 | struct net *net = seq_file_net(seq); | ||
1722 | |||
1689 | read_lock(&mrt_lock); | 1723 | read_lock(&mrt_lock); |
1690 | return *pos ? ipmr_vif_seq_idx(seq->private, *pos - 1) | 1724 | return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1) |
1691 | : SEQ_START_TOKEN; | 1725 | : SEQ_START_TOKEN; |
1692 | } | 1726 | } |
1693 | 1727 | ||
1694 | static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos) | 1728 | static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
1695 | { | 1729 | { |
1696 | struct ipmr_vif_iter *iter = seq->private; | 1730 | struct ipmr_vif_iter *iter = seq->private; |
1731 | struct net *net = seq_file_net(seq); | ||
1697 | 1732 | ||
1698 | ++*pos; | 1733 | ++*pos; |
1699 | if (v == SEQ_START_TOKEN) | 1734 | if (v == SEQ_START_TOKEN) |
1700 | return ipmr_vif_seq_idx(iter, 0); | 1735 | return ipmr_vif_seq_idx(net, iter, 0); |
1701 | 1736 | ||
1702 | while (++iter->ct < maxvif) { | 1737 | while (++iter->ct < net->ipv4.maxvif) { |
1703 | if (!VIF_EXISTS(iter->ct)) | 1738 | if (!VIF_EXISTS(net, iter->ct)) |
1704 | continue; | 1739 | continue; |
1705 | return &vif_table[iter->ct]; | 1740 | return &net->ipv4.vif_table[iter->ct]; |
1706 | } | 1741 | } |
1707 | return NULL; | 1742 | return NULL; |
1708 | } | 1743 | } |
@@ -1715,6 +1750,8 @@ static void ipmr_vif_seq_stop(struct seq_file *seq, void *v) | |||
1715 | 1750 | ||
1716 | static int ipmr_vif_seq_show(struct seq_file *seq, void *v) | 1751 | static int ipmr_vif_seq_show(struct seq_file *seq, void *v) |
1717 | { | 1752 | { |
1753 | struct net *net = seq_file_net(seq); | ||
1754 | |||
1718 | if (v == SEQ_START_TOKEN) { | 1755 | if (v == SEQ_START_TOKEN) { |
1719 | seq_puts(seq, | 1756 | seq_puts(seq, |
1720 | "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n"); | 1757 | "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n"); |
@@ -1724,7 +1761,7 @@ static int ipmr_vif_seq_show(struct seq_file *seq, void *v) | |||
1724 | 1761 | ||
1725 | seq_printf(seq, | 1762 | seq_printf(seq, |
1726 | "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n", | 1763 | "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n", |
1727 | vif - vif_table, | 1764 | vif - net->ipv4.vif_table, |
1728 | name, vif->bytes_in, vif->pkt_in, | 1765 | name, vif->bytes_in, vif->pkt_in, |
1729 | vif->bytes_out, vif->pkt_out, | 1766 | vif->bytes_out, vif->pkt_out, |
1730 | vif->flags, vif->local, vif->remote); | 1767 | vif->flags, vif->local, vif->remote); |
@@ -1741,8 +1778,8 @@ static const struct seq_operations ipmr_vif_seq_ops = { | |||
1741 | 1778 | ||
1742 | static int ipmr_vif_open(struct inode *inode, struct file *file) | 1779 | static int ipmr_vif_open(struct inode *inode, struct file *file) |
1743 | { | 1780 | { |
1744 | return seq_open_private(file, &ipmr_vif_seq_ops, | 1781 | return seq_open_net(inode, file, &ipmr_vif_seq_ops, |
1745 | sizeof(struct ipmr_vif_iter)); | 1782 | sizeof(struct ipmr_vif_iter)); |
1746 | } | 1783 | } |
1747 | 1784 | ||
1748 | static const struct file_operations ipmr_vif_fops = { | 1785 | static const struct file_operations ipmr_vif_fops = { |
@@ -1750,23 +1787,26 @@ static const struct file_operations ipmr_vif_fops = { | |||
1750 | .open = ipmr_vif_open, | 1787 | .open = ipmr_vif_open, |
1751 | .read = seq_read, | 1788 | .read = seq_read, |
1752 | .llseek = seq_lseek, | 1789 | .llseek = seq_lseek, |
1753 | .release = seq_release_private, | 1790 | .release = seq_release_net, |
1754 | }; | 1791 | }; |
1755 | 1792 | ||
1756 | struct ipmr_mfc_iter { | 1793 | struct ipmr_mfc_iter { |
1794 | struct seq_net_private p; | ||
1757 | struct mfc_cache **cache; | 1795 | struct mfc_cache **cache; |
1758 | int ct; | 1796 | int ct; |
1759 | }; | 1797 | }; |
1760 | 1798 | ||
1761 | 1799 | ||
1762 | static struct mfc_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos) | 1800 | static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net, |
1801 | struct ipmr_mfc_iter *it, loff_t pos) | ||
1763 | { | 1802 | { |
1764 | struct mfc_cache *mfc; | 1803 | struct mfc_cache *mfc; |
1765 | 1804 | ||
1766 | it->cache = mfc_cache_array; | 1805 | it->cache = net->ipv4.mfc_cache_array; |
1767 | read_lock(&mrt_lock); | 1806 | read_lock(&mrt_lock); |
1768 | for (it->ct = 0; it->ct < MFC_LINES; it->ct++) | 1807 | for (it->ct = 0; it->ct < MFC_LINES; it->ct++) |
1769 | for (mfc = mfc_cache_array[it->ct]; mfc; mfc = mfc->next) | 1808 | for (mfc = net->ipv4.mfc_cache_array[it->ct]; |
1809 | mfc; mfc = mfc->next) | ||
1770 | if (pos-- == 0) | 1810 | if (pos-- == 0) |
1771 | return mfc; | 1811 | return mfc; |
1772 | read_unlock(&mrt_lock); | 1812 | read_unlock(&mrt_lock); |
@@ -1774,7 +1814,8 @@ static struct mfc_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos) | |||
1774 | it->cache = &mfc_unres_queue; | 1814 | it->cache = &mfc_unres_queue; |
1775 | spin_lock_bh(&mfc_unres_lock); | 1815 | spin_lock_bh(&mfc_unres_lock); |
1776 | for (mfc = mfc_unres_queue; mfc; mfc = mfc->next) | 1816 | for (mfc = mfc_unres_queue; mfc; mfc = mfc->next) |
1777 | if (pos-- == 0) | 1817 | if (net_eq(mfc_net(mfc), net) && |
1818 | pos-- == 0) | ||
1778 | return mfc; | 1819 | return mfc; |
1779 | spin_unlock_bh(&mfc_unres_lock); | 1820 | spin_unlock_bh(&mfc_unres_lock); |
1780 | 1821 | ||
@@ -1786,9 +1827,11 @@ static struct mfc_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos) | |||
1786 | static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos) | 1827 | static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos) |
1787 | { | 1828 | { |
1788 | struct ipmr_mfc_iter *it = seq->private; | 1829 | struct ipmr_mfc_iter *it = seq->private; |
1830 | struct net *net = seq_file_net(seq); | ||
1831 | |||
1789 | it->cache = NULL; | 1832 | it->cache = NULL; |
1790 | it->ct = 0; | 1833 | it->ct = 0; |
1791 | return *pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1) | 1834 | return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1) |
1792 | : SEQ_START_TOKEN; | 1835 | : SEQ_START_TOKEN; |
1793 | } | 1836 | } |
1794 | 1837 | ||
@@ -1796,11 +1839,12 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
1796 | { | 1839 | { |
1797 | struct mfc_cache *mfc = v; | 1840 | struct mfc_cache *mfc = v; |
1798 | struct ipmr_mfc_iter *it = seq->private; | 1841 | struct ipmr_mfc_iter *it = seq->private; |
1842 | struct net *net = seq_file_net(seq); | ||
1799 | 1843 | ||
1800 | ++*pos; | 1844 | ++*pos; |
1801 | 1845 | ||
1802 | if (v == SEQ_START_TOKEN) | 1846 | if (v == SEQ_START_TOKEN) |
1803 | return ipmr_mfc_seq_idx(seq->private, 0); | 1847 | return ipmr_mfc_seq_idx(net, seq->private, 0); |
1804 | 1848 | ||
1805 | if (mfc->next) | 1849 | if (mfc->next) |
1806 | return mfc->next; | 1850 | return mfc->next; |
@@ -1808,10 +1852,10 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
1808 | if (it->cache == &mfc_unres_queue) | 1852 | if (it->cache == &mfc_unres_queue) |
1809 | goto end_of_list; | 1853 | goto end_of_list; |
1810 | 1854 | ||
1811 | BUG_ON(it->cache != mfc_cache_array); | 1855 | BUG_ON(it->cache != net->ipv4.mfc_cache_array); |
1812 | 1856 | ||
1813 | while (++it->ct < MFC_LINES) { | 1857 | while (++it->ct < MFC_LINES) { |
1814 | mfc = mfc_cache_array[it->ct]; | 1858 | mfc = net->ipv4.mfc_cache_array[it->ct]; |
1815 | if (mfc) | 1859 | if (mfc) |
1816 | return mfc; | 1860 | return mfc; |
1817 | } | 1861 | } |
@@ -1823,6 +1867,8 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
1823 | 1867 | ||
1824 | spin_lock_bh(&mfc_unres_lock); | 1868 | spin_lock_bh(&mfc_unres_lock); |
1825 | mfc = mfc_unres_queue; | 1869 | mfc = mfc_unres_queue; |
1870 | while (mfc && !net_eq(mfc_net(mfc), net)) | ||
1871 | mfc = mfc->next; | ||
1826 | if (mfc) | 1872 | if (mfc) |
1827 | return mfc; | 1873 | return mfc; |
1828 | 1874 | ||
@@ -1836,16 +1882,18 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
1836 | static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v) | 1882 | static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v) |
1837 | { | 1883 | { |
1838 | struct ipmr_mfc_iter *it = seq->private; | 1884 | struct ipmr_mfc_iter *it = seq->private; |
1885 | struct net *net = seq_file_net(seq); | ||
1839 | 1886 | ||
1840 | if (it->cache == &mfc_unres_queue) | 1887 | if (it->cache == &mfc_unres_queue) |
1841 | spin_unlock_bh(&mfc_unres_lock); | 1888 | spin_unlock_bh(&mfc_unres_lock); |
1842 | else if (it->cache == mfc_cache_array) | 1889 | else if (it->cache == net->ipv4.mfc_cache_array) |
1843 | read_unlock(&mrt_lock); | 1890 | read_unlock(&mrt_lock); |
1844 | } | 1891 | } |
1845 | 1892 | ||
1846 | static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) | 1893 | static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) |
1847 | { | 1894 | { |
1848 | int n; | 1895 | int n; |
1896 | struct net *net = seq_file_net(seq); | ||
1849 | 1897 | ||
1850 | if (v == SEQ_START_TOKEN) { | 1898 | if (v == SEQ_START_TOKEN) { |
1851 | seq_puts(seq, | 1899 | seq_puts(seq, |
@@ -1866,9 +1914,9 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) | |||
1866 | mfc->mfc_un.res.wrong_if); | 1914 | mfc->mfc_un.res.wrong_if); |
1867 | for (n = mfc->mfc_un.res.minvif; | 1915 | for (n = mfc->mfc_un.res.minvif; |
1868 | n < mfc->mfc_un.res.maxvif; n++ ) { | 1916 | n < mfc->mfc_un.res.maxvif; n++ ) { |
1869 | if (VIF_EXISTS(n) | 1917 | if (VIF_EXISTS(net, n) && |
1870 | && mfc->mfc_un.res.ttls[n] < 255) | 1918 | mfc->mfc_un.res.ttls[n] < 255) |
1871 | seq_printf(seq, | 1919 | seq_printf(seq, |
1872 | " %2d:%-3d", | 1920 | " %2d:%-3d", |
1873 | n, mfc->mfc_un.res.ttls[n]); | 1921 | n, mfc->mfc_un.res.ttls[n]); |
1874 | } | 1922 | } |
@@ -1892,8 +1940,8 @@ static const struct seq_operations ipmr_mfc_seq_ops = { | |||
1892 | 1940 | ||
1893 | static int ipmr_mfc_open(struct inode *inode, struct file *file) | 1941 | static int ipmr_mfc_open(struct inode *inode, struct file *file) |
1894 | { | 1942 | { |
1895 | return seq_open_private(file, &ipmr_mfc_seq_ops, | 1943 | return seq_open_net(inode, file, &ipmr_mfc_seq_ops, |
1896 | sizeof(struct ipmr_mfc_iter)); | 1944 | sizeof(struct ipmr_mfc_iter)); |
1897 | } | 1945 | } |
1898 | 1946 | ||
1899 | static const struct file_operations ipmr_mfc_fops = { | 1947 | static const struct file_operations ipmr_mfc_fops = { |
@@ -1901,7 +1949,7 @@ static const struct file_operations ipmr_mfc_fops = { | |||
1901 | .open = ipmr_mfc_open, | 1949 | .open = ipmr_mfc_open, |
1902 | .read = seq_read, | 1950 | .read = seq_read, |
1903 | .llseek = seq_lseek, | 1951 | .llseek = seq_lseek, |
1904 | .release = seq_release_private, | 1952 | .release = seq_release_net, |
1905 | }; | 1953 | }; |
1906 | #endif | 1954 | #endif |
1907 | 1955 | ||
@@ -1915,6 +1963,65 @@ static struct net_protocol pim_protocol = { | |||
1915 | /* | 1963 | /* |
1916 | * Setup for IP multicast routing | 1964 | * Setup for IP multicast routing |
1917 | */ | 1965 | */ |
1966 | static int __net_init ipmr_net_init(struct net *net) | ||
1967 | { | ||
1968 | int err = 0; | ||
1969 | |||
1970 | net->ipv4.vif_table = kcalloc(MAXVIFS, sizeof(struct vif_device), | ||
1971 | GFP_KERNEL); | ||
1972 | if (!net->ipv4.vif_table) { | ||
1973 | err = -ENOMEM; | ||
1974 | goto fail; | ||
1975 | } | ||
1976 | |||
1977 | /* Forwarding cache */ | ||
1978 | net->ipv4.mfc_cache_array = kcalloc(MFC_LINES, | ||
1979 | sizeof(struct mfc_cache *), | ||
1980 | GFP_KERNEL); | ||
1981 | if (!net->ipv4.mfc_cache_array) { | ||
1982 | err = -ENOMEM; | ||
1983 | goto fail_mfc_cache; | ||
1984 | } | ||
1985 | |||
1986 | #ifdef CONFIG_IP_PIMSM | ||
1987 | net->ipv4.mroute_reg_vif_num = -1; | ||
1988 | #endif | ||
1989 | |||
1990 | #ifdef CONFIG_PROC_FS | ||
1991 | err = -ENOMEM; | ||
1992 | if (!proc_net_fops_create(net, "ip_mr_vif", 0, &ipmr_vif_fops)) | ||
1993 | goto proc_vif_fail; | ||
1994 | if (!proc_net_fops_create(net, "ip_mr_cache", 0, &ipmr_mfc_fops)) | ||
1995 | goto proc_cache_fail; | ||
1996 | #endif | ||
1997 | return 0; | ||
1998 | |||
1999 | #ifdef CONFIG_PROC_FS | ||
2000 | proc_cache_fail: | ||
2001 | proc_net_remove(net, "ip_mr_vif"); | ||
2002 | proc_vif_fail: | ||
2003 | kfree(net->ipv4.mfc_cache_array); | ||
2004 | #endif | ||
2005 | fail_mfc_cache: | ||
2006 | kfree(net->ipv4.vif_table); | ||
2007 | fail: | ||
2008 | return err; | ||
2009 | } | ||
2010 | |||
2011 | static void __net_exit ipmr_net_exit(struct net *net) | ||
2012 | { | ||
2013 | #ifdef CONFIG_PROC_FS | ||
2014 | proc_net_remove(net, "ip_mr_cache"); | ||
2015 | proc_net_remove(net, "ip_mr_vif"); | ||
2016 | #endif | ||
2017 | kfree(net->ipv4.mfc_cache_array); | ||
2018 | kfree(net->ipv4.vif_table); | ||
2019 | } | ||
2020 | |||
2021 | static struct pernet_operations ipmr_net_ops = { | ||
2022 | .init = ipmr_net_init, | ||
2023 | .exit = ipmr_net_exit, | ||
2024 | }; | ||
1918 | 2025 | ||
1919 | int __init ip_mr_init(void) | 2026 | int __init ip_mr_init(void) |
1920 | { | 2027 | { |
@@ -1927,26 +2034,20 @@ int __init ip_mr_init(void) | |||
1927 | if (!mrt_cachep) | 2034 | if (!mrt_cachep) |
1928 | return -ENOMEM; | 2035 | return -ENOMEM; |
1929 | 2036 | ||
2037 | err = register_pernet_subsys(&ipmr_net_ops); | ||
2038 | if (err) | ||
2039 | goto reg_pernet_fail; | ||
2040 | |||
1930 | setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0); | 2041 | setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0); |
1931 | err = register_netdevice_notifier(&ip_mr_notifier); | 2042 | err = register_netdevice_notifier(&ip_mr_notifier); |
1932 | if (err) | 2043 | if (err) |
1933 | goto reg_notif_fail; | 2044 | goto reg_notif_fail; |
1934 | #ifdef CONFIG_PROC_FS | ||
1935 | err = -ENOMEM; | ||
1936 | if (!proc_net_fops_create(&init_net, "ip_mr_vif", 0, &ipmr_vif_fops)) | ||
1937 | goto proc_vif_fail; | ||
1938 | if (!proc_net_fops_create(&init_net, "ip_mr_cache", 0, &ipmr_mfc_fops)) | ||
1939 | goto proc_cache_fail; | ||
1940 | #endif | ||
1941 | return 0; | 2045 | return 0; |
1942 | #ifdef CONFIG_PROC_FS | 2046 | |
1943 | proc_cache_fail: | ||
1944 | proc_net_remove(&init_net, "ip_mr_vif"); | ||
1945 | proc_vif_fail: | ||
1946 | unregister_netdevice_notifier(&ip_mr_notifier); | ||
1947 | #endif | ||
1948 | reg_notif_fail: | 2047 | reg_notif_fail: |
1949 | del_timer(&ipmr_expire_timer); | 2048 | del_timer(&ipmr_expire_timer); |
2049 | unregister_pernet_subsys(&ipmr_net_ops); | ||
2050 | reg_pernet_fail: | ||
1950 | kmem_cache_destroy(mrt_cachep); | 2051 | kmem_cache_destroy(mrt_cachep); |
1951 | return err; | 2052 | return err; |
1952 | } | 2053 | } |
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c index 182f845de92f..d9521f6f9ed0 100644 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c | |||
@@ -1292,7 +1292,7 @@ static struct nf_conntrack_helper snmp_helper __read_mostly = { | |||
1292 | .expect_policy = &snmp_exp_policy, | 1292 | .expect_policy = &snmp_exp_policy, |
1293 | .name = "snmp", | 1293 | .name = "snmp", |
1294 | .tuple.src.l3num = AF_INET, | 1294 | .tuple.src.l3num = AF_INET, |
1295 | .tuple.src.u.udp.port = __constant_htons(SNMP_PORT), | 1295 | .tuple.src.u.udp.port = cpu_to_be16(SNMP_PORT), |
1296 | .tuple.dst.protonum = IPPROTO_UDP, | 1296 | .tuple.dst.protonum = IPPROTO_UDP, |
1297 | }; | 1297 | }; |
1298 | 1298 | ||
@@ -1302,7 +1302,7 @@ static struct nf_conntrack_helper snmp_trap_helper __read_mostly = { | |||
1302 | .expect_policy = &snmp_exp_policy, | 1302 | .expect_policy = &snmp_exp_policy, |
1303 | .name = "snmp_trap", | 1303 | .name = "snmp_trap", |
1304 | .tuple.src.l3num = AF_INET, | 1304 | .tuple.src.l3num = AF_INET, |
1305 | .tuple.src.u.udp.port = __constant_htons(SNMP_TRAP_PORT), | 1305 | .tuple.src.u.udp.port = cpu_to_be16(SNMP_TRAP_PORT), |
1306 | .tuple.dst.protonum = IPPROTO_UDP, | 1306 | .tuple.dst.protonum = IPPROTO_UDP, |
1307 | }; | 1307 | }; |
1308 | 1308 | ||
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 97f71153584f..5caee609be06 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -151,7 +151,7 @@ static void rt_emergency_hash_rebuild(struct net *net); | |||
151 | 151 | ||
152 | static struct dst_ops ipv4_dst_ops = { | 152 | static struct dst_ops ipv4_dst_ops = { |
153 | .family = AF_INET, | 153 | .family = AF_INET, |
154 | .protocol = __constant_htons(ETH_P_IP), | 154 | .protocol = cpu_to_be16(ETH_P_IP), |
155 | .gc = rt_garbage_collect, | 155 | .gc = rt_garbage_collect, |
156 | .check = ipv4_dst_check, | 156 | .check = ipv4_dst_check, |
157 | .destroy = ipv4_dst_destroy, | 157 | .destroy = ipv4_dst_destroy, |
@@ -2696,7 +2696,7 @@ static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu) | |||
2696 | 2696 | ||
2697 | static struct dst_ops ipv4_dst_blackhole_ops = { | 2697 | static struct dst_ops ipv4_dst_blackhole_ops = { |
2698 | .family = AF_INET, | 2698 | .family = AF_INET, |
2699 | .protocol = __constant_htons(ETH_P_IP), | 2699 | .protocol = cpu_to_be16(ETH_P_IP), |
2700 | .destroy = ipv4_dst_destroy, | 2700 | .destroy = ipv4_dst_destroy, |
2701 | .check = ipv4_dst_check, | 2701 | .check = ipv4_dst_check, |
2702 | .update_pmtu = ipv4_rt_blackhole_update_pmtu, | 2702 | .update_pmtu = ipv4_rt_blackhole_update_pmtu, |
@@ -2779,7 +2779,8 @@ int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp) | |||
2779 | return ip_route_output_flow(net, rp, flp, NULL, 0); | 2779 | return ip_route_output_flow(net, rp, flp, NULL, 0); |
2780 | } | 2780 | } |
2781 | 2781 | ||
2782 | static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event, | 2782 | static int rt_fill_info(struct net *net, |
2783 | struct sk_buff *skb, u32 pid, u32 seq, int event, | ||
2783 | int nowait, unsigned int flags) | 2784 | int nowait, unsigned int flags) |
2784 | { | 2785 | { |
2785 | struct rtable *rt = skb->rtable; | 2786 | struct rtable *rt = skb->rtable; |
@@ -2844,8 +2845,8 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event, | |||
2844 | __be32 dst = rt->rt_dst; | 2845 | __be32 dst = rt->rt_dst; |
2845 | 2846 | ||
2846 | if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) && | 2847 | if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) && |
2847 | IPV4_DEVCONF_ALL(&init_net, MC_FORWARDING)) { | 2848 | IPV4_DEVCONF_ALL(net, MC_FORWARDING)) { |
2848 | int err = ipmr_get_route(skb, r, nowait); | 2849 | int err = ipmr_get_route(net, skb, r, nowait); |
2849 | if (err <= 0) { | 2850 | if (err <= 0) { |
2850 | if (!nowait) { | 2851 | if (!nowait) { |
2851 | if (err == 0) | 2852 | if (err == 0) |
@@ -2950,7 +2951,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void | |||
2950 | if (rtm->rtm_flags & RTM_F_NOTIFY) | 2951 | if (rtm->rtm_flags & RTM_F_NOTIFY) |
2951 | rt->rt_flags |= RTCF_NOTIFY; | 2952 | rt->rt_flags |= RTCF_NOTIFY; |
2952 | 2953 | ||
2953 | err = rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, | 2954 | err = rt_fill_info(net, skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, |
2954 | RTM_NEWROUTE, 0, 0); | 2955 | RTM_NEWROUTE, 0, 0); |
2955 | if (err <= 0) | 2956 | if (err <= 0) |
2956 | goto errout_free; | 2957 | goto errout_free; |
@@ -2988,7 +2989,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
2988 | if (rt_is_expired(rt)) | 2989 | if (rt_is_expired(rt)) |
2989 | continue; | 2990 | continue; |
2990 | skb->dst = dst_clone(&rt->u.dst); | 2991 | skb->dst = dst_clone(&rt->u.dst); |
2991 | if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid, | 2992 | if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid, |
2992 | cb->nlh->nlmsg_seq, RTM_NEWROUTE, | 2993 | cb->nlh->nlmsg_seq, RTM_NEWROUTE, |
2993 | 1, NLM_F_MULTI) <= 0) { | 2994 | 1, NLM_F_MULTI) <= 0) { |
2994 | dst_release(xchg(&skb->dst, NULL)); | 2995 | dst_release(xchg(&skb->dst, NULL)); |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 76b148bcb0dc..73266b79c19a 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -2482,19 +2482,19 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb) | |||
2482 | unsigned int mss = 1; | 2482 | unsigned int mss = 1; |
2483 | int flush = 1; | 2483 | int flush = 1; |
2484 | 2484 | ||
2485 | if (!pskb_may_pull(skb, sizeof(*th))) | 2485 | th = skb_gro_header(skb, sizeof(*th)); |
2486 | if (unlikely(!th)) | ||
2486 | goto out; | 2487 | goto out; |
2487 | 2488 | ||
2488 | th = tcp_hdr(skb); | ||
2489 | thlen = th->doff * 4; | 2489 | thlen = th->doff * 4; |
2490 | if (thlen < sizeof(*th)) | 2490 | if (thlen < sizeof(*th)) |
2491 | goto out; | 2491 | goto out; |
2492 | 2492 | ||
2493 | if (!pskb_may_pull(skb, thlen)) | 2493 | th = skb_gro_header(skb, thlen); |
2494 | if (unlikely(!th)) | ||
2494 | goto out; | 2495 | goto out; |
2495 | 2496 | ||
2496 | th = tcp_hdr(skb); | 2497 | skb_gro_pull(skb, thlen); |
2497 | __skb_pull(skb, thlen); | ||
2498 | 2498 | ||
2499 | flags = tcp_flag_word(th); | 2499 | flags = tcp_flag_word(th); |
2500 | 2500 | ||
@@ -2522,10 +2522,10 @@ found: | |||
2522 | flush |= th->ack_seq != th2->ack_seq || th->window != th2->window; | 2522 | flush |= th->ack_seq != th2->ack_seq || th->window != th2->window; |
2523 | flush |= memcmp(th + 1, th2 + 1, thlen - sizeof(*th)); | 2523 | flush |= memcmp(th + 1, th2 + 1, thlen - sizeof(*th)); |
2524 | 2524 | ||
2525 | total = p->len; | 2525 | total = skb_gro_len(p); |
2526 | mss = skb_shinfo(p)->gso_size; | 2526 | mss = skb_shinfo(p)->gso_size; |
2527 | 2527 | ||
2528 | flush |= skb->len > mss || skb->len <= 0; | 2528 | flush |= skb_gro_len(skb) > mss || !skb_gro_len(skb); |
2529 | flush |= ntohl(th2->seq) + total != ntohl(th->seq); | 2529 | flush |= ntohl(th2->seq) + total != ntohl(th->seq); |
2530 | 2530 | ||
2531 | if (flush || skb_gro_receive(head, skb)) { | 2531 | if (flush || skb_gro_receive(head, skb)) { |
@@ -2538,7 +2538,7 @@ found: | |||
2538 | tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH); | 2538 | tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH); |
2539 | 2539 | ||
2540 | out_check_final: | 2540 | out_check_final: |
2541 | flush = skb->len < mss; | 2541 | flush = skb_gro_len(skb) < mss; |
2542 | flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST | | 2542 | flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST | |
2543 | TCP_FLAG_SYN | TCP_FLAG_FIN); | 2543 | TCP_FLAG_SYN | TCP_FLAG_FIN); |
2544 | 2544 | ||
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 19d7b429a262..f6b962f56ab4 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -2355,7 +2355,7 @@ struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb) | |||
2355 | 2355 | ||
2356 | switch (skb->ip_summed) { | 2356 | switch (skb->ip_summed) { |
2357 | case CHECKSUM_COMPLETE: | 2357 | case CHECKSUM_COMPLETE: |
2358 | if (!tcp_v4_check(skb->len, iph->saddr, iph->daddr, | 2358 | if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr, |
2359 | skb->csum)) { | 2359 | skb->csum)) { |
2360 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 2360 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
2361 | break; | 2361 | break; |
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 2ad24ba31f9d..60d918c96a4f 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c | |||
@@ -241,7 +241,7 @@ static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev, | |||
241 | 241 | ||
242 | static struct dst_ops xfrm4_dst_ops = { | 242 | static struct dst_ops xfrm4_dst_ops = { |
243 | .family = AF_INET, | 243 | .family = AF_INET, |
244 | .protocol = __constant_htons(ETH_P_IP), | 244 | .protocol = cpu_to_be16(ETH_P_IP), |
245 | .gc = xfrm4_garbage_collect, | 245 | .gc = xfrm4_garbage_collect, |
246 | .update_pmtu = xfrm4_update_pmtu, | 246 | .update_pmtu = xfrm4_update_pmtu, |
247 | .destroy = xfrm4_dst_destroy, | 247 | .destroy = xfrm4_dst_destroy, |
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index c802bc1658a8..fa2ac7ee662f 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -799,24 +799,34 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, | |||
799 | int proto; | 799 | int proto; |
800 | __wsum csum; | 800 | __wsum csum; |
801 | 801 | ||
802 | if (unlikely(!pskb_may_pull(skb, sizeof(*iph)))) | 802 | iph = skb_gro_header(skb, sizeof(*iph)); |
803 | if (unlikely(!iph)) | ||
803 | goto out; | 804 | goto out; |
804 | 805 | ||
805 | iph = ipv6_hdr(skb); | 806 | skb_gro_pull(skb, sizeof(*iph)); |
806 | __skb_pull(skb, sizeof(*iph)); | 807 | skb_set_transport_header(skb, skb_gro_offset(skb)); |
807 | 808 | ||
808 | flush += ntohs(iph->payload_len) != skb->len; | 809 | flush += ntohs(iph->payload_len) != skb_gro_len(skb); |
809 | 810 | ||
810 | rcu_read_lock(); | 811 | rcu_read_lock(); |
811 | proto = ipv6_gso_pull_exthdrs(skb, iph->nexthdr); | 812 | proto = iph->nexthdr; |
812 | iph = ipv6_hdr(skb); | ||
813 | IPV6_GRO_CB(skb)->proto = proto; | ||
814 | ops = rcu_dereference(inet6_protos[proto]); | 813 | ops = rcu_dereference(inet6_protos[proto]); |
815 | if (!ops || !ops->gro_receive) | 814 | if (!ops || !ops->gro_receive) { |
816 | goto out_unlock; | 815 | __pskb_pull(skb, skb_gro_offset(skb)); |
816 | proto = ipv6_gso_pull_exthdrs(skb, proto); | ||
817 | skb_gro_pull(skb, -skb_transport_offset(skb)); | ||
818 | skb_reset_transport_header(skb); | ||
819 | __skb_push(skb, skb_gro_offset(skb)); | ||
820 | |||
821 | if (!ops || !ops->gro_receive) | ||
822 | goto out_unlock; | ||
823 | |||
824 | iph = ipv6_hdr(skb); | ||
825 | } | ||
826 | |||
827 | IPV6_GRO_CB(skb)->proto = proto; | ||
817 | 828 | ||
818 | flush--; | 829 | flush--; |
819 | skb_reset_transport_header(skb); | ||
820 | nlen = skb_network_header_len(skb); | 830 | nlen = skb_network_header_len(skb); |
821 | 831 | ||
822 | for (p = *head; p; p = p->next) { | 832 | for (p = *head; p; p = p->next) { |
@@ -880,7 +890,7 @@ out_unlock: | |||
880 | } | 890 | } |
881 | 891 | ||
882 | static struct packet_type ipv6_packet_type = { | 892 | static struct packet_type ipv6_packet_type = { |
883 | .type = __constant_htons(ETH_P_IPV6), | 893 | .type = cpu_to_be16(ETH_P_IPV6), |
884 | .func = ipv6_rcv, | 894 | .func = ipv6_rcv, |
885 | .gso_send_check = ipv6_gso_send_check, | 895 | .gso_send_check = ipv6_gso_send_check, |
886 | .gso_segment = ipv6_gso_segment, | 896 | .gso_segment = ipv6_gso_segment, |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 9c574235c905..c3d486a3edad 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -98,7 +98,7 @@ static struct rt6_info *rt6_get_route_info(struct net *net, | |||
98 | 98 | ||
99 | static struct dst_ops ip6_dst_ops_template = { | 99 | static struct dst_ops ip6_dst_ops_template = { |
100 | .family = AF_INET6, | 100 | .family = AF_INET6, |
101 | .protocol = __constant_htons(ETH_P_IPV6), | 101 | .protocol = cpu_to_be16(ETH_P_IPV6), |
102 | .gc = ip6_dst_gc, | 102 | .gc = ip6_dst_gc, |
103 | .gc_thresh = 1024, | 103 | .gc_thresh = 1024, |
104 | .check = ip6_dst_check, | 104 | .check = ip6_dst_check, |
@@ -117,7 +117,7 @@ static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu) | |||
117 | 117 | ||
118 | static struct dst_ops ip6_dst_blackhole_ops = { | 118 | static struct dst_ops ip6_dst_blackhole_ops = { |
119 | .family = AF_INET6, | 119 | .family = AF_INET6, |
120 | .protocol = __constant_htons(ETH_P_IPV6), | 120 | .protocol = cpu_to_be16(ETH_P_IPV6), |
121 | .destroy = ip6_dst_destroy, | 121 | .destroy = ip6_dst_destroy, |
122 | .check = ip6_dst_check, | 122 | .check = ip6_dst_check, |
123 | .update_pmtu = ip6_rt_blackhole_update_pmtu, | 123 | .update_pmtu = ip6_rt_blackhole_update_pmtu, |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index e5b85d45bee8..00f1269e11e9 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -948,7 +948,7 @@ struct sk_buff **tcp6_gro_receive(struct sk_buff **head, struct sk_buff *skb) | |||
948 | 948 | ||
949 | switch (skb->ip_summed) { | 949 | switch (skb->ip_summed) { |
950 | case CHECKSUM_COMPLETE: | 950 | case CHECKSUM_COMPLETE: |
951 | if (!tcp_v6_check(skb->len, &iph->saddr, &iph->daddr, | 951 | if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr, |
952 | skb->csum)) { | 952 | skb->csum)) { |
953 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 953 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
954 | break; | 954 | break; |
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 97ab068e8ccc..b4b16a43f277 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c | |||
@@ -272,7 +272,7 @@ static void xfrm6_dst_ifdown(struct dst_entry *dst, struct net_device *dev, | |||
272 | 272 | ||
273 | static struct dst_ops xfrm6_dst_ops = { | 273 | static struct dst_ops xfrm6_dst_ops = { |
274 | .family = AF_INET6, | 274 | .family = AF_INET6, |
275 | .protocol = __constant_htons(ETH_P_IPV6), | 275 | .protocol = cpu_to_be16(ETH_P_IPV6), |
276 | .gc = xfrm6_garbage_collect, | 276 | .gc = xfrm6_garbage_collect, |
277 | .update_pmtu = xfrm6_update_pmtu, | 277 | .update_pmtu = xfrm6_update_pmtu, |
278 | .destroy = xfrm6_dst_destroy, | 278 | .destroy = xfrm6_dst_destroy, |
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index b6e70f92e7fb..43d0ffc6d565 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c | |||
@@ -1959,12 +1959,12 @@ static const struct proto_ops SOCKOPS_WRAPPED(ipx_dgram_ops) = { | |||
1959 | SOCKOPS_WRAP(ipx_dgram, PF_IPX); | 1959 | SOCKOPS_WRAP(ipx_dgram, PF_IPX); |
1960 | 1960 | ||
1961 | static struct packet_type ipx_8023_packet_type = { | 1961 | static struct packet_type ipx_8023_packet_type = { |
1962 | .type = __constant_htons(ETH_P_802_3), | 1962 | .type = cpu_to_be16(ETH_P_802_3), |
1963 | .func = ipx_rcv, | 1963 | .func = ipx_rcv, |
1964 | }; | 1964 | }; |
1965 | 1965 | ||
1966 | static struct packet_type ipx_dix_packet_type = { | 1966 | static struct packet_type ipx_dix_packet_type = { |
1967 | .type = __constant_htons(ETH_P_IPX), | 1967 | .type = cpu_to_be16(ETH_P_IPX), |
1968 | .func = ipx_rcv, | 1968 | .func = ipx_rcv, |
1969 | }; | 1969 | }; |
1970 | 1970 | ||
diff --git a/net/irda/irmod.c b/net/irda/irmod.c index 4c487a883725..1bb607f2f5c7 100644 --- a/net/irda/irmod.c +++ b/net/irda/irmod.c | |||
@@ -56,7 +56,7 @@ EXPORT_SYMBOL(irda_debug); | |||
56 | * Tell the kernel how IrDA packets should be handled. | 56 | * Tell the kernel how IrDA packets should be handled. |
57 | */ | 57 | */ |
58 | static struct packet_type irda_packet_type = { | 58 | static struct packet_type irda_packet_type = { |
59 | .type = __constant_htons(ETH_P_IRDA), | 59 | .type = cpu_to_be16(ETH_P_IRDA), |
60 | .func = irlap_driver_rcv, /* Packet type handler irlap_frame.c */ | 60 | .func = irlap_driver_rcv, /* Packet type handler irlap_frame.c */ |
61 | }; | 61 | }; |
62 | 62 | ||
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c index 50d5b10e23a2..a7fe1adc378d 100644 --- a/net/llc/llc_core.c +++ b/net/llc/llc_core.c | |||
@@ -148,12 +148,12 @@ void llc_sap_close(struct llc_sap *sap) | |||
148 | } | 148 | } |
149 | 149 | ||
150 | static struct packet_type llc_packet_type = { | 150 | static struct packet_type llc_packet_type = { |
151 | .type = __constant_htons(ETH_P_802_2), | 151 | .type = cpu_to_be16(ETH_P_802_2), |
152 | .func = llc_rcv, | 152 | .func = llc_rcv, |
153 | }; | 153 | }; |
154 | 154 | ||
155 | static struct packet_type llc_tr_packet_type = { | 155 | static struct packet_type llc_tr_packet_type = { |
156 | .type = __constant_htons(ETH_P_TR_802_2), | 156 | .type = cpu_to_be16(ETH_P_TR_802_2), |
157 | .func = llc_rcv, | 157 | .func = llc_rcv, |
158 | }; | 158 | }; |
159 | 159 | ||
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 6be5d4efa51b..5c48378a852f 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c | |||
@@ -149,8 +149,8 @@ static struct task_struct *sync_backup_thread; | |||
149 | /* multicast addr */ | 149 | /* multicast addr */ |
150 | static struct sockaddr_in mcast_addr = { | 150 | static struct sockaddr_in mcast_addr = { |
151 | .sin_family = AF_INET, | 151 | .sin_family = AF_INET, |
152 | .sin_port = __constant_htons(IP_VS_SYNC_PORT), | 152 | .sin_port = cpu_to_be16(IP_VS_SYNC_PORT), |
153 | .sin_addr.s_addr = __constant_htonl(IP_VS_SYNC_GROUP), | 153 | .sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP), |
154 | }; | 154 | }; |
155 | 155 | ||
156 | 156 | ||
diff --git a/net/netfilter/nf_conntrack_amanda.c b/net/netfilter/nf_conntrack_amanda.c index 4f8fcf498545..07d9d8857e5d 100644 --- a/net/netfilter/nf_conntrack_amanda.c +++ b/net/netfilter/nf_conntrack_amanda.c | |||
@@ -177,7 +177,7 @@ static struct nf_conntrack_helper amanda_helper[2] __read_mostly = { | |||
177 | .me = THIS_MODULE, | 177 | .me = THIS_MODULE, |
178 | .help = amanda_help, | 178 | .help = amanda_help, |
179 | .tuple.src.l3num = AF_INET, | 179 | .tuple.src.l3num = AF_INET, |
180 | .tuple.src.u.udp.port = __constant_htons(10080), | 180 | .tuple.src.u.udp.port = cpu_to_be16(10080), |
181 | .tuple.dst.protonum = IPPROTO_UDP, | 181 | .tuple.dst.protonum = IPPROTO_UDP, |
182 | .expect_policy = &amanda_exp_policy, | 182 | .expect_policy = &amanda_exp_policy, |
183 | }, | 183 | }, |
@@ -186,7 +186,7 @@ static struct nf_conntrack_helper amanda_helper[2] __read_mostly = { | |||
186 | .me = THIS_MODULE, | 186 | .me = THIS_MODULE, |
187 | .help = amanda_help, | 187 | .help = amanda_help, |
188 | .tuple.src.l3num = AF_INET6, | 188 | .tuple.src.l3num = AF_INET6, |
189 | .tuple.src.u.udp.port = __constant_htons(10080), | 189 | .tuple.src.u.udp.port = cpu_to_be16(10080), |
190 | .tuple.dst.protonum = IPPROTO_UDP, | 190 | .tuple.dst.protonum = IPPROTO_UDP, |
191 | .expect_policy = &amanda_exp_policy, | 191 | .expect_policy = &amanda_exp_policy, |
192 | }, | 192 | }, |
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index 687bd633c3d7..66369490230e 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c | |||
@@ -1167,7 +1167,7 @@ static struct nf_conntrack_helper nf_conntrack_helper_q931[] __read_mostly = { | |||
1167 | .name = "Q.931", | 1167 | .name = "Q.931", |
1168 | .me = THIS_MODULE, | 1168 | .me = THIS_MODULE, |
1169 | .tuple.src.l3num = AF_INET, | 1169 | .tuple.src.l3num = AF_INET, |
1170 | .tuple.src.u.tcp.port = __constant_htons(Q931_PORT), | 1170 | .tuple.src.u.tcp.port = cpu_to_be16(Q931_PORT), |
1171 | .tuple.dst.protonum = IPPROTO_TCP, | 1171 | .tuple.dst.protonum = IPPROTO_TCP, |
1172 | .help = q931_help, | 1172 | .help = q931_help, |
1173 | .expect_policy = &q931_exp_policy, | 1173 | .expect_policy = &q931_exp_policy, |
@@ -1176,7 +1176,7 @@ static struct nf_conntrack_helper nf_conntrack_helper_q931[] __read_mostly = { | |||
1176 | .name = "Q.931", | 1176 | .name = "Q.931", |
1177 | .me = THIS_MODULE, | 1177 | .me = THIS_MODULE, |
1178 | .tuple.src.l3num = AF_INET6, | 1178 | .tuple.src.l3num = AF_INET6, |
1179 | .tuple.src.u.tcp.port = __constant_htons(Q931_PORT), | 1179 | .tuple.src.u.tcp.port = cpu_to_be16(Q931_PORT), |
1180 | .tuple.dst.protonum = IPPROTO_TCP, | 1180 | .tuple.dst.protonum = IPPROTO_TCP, |
1181 | .help = q931_help, | 1181 | .help = q931_help, |
1182 | .expect_policy = &q931_exp_policy, | 1182 | .expect_policy = &q931_exp_policy, |
@@ -1741,7 +1741,7 @@ static struct nf_conntrack_helper nf_conntrack_helper_ras[] __read_mostly = { | |||
1741 | .name = "RAS", | 1741 | .name = "RAS", |
1742 | .me = THIS_MODULE, | 1742 | .me = THIS_MODULE, |
1743 | .tuple.src.l3num = AF_INET, | 1743 | .tuple.src.l3num = AF_INET, |
1744 | .tuple.src.u.udp.port = __constant_htons(RAS_PORT), | 1744 | .tuple.src.u.udp.port = cpu_to_be16(RAS_PORT), |
1745 | .tuple.dst.protonum = IPPROTO_UDP, | 1745 | .tuple.dst.protonum = IPPROTO_UDP, |
1746 | .help = ras_help, | 1746 | .help = ras_help, |
1747 | .expect_policy = &ras_exp_policy, | 1747 | .expect_policy = &ras_exp_policy, |
@@ -1750,7 +1750,7 @@ static struct nf_conntrack_helper nf_conntrack_helper_ras[] __read_mostly = { | |||
1750 | .name = "RAS", | 1750 | .name = "RAS", |
1751 | .me = THIS_MODULE, | 1751 | .me = THIS_MODULE, |
1752 | .tuple.src.l3num = AF_INET6, | 1752 | .tuple.src.l3num = AF_INET6, |
1753 | .tuple.src.u.udp.port = __constant_htons(RAS_PORT), | 1753 | .tuple.src.u.udp.port = cpu_to_be16(RAS_PORT), |
1754 | .tuple.dst.protonum = IPPROTO_UDP, | 1754 | .tuple.dst.protonum = IPPROTO_UDP, |
1755 | .help = ras_help, | 1755 | .help = ras_help, |
1756 | .expect_policy = &ras_exp_policy, | 1756 | .expect_policy = &ras_exp_policy, |
diff --git a/net/netfilter/nf_conntrack_netbios_ns.c b/net/netfilter/nf_conntrack_netbios_ns.c index 5af4273b4668..8a3875e36ec2 100644 --- a/net/netfilter/nf_conntrack_netbios_ns.c +++ b/net/netfilter/nf_conntrack_netbios_ns.c | |||
@@ -105,7 +105,7 @@ static struct nf_conntrack_expect_policy exp_policy = { | |||
105 | static struct nf_conntrack_helper helper __read_mostly = { | 105 | static struct nf_conntrack_helper helper __read_mostly = { |
106 | .name = "netbios-ns", | 106 | .name = "netbios-ns", |
107 | .tuple.src.l3num = AF_INET, | 107 | .tuple.src.l3num = AF_INET, |
108 | .tuple.src.u.udp.port = __constant_htons(NMBD_PORT), | 108 | .tuple.src.u.udp.port = cpu_to_be16(NMBD_PORT), |
109 | .tuple.dst.protonum = IPPROTO_UDP, | 109 | .tuple.dst.protonum = IPPROTO_UDP, |
110 | .me = THIS_MODULE, | 110 | .me = THIS_MODULE, |
111 | .help = help, | 111 | .help = help, |
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c index 9e169ef2e854..72cca638a82d 100644 --- a/net/netfilter/nf_conntrack_pptp.c +++ b/net/netfilter/nf_conntrack_pptp.c | |||
@@ -591,7 +591,7 @@ static struct nf_conntrack_helper pptp __read_mostly = { | |||
591 | .name = "pptp", | 591 | .name = "pptp", |
592 | .me = THIS_MODULE, | 592 | .me = THIS_MODULE, |
593 | .tuple.src.l3num = AF_INET, | 593 | .tuple.src.l3num = AF_INET, |
594 | .tuple.src.u.tcp.port = __constant_htons(PPTP_CONTROL_PORT), | 594 | .tuple.src.u.tcp.port = cpu_to_be16(PPTP_CONTROL_PORT), |
595 | .tuple.dst.protonum = IPPROTO_TCP, | 595 | .tuple.dst.protonum = IPPROTO_TCP, |
596 | .help = conntrack_pptp_help, | 596 | .help = conntrack_pptp_help, |
597 | .destroy = pptp_destroy_siblings, | 597 | .destroy = pptp_destroy_siblings, |
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index e9c05b8f4f45..cba7849de98e 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c | |||
@@ -1432,7 +1432,7 @@ static int __init nr_proto_init(void) | |||
1432 | struct net_device *dev; | 1432 | struct net_device *dev; |
1433 | 1433 | ||
1434 | sprintf(name, "nr%d", i); | 1434 | sprintf(name, "nr%d", i); |
1435 | dev = alloc_netdev(sizeof(struct nr_private), name, nr_setup); | 1435 | dev = alloc_netdev(0, name, nr_setup); |
1436 | if (!dev) { | 1436 | if (!dev) { |
1437 | printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device structure\n"); | 1437 | printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device structure\n"); |
1438 | goto fail; | 1438 | goto fail; |
diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c index 6caf459665f2..351372463fed 100644 --- a/net/netrom/nr_dev.c +++ b/net/netrom/nr_dev.c | |||
@@ -42,7 +42,7 @@ | |||
42 | 42 | ||
43 | int nr_rx_ip(struct sk_buff *skb, struct net_device *dev) | 43 | int nr_rx_ip(struct sk_buff *skb, struct net_device *dev) |
44 | { | 44 | { |
45 | struct net_device_stats *stats = netdev_priv(dev); | 45 | struct net_device_stats *stats = &dev->stats; |
46 | 46 | ||
47 | if (!netif_running(dev)) { | 47 | if (!netif_running(dev)) { |
48 | stats->rx_dropped++; | 48 | stats->rx_dropped++; |
@@ -171,8 +171,7 @@ static int nr_close(struct net_device *dev) | |||
171 | 171 | ||
172 | static int nr_xmit(struct sk_buff *skb, struct net_device *dev) | 172 | static int nr_xmit(struct sk_buff *skb, struct net_device *dev) |
173 | { | 173 | { |
174 | struct nr_private *nr = netdev_priv(dev); | 174 | struct net_device_stats *stats = &dev->stats; |
175 | struct net_device_stats *stats = &nr->stats; | ||
176 | unsigned int len = skb->len; | 175 | unsigned int len = skb->len; |
177 | 176 | ||
178 | if (!nr_route_frame(skb, NULL)) { | 177 | if (!nr_route_frame(skb, NULL)) { |
@@ -187,34 +186,27 @@ static int nr_xmit(struct sk_buff *skb, struct net_device *dev) | |||
187 | return 0; | 186 | return 0; |
188 | } | 187 | } |
189 | 188 | ||
190 | static struct net_device_stats *nr_get_stats(struct net_device *dev) | ||
191 | { | ||
192 | struct nr_private *nr = netdev_priv(dev); | ||
193 | |||
194 | return &nr->stats; | ||
195 | } | ||
196 | |||
197 | static const struct header_ops nr_header_ops = { | 189 | static const struct header_ops nr_header_ops = { |
198 | .create = nr_header, | 190 | .create = nr_header, |
199 | .rebuild= nr_rebuild_header, | 191 | .rebuild= nr_rebuild_header, |
200 | }; | 192 | }; |
201 | 193 | ||
194 | static const struct net_device_ops nr_netdev_ops = { | ||
195 | .ndo_open = nr_open, | ||
196 | .ndo_stop = nr_close, | ||
197 | .ndo_start_xmit = nr_xmit, | ||
198 | .ndo_set_mac_address = nr_set_mac_address, | ||
199 | }; | ||
202 | 200 | ||
203 | void nr_setup(struct net_device *dev) | 201 | void nr_setup(struct net_device *dev) |
204 | { | 202 | { |
205 | dev->mtu = NR_MAX_PACKET_SIZE; | 203 | dev->mtu = NR_MAX_PACKET_SIZE; |
206 | dev->hard_start_xmit = nr_xmit; | 204 | dev->netdev_ops = &nr_netdev_ops; |
207 | dev->open = nr_open; | ||
208 | dev->stop = nr_close; | ||
209 | |||
210 | dev->header_ops = &nr_header_ops; | 205 | dev->header_ops = &nr_header_ops; |
211 | dev->hard_header_len = NR_NETWORK_LEN + NR_TRANSPORT_LEN; | 206 | dev->hard_header_len = NR_NETWORK_LEN + NR_TRANSPORT_LEN; |
212 | dev->addr_len = AX25_ADDR_LEN; | 207 | dev->addr_len = AX25_ADDR_LEN; |
213 | dev->type = ARPHRD_NETROM; | 208 | dev->type = ARPHRD_NETROM; |
214 | dev->set_mac_address = nr_set_mac_address; | ||
215 | 209 | ||
216 | /* New-style flags. */ | 210 | /* New-style flags. */ |
217 | dev->flags = IFF_NOARP; | 211 | dev->flags = IFF_NOARP; |
218 | |||
219 | dev->get_stats = nr_get_stats; | ||
220 | } | 212 | } |
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c index 13cb323f8c38..81795ea87794 100644 --- a/net/phonet/af_phonet.c +++ b/net/phonet/af_phonet.c | |||
@@ -275,8 +275,6 @@ static inline int can_respond(struct sk_buff *skb) | |||
275 | return 0; | 275 | return 0; |
276 | 276 | ||
277 | ph = pn_hdr(skb); | 277 | ph = pn_hdr(skb); |
278 | if (phonet_address_get(skb->dev, ph->pn_rdev) != ph->pn_rdev) | ||
279 | return 0; /* we are not the destination */ | ||
280 | if (ph->pn_res == PN_PREFIX && !pskb_may_pull(skb, 5)) | 278 | if (ph->pn_res == PN_PREFIX && !pskb_may_pull(skb, 5)) |
281 | return 0; | 279 | return 0; |
282 | if (ph->pn_res == PN_COMMGR) /* indications */ | 280 | if (ph->pn_res == PN_COMMGR) /* indications */ |
@@ -344,8 +342,8 @@ static int phonet_rcv(struct sk_buff *skb, struct net_device *dev, | |||
344 | struct packet_type *pkttype, | 342 | struct packet_type *pkttype, |
345 | struct net_device *orig_dev) | 343 | struct net_device *orig_dev) |
346 | { | 344 | { |
345 | struct net *net = dev_net(dev); | ||
347 | struct phonethdr *ph; | 346 | struct phonethdr *ph; |
348 | struct sock *sk; | ||
349 | struct sockaddr_pn sa; | 347 | struct sockaddr_pn sa; |
350 | u16 len; | 348 | u16 len; |
351 | 349 | ||
@@ -364,28 +362,28 @@ static int phonet_rcv(struct sk_buff *skb, struct net_device *dev, | |||
364 | skb_reset_transport_header(skb); | 362 | skb_reset_transport_header(skb); |
365 | 363 | ||
366 | pn_skb_get_dst_sockaddr(skb, &sa); | 364 | pn_skb_get_dst_sockaddr(skb, &sa); |
367 | if (pn_sockaddr_get_addr(&sa) == 0) | ||
368 | goto out; /* currently, we cannot be device 0 */ | ||
369 | 365 | ||
370 | sk = pn_find_sock_by_sa(dev_net(dev), &sa); | 366 | /* check if we are the destination */ |
371 | if (sk == NULL) { | 367 | if (phonet_address_lookup(net, pn_sockaddr_get_addr(&sa)) == 0) { |
368 | /* Phonet packet input */ | ||
369 | struct sock *sk = pn_find_sock_by_sa(net, &sa); | ||
370 | |||
371 | if (sk) | ||
372 | return sk_receive_skb(sk, skb, 0); | ||
373 | |||
372 | if (can_respond(skb)) { | 374 | if (can_respond(skb)) { |
373 | send_obj_unreachable(skb); | 375 | send_obj_unreachable(skb); |
374 | send_reset_indications(skb); | 376 | send_reset_indications(skb); |
375 | } | 377 | } |
376 | goto out; | ||
377 | } | 378 | } |
378 | 379 | ||
379 | /* Push data to the socket (or other sockets connected to it). */ | ||
380 | return sk_receive_skb(sk, skb, 0); | ||
381 | |||
382 | out: | 380 | out: |
383 | kfree_skb(skb); | 381 | kfree_skb(skb); |
384 | return NET_RX_DROP; | 382 | return NET_RX_DROP; |
385 | } | 383 | } |
386 | 384 | ||
387 | static struct packet_type phonet_packet_type = { | 385 | static struct packet_type phonet_packet_type = { |
388 | .type = __constant_htons(ETH_P_PHONET), | 386 | .type = cpu_to_be16(ETH_P_PHONET), |
389 | .dev = NULL, | 387 | .dev = NULL, |
390 | .func = phonet_rcv, | 388 | .func = phonet_rcv, |
391 | }; | 389 | }; |
@@ -428,16 +426,18 @@ static int __init phonet_init(void) | |||
428 | { | 426 | { |
429 | int err; | 427 | int err; |
430 | 428 | ||
429 | err = phonet_device_init(); | ||
430 | if (err) | ||
431 | return err; | ||
432 | |||
431 | err = sock_register(&phonet_proto_family); | 433 | err = sock_register(&phonet_proto_family); |
432 | if (err) { | 434 | if (err) { |
433 | printk(KERN_ALERT | 435 | printk(KERN_ALERT |
434 | "phonet protocol family initialization failed\n"); | 436 | "phonet protocol family initialization failed\n"); |
435 | return err; | 437 | goto err_sock; |
436 | } | 438 | } |
437 | 439 | ||
438 | phonet_device_init(); | ||
439 | dev_add_pack(&phonet_packet_type); | 440 | dev_add_pack(&phonet_packet_type); |
440 | phonet_netlink_register(); | ||
441 | phonet_sysctl_init(); | 441 | phonet_sysctl_init(); |
442 | 442 | ||
443 | err = isi_register(); | 443 | err = isi_register(); |
@@ -449,6 +449,7 @@ err: | |||
449 | phonet_sysctl_exit(); | 449 | phonet_sysctl_exit(); |
450 | sock_unregister(PF_PHONET); | 450 | sock_unregister(PF_PHONET); |
451 | dev_remove_pack(&phonet_packet_type); | 451 | dev_remove_pack(&phonet_packet_type); |
452 | err_sock: | ||
452 | phonet_device_exit(); | 453 | phonet_device_exit(); |
453 | return err; | 454 | return err; |
454 | } | 455 | } |
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c index 5491bf5e354b..80a322d77909 100644 --- a/net/phonet/pn_dev.c +++ b/net/phonet/pn_dev.c | |||
@@ -28,32 +28,41 @@ | |||
28 | #include <linux/netdevice.h> | 28 | #include <linux/netdevice.h> |
29 | #include <linux/phonet.h> | 29 | #include <linux/phonet.h> |
30 | #include <net/sock.h> | 30 | #include <net/sock.h> |
31 | #include <net/netns/generic.h> | ||
31 | #include <net/phonet/pn_dev.h> | 32 | #include <net/phonet/pn_dev.h> |
32 | 33 | ||
33 | /* when accessing, remember to lock with spin_lock(&pndevs.lock); */ | 34 | struct phonet_net { |
34 | struct phonet_device_list pndevs = { | 35 | struct phonet_device_list pndevs; |
35 | .list = LIST_HEAD_INIT(pndevs.list), | ||
36 | .lock = __SPIN_LOCK_UNLOCKED(pndevs.lock), | ||
37 | }; | 36 | }; |
38 | 37 | ||
38 | int phonet_net_id; | ||
39 | |||
40 | struct phonet_device_list *phonet_device_list(struct net *net) | ||
41 | { | ||
42 | struct phonet_net *pnn = net_generic(net, phonet_net_id); | ||
43 | return &pnn->pndevs; | ||
44 | } | ||
45 | |||
39 | /* Allocate new Phonet device. */ | 46 | /* Allocate new Phonet device. */ |
40 | static struct phonet_device *__phonet_device_alloc(struct net_device *dev) | 47 | static struct phonet_device *__phonet_device_alloc(struct net_device *dev) |
41 | { | 48 | { |
49 | struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); | ||
42 | struct phonet_device *pnd = kmalloc(sizeof(*pnd), GFP_ATOMIC); | 50 | struct phonet_device *pnd = kmalloc(sizeof(*pnd), GFP_ATOMIC); |
43 | if (pnd == NULL) | 51 | if (pnd == NULL) |
44 | return NULL; | 52 | return NULL; |
45 | pnd->netdev = dev; | 53 | pnd->netdev = dev; |
46 | bitmap_zero(pnd->addrs, 64); | 54 | bitmap_zero(pnd->addrs, 64); |
47 | 55 | ||
48 | list_add(&pnd->list, &pndevs.list); | 56 | list_add(&pnd->list, &pndevs->list); |
49 | return pnd; | 57 | return pnd; |
50 | } | 58 | } |
51 | 59 | ||
52 | static struct phonet_device *__phonet_get(struct net_device *dev) | 60 | static struct phonet_device *__phonet_get(struct net_device *dev) |
53 | { | 61 | { |
62 | struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); | ||
54 | struct phonet_device *pnd; | 63 | struct phonet_device *pnd; |
55 | 64 | ||
56 | list_for_each_entry(pnd, &pndevs.list, list) { | 65 | list_for_each_entry(pnd, &pndevs->list, list) { |
57 | if (pnd->netdev == dev) | 66 | if (pnd->netdev == dev) |
58 | return pnd; | 67 | return pnd; |
59 | } | 68 | } |
@@ -68,32 +77,33 @@ static void __phonet_device_free(struct phonet_device *pnd) | |||
68 | 77 | ||
69 | struct net_device *phonet_device_get(struct net *net) | 78 | struct net_device *phonet_device_get(struct net *net) |
70 | { | 79 | { |
80 | struct phonet_device_list *pndevs = phonet_device_list(net); | ||
71 | struct phonet_device *pnd; | 81 | struct phonet_device *pnd; |
72 | struct net_device *dev; | 82 | struct net_device *dev; |
73 | 83 | ||
74 | spin_lock_bh(&pndevs.lock); | 84 | spin_lock_bh(&pndevs->lock); |
75 | list_for_each_entry(pnd, &pndevs.list, list) { | 85 | list_for_each_entry(pnd, &pndevs->list, list) { |
76 | dev = pnd->netdev; | 86 | dev = pnd->netdev; |
77 | BUG_ON(!dev); | 87 | BUG_ON(!dev); |
78 | 88 | ||
79 | if (net_eq(dev_net(dev), net) && | 89 | if ((dev->reg_state == NETREG_REGISTERED) && |
80 | (dev->reg_state == NETREG_REGISTERED) && | ||
81 | ((pnd->netdev->flags & IFF_UP)) == IFF_UP) | 90 | ((pnd->netdev->flags & IFF_UP)) == IFF_UP) |
82 | break; | 91 | break; |
83 | dev = NULL; | 92 | dev = NULL; |
84 | } | 93 | } |
85 | if (dev) | 94 | if (dev) |
86 | dev_hold(dev); | 95 | dev_hold(dev); |
87 | spin_unlock_bh(&pndevs.lock); | 96 | spin_unlock_bh(&pndevs->lock); |
88 | return dev; | 97 | return dev; |
89 | } | 98 | } |
90 | 99 | ||
91 | int phonet_address_add(struct net_device *dev, u8 addr) | 100 | int phonet_address_add(struct net_device *dev, u8 addr) |
92 | { | 101 | { |
102 | struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); | ||
93 | struct phonet_device *pnd; | 103 | struct phonet_device *pnd; |
94 | int err = 0; | 104 | int err = 0; |
95 | 105 | ||
96 | spin_lock_bh(&pndevs.lock); | 106 | spin_lock_bh(&pndevs->lock); |
97 | /* Find or create Phonet-specific device data */ | 107 | /* Find or create Phonet-specific device data */ |
98 | pnd = __phonet_get(dev); | 108 | pnd = __phonet_get(dev); |
99 | if (pnd == NULL) | 109 | if (pnd == NULL) |
@@ -102,31 +112,33 @@ int phonet_address_add(struct net_device *dev, u8 addr) | |||
102 | err = -ENOMEM; | 112 | err = -ENOMEM; |
103 | else if (test_and_set_bit(addr >> 2, pnd->addrs)) | 113 | else if (test_and_set_bit(addr >> 2, pnd->addrs)) |
104 | err = -EEXIST; | 114 | err = -EEXIST; |
105 | spin_unlock_bh(&pndevs.lock); | 115 | spin_unlock_bh(&pndevs->lock); |
106 | return err; | 116 | return err; |
107 | } | 117 | } |
108 | 118 | ||
109 | int phonet_address_del(struct net_device *dev, u8 addr) | 119 | int phonet_address_del(struct net_device *dev, u8 addr) |
110 | { | 120 | { |
121 | struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); | ||
111 | struct phonet_device *pnd; | 122 | struct phonet_device *pnd; |
112 | int err = 0; | 123 | int err = 0; |
113 | 124 | ||
114 | spin_lock_bh(&pndevs.lock); | 125 | spin_lock_bh(&pndevs->lock); |
115 | pnd = __phonet_get(dev); | 126 | pnd = __phonet_get(dev); |
116 | if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs)) | 127 | if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs)) |
117 | err = -EADDRNOTAVAIL; | 128 | err = -EADDRNOTAVAIL; |
118 | else if (bitmap_empty(pnd->addrs, 64)) | 129 | else if (bitmap_empty(pnd->addrs, 64)) |
119 | __phonet_device_free(pnd); | 130 | __phonet_device_free(pnd); |
120 | spin_unlock_bh(&pndevs.lock); | 131 | spin_unlock_bh(&pndevs->lock); |
121 | return err; | 132 | return err; |
122 | } | 133 | } |
123 | 134 | ||
124 | /* Gets a source address toward a destination, through a interface. */ | 135 | /* Gets a source address toward a destination, through a interface. */ |
125 | u8 phonet_address_get(struct net_device *dev, u8 addr) | 136 | u8 phonet_address_get(struct net_device *dev, u8 addr) |
126 | { | 137 | { |
138 | struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); | ||
127 | struct phonet_device *pnd; | 139 | struct phonet_device *pnd; |
128 | 140 | ||
129 | spin_lock_bh(&pndevs.lock); | 141 | spin_lock_bh(&pndevs->lock); |
130 | pnd = __phonet_get(dev); | 142 | pnd = __phonet_get(dev); |
131 | if (pnd) { | 143 | if (pnd) { |
132 | BUG_ON(bitmap_empty(pnd->addrs, 64)); | 144 | BUG_ON(bitmap_empty(pnd->addrs, 64)); |
@@ -136,30 +148,31 @@ u8 phonet_address_get(struct net_device *dev, u8 addr) | |||
136 | addr = find_first_bit(pnd->addrs, 64) << 2; | 148 | addr = find_first_bit(pnd->addrs, 64) << 2; |
137 | } else | 149 | } else |
138 | addr = PN_NO_ADDR; | 150 | addr = PN_NO_ADDR; |
139 | spin_unlock_bh(&pndevs.lock); | 151 | spin_unlock_bh(&pndevs->lock); |
140 | return addr; | 152 | return addr; |
141 | } | 153 | } |
142 | 154 | ||
143 | int phonet_address_lookup(struct net *net, u8 addr) | 155 | int phonet_address_lookup(struct net *net, u8 addr) |
144 | { | 156 | { |
157 | struct phonet_device_list *pndevs = phonet_device_list(net); | ||
145 | struct phonet_device *pnd; | 158 | struct phonet_device *pnd; |
159 | int err = -EADDRNOTAVAIL; | ||
146 | 160 | ||
147 | spin_lock_bh(&pndevs.lock); | 161 | spin_lock_bh(&pndevs->lock); |
148 | list_for_each_entry(pnd, &pndevs.list, list) { | 162 | list_for_each_entry(pnd, &pndevs->list, list) { |
149 | if (!net_eq(dev_net(pnd->netdev), net)) | ||
150 | continue; | ||
151 | /* Don't allow unregistering devices! */ | 163 | /* Don't allow unregistering devices! */ |
152 | if ((pnd->netdev->reg_state != NETREG_REGISTERED) || | 164 | if ((pnd->netdev->reg_state != NETREG_REGISTERED) || |
153 | ((pnd->netdev->flags & IFF_UP)) != IFF_UP) | 165 | ((pnd->netdev->flags & IFF_UP)) != IFF_UP) |
154 | continue; | 166 | continue; |
155 | 167 | ||
156 | if (test_bit(addr >> 2, pnd->addrs)) { | 168 | if (test_bit(addr >> 2, pnd->addrs)) { |
157 | spin_unlock_bh(&pndevs.lock); | 169 | err = 0; |
158 | return 0; | 170 | goto found; |
159 | } | 171 | } |
160 | } | 172 | } |
161 | spin_unlock_bh(&pndevs.lock); | 173 | found: |
162 | return -EADDRNOTAVAIL; | 174 | spin_unlock_bh(&pndevs->lock); |
175 | return err; | ||
163 | } | 176 | } |
164 | 177 | ||
165 | /* notify Phonet of device events */ | 178 | /* notify Phonet of device events */ |
@@ -169,14 +182,16 @@ static int phonet_device_notify(struct notifier_block *me, unsigned long what, | |||
169 | struct net_device *dev = arg; | 182 | struct net_device *dev = arg; |
170 | 183 | ||
171 | if (what == NETDEV_UNREGISTER) { | 184 | if (what == NETDEV_UNREGISTER) { |
185 | struct phonet_device_list *pndevs; | ||
172 | struct phonet_device *pnd; | 186 | struct phonet_device *pnd; |
173 | 187 | ||
174 | /* Destroy phonet-specific device data */ | 188 | /* Destroy phonet-specific device data */ |
175 | spin_lock_bh(&pndevs.lock); | 189 | pndevs = phonet_device_list(dev_net(dev)); |
190 | spin_lock_bh(&pndevs->lock); | ||
176 | pnd = __phonet_get(dev); | 191 | pnd = __phonet_get(dev); |
177 | if (pnd) | 192 | if (pnd) |
178 | __phonet_device_free(pnd); | 193 | __phonet_device_free(pnd); |
179 | spin_unlock_bh(&pndevs.lock); | 194 | spin_unlock_bh(&pndevs->lock); |
180 | } | 195 | } |
181 | return 0; | 196 | return 0; |
182 | 197 | ||
@@ -187,24 +202,52 @@ static struct notifier_block phonet_device_notifier = { | |||
187 | .priority = 0, | 202 | .priority = 0, |
188 | }; | 203 | }; |
189 | 204 | ||
190 | /* Initialize Phonet devices list */ | 205 | /* Per-namespace Phonet devices handling */ |
191 | void phonet_device_init(void) | 206 | static int phonet_init_net(struct net *net) |
192 | { | 207 | { |
193 | register_netdevice_notifier(&phonet_device_notifier); | 208 | struct phonet_net *pnn = kmalloc(sizeof(*pnn), GFP_KERNEL); |
209 | if (!pnn) | ||
210 | return -ENOMEM; | ||
211 | |||
212 | INIT_LIST_HEAD(&pnn->pndevs.list); | ||
213 | spin_lock_init(&pnn->pndevs.lock); | ||
214 | net_assign_generic(net, phonet_net_id, pnn); | ||
215 | return 0; | ||
194 | } | 216 | } |
195 | 217 | ||
196 | void phonet_device_exit(void) | 218 | static void phonet_exit_net(struct net *net) |
197 | { | 219 | { |
220 | struct phonet_net *pnn = net_generic(net, phonet_net_id); | ||
198 | struct phonet_device *pnd, *n; | 221 | struct phonet_device *pnd, *n; |
199 | 222 | ||
200 | rtnl_unregister_all(PF_PHONET); | 223 | list_for_each_entry_safe(pnd, n, &pnn->pndevs.list, list) |
201 | rtnl_lock(); | ||
202 | spin_lock_bh(&pndevs.lock); | ||
203 | |||
204 | list_for_each_entry_safe(pnd, n, &pndevs.list, list) | ||
205 | __phonet_device_free(pnd); | 224 | __phonet_device_free(pnd); |
206 | 225 | ||
207 | spin_unlock_bh(&pndevs.lock); | 226 | kfree(pnn); |
208 | rtnl_unlock(); | 227 | } |
228 | |||
229 | static struct pernet_operations phonet_net_ops = { | ||
230 | .init = phonet_init_net, | ||
231 | .exit = phonet_exit_net, | ||
232 | }; | ||
233 | |||
234 | /* Initialize Phonet devices list */ | ||
235 | int __init phonet_device_init(void) | ||
236 | { | ||
237 | int err = register_pernet_gen_device(&phonet_net_id, &phonet_net_ops); | ||
238 | if (err) | ||
239 | return err; | ||
240 | |||
241 | register_netdevice_notifier(&phonet_device_notifier); | ||
242 | err = phonet_netlink_register(); | ||
243 | if (err) | ||
244 | phonet_device_exit(); | ||
245 | return err; | ||
246 | } | ||
247 | |||
248 | void phonet_device_exit(void) | ||
249 | { | ||
250 | rtnl_unregister_all(PF_PHONET); | ||
209 | unregister_netdevice_notifier(&phonet_device_notifier); | 251 | unregister_netdevice_notifier(&phonet_device_notifier); |
252 | unregister_pernet_gen_device(phonet_net_id, &phonet_net_ops); | ||
210 | } | 253 | } |
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c index 242fe8f8c322..1ceea1f92413 100644 --- a/net/phonet/pn_netlink.c +++ b/net/phonet/pn_netlink.c | |||
@@ -123,17 +123,16 @@ nla_put_failure: | |||
123 | 123 | ||
124 | static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb) | 124 | static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb) |
125 | { | 125 | { |
126 | struct net *net = sock_net(skb->sk); | 126 | struct phonet_device_list *pndevs; |
127 | struct phonet_device *pnd; | 127 | struct phonet_device *pnd; |
128 | int dev_idx = 0, dev_start_idx = cb->args[0]; | 128 | int dev_idx = 0, dev_start_idx = cb->args[0]; |
129 | int addr_idx = 0, addr_start_idx = cb->args[1]; | 129 | int addr_idx = 0, addr_start_idx = cb->args[1]; |
130 | 130 | ||
131 | spin_lock_bh(&pndevs.lock); | 131 | pndevs = phonet_device_list(sock_net(skb->sk)); |
132 | list_for_each_entry(pnd, &pndevs.list, list) { | 132 | spin_lock_bh(&pndevs->lock); |
133 | list_for_each_entry(pnd, &pndevs->list, list) { | ||
133 | u8 addr; | 134 | u8 addr; |
134 | 135 | ||
135 | if (!net_eq(dev_net(pnd->netdev), net)) | ||
136 | continue; | ||
137 | if (dev_idx > dev_start_idx) | 136 | if (dev_idx > dev_start_idx) |
138 | addr_start_idx = 0; | 137 | addr_start_idx = 0; |
139 | if (dev_idx++ < dev_start_idx) | 138 | if (dev_idx++ < dev_start_idx) |
@@ -153,16 +152,21 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb) | |||
153 | } | 152 | } |
154 | 153 | ||
155 | out: | 154 | out: |
156 | spin_unlock_bh(&pndevs.lock); | 155 | spin_unlock_bh(&pndevs->lock); |
157 | cb->args[0] = dev_idx; | 156 | cb->args[0] = dev_idx; |
158 | cb->args[1] = addr_idx; | 157 | cb->args[1] = addr_idx; |
159 | 158 | ||
160 | return skb->len; | 159 | return skb->len; |
161 | } | 160 | } |
162 | 161 | ||
163 | void __init phonet_netlink_register(void) | 162 | int __init phonet_netlink_register(void) |
164 | { | 163 | { |
165 | rtnl_register(PF_PHONET, RTM_NEWADDR, addr_doit, NULL); | 164 | int err = __rtnl_register(PF_PHONET, RTM_NEWADDR, addr_doit, NULL); |
166 | rtnl_register(PF_PHONET, RTM_DELADDR, addr_doit, NULL); | 165 | if (err) |
167 | rtnl_register(PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit); | 166 | return err; |
167 | |||
168 | /* Further __rtnl_register() cannot fail */ | ||
169 | __rtnl_register(PF_PHONET, RTM_DELADDR, addr_doit, NULL); | ||
170 | __rtnl_register(PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit); | ||
171 | return 0; | ||
168 | } | 172 | } |
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 01392649b462..650139626581 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c | |||
@@ -1587,8 +1587,7 @@ static int __init rose_proto_init(void) | |||
1587 | char name[IFNAMSIZ]; | 1587 | char name[IFNAMSIZ]; |
1588 | 1588 | ||
1589 | sprintf(name, "rose%d", i); | 1589 | sprintf(name, "rose%d", i); |
1590 | dev = alloc_netdev(sizeof(struct net_device_stats), | 1590 | dev = alloc_netdev(0, name, rose_setup); |
1591 | name, rose_setup); | ||
1592 | if (!dev) { | 1591 | if (!dev) { |
1593 | printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n"); | 1592 | printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n"); |
1594 | rc = -ENOMEM; | 1593 | rc = -ENOMEM; |
diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c index 12cfcf09556b..7dcf2569613b 100644 --- a/net/rose/rose_dev.c +++ b/net/rose/rose_dev.c | |||
@@ -57,7 +57,7 @@ static int rose_rebuild_header(struct sk_buff *skb) | |||
57 | { | 57 | { |
58 | #ifdef CONFIG_INET | 58 | #ifdef CONFIG_INET |
59 | struct net_device *dev = skb->dev; | 59 | struct net_device *dev = skb->dev; |
60 | struct net_device_stats *stats = netdev_priv(dev); | 60 | struct net_device_stats *stats = &dev->stats; |
61 | unsigned char *bp = (unsigned char *)skb->data; | 61 | unsigned char *bp = (unsigned char *)skb->data; |
62 | struct sk_buff *skbn; | 62 | struct sk_buff *skbn; |
63 | unsigned int len; | 63 | unsigned int len; |
@@ -133,7 +133,7 @@ static int rose_close(struct net_device *dev) | |||
133 | 133 | ||
134 | static int rose_xmit(struct sk_buff *skb, struct net_device *dev) | 134 | static int rose_xmit(struct sk_buff *skb, struct net_device *dev) |
135 | { | 135 | { |
136 | struct net_device_stats *stats = netdev_priv(dev); | 136 | struct net_device_stats *stats = &dev->stats; |
137 | 137 | ||
138 | if (!netif_running(dev)) { | 138 | if (!netif_running(dev)) { |
139 | printk(KERN_ERR "ROSE: rose_xmit - called when iface is down\n"); | 139 | printk(KERN_ERR "ROSE: rose_xmit - called when iface is down\n"); |
@@ -144,30 +144,28 @@ static int rose_xmit(struct sk_buff *skb, struct net_device *dev) | |||
144 | return 0; | 144 | return 0; |
145 | } | 145 | } |
146 | 146 | ||
147 | static struct net_device_stats *rose_get_stats(struct net_device *dev) | ||
148 | { | ||
149 | return netdev_priv(dev); | ||
150 | } | ||
151 | |||
152 | static const struct header_ops rose_header_ops = { | 147 | static const struct header_ops rose_header_ops = { |
153 | .create = rose_header, | 148 | .create = rose_header, |
154 | .rebuild= rose_rebuild_header, | 149 | .rebuild= rose_rebuild_header, |
155 | }; | 150 | }; |
156 | 151 | ||
152 | static const struct net_device_ops rose_netdev_ops = { | ||
153 | .ndo_open = rose_open, | ||
154 | .ndo_stop = rose_close, | ||
155 | .ndo_start_xmit = rose_xmit, | ||
156 | .ndo_set_mac_address = rose_set_mac_address, | ||
157 | }; | ||
158 | |||
157 | void rose_setup(struct net_device *dev) | 159 | void rose_setup(struct net_device *dev) |
158 | { | 160 | { |
159 | dev->mtu = ROSE_MAX_PACKET_SIZE - 2; | 161 | dev->mtu = ROSE_MAX_PACKET_SIZE - 2; |
160 | dev->hard_start_xmit = rose_xmit; | 162 | dev->netdev_ops = &rose_netdev_ops; |
161 | dev->open = rose_open; | ||
162 | dev->stop = rose_close; | ||
163 | 163 | ||
164 | dev->header_ops = &rose_header_ops; | 164 | dev->header_ops = &rose_header_ops; |
165 | dev->hard_header_len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN; | 165 | dev->hard_header_len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN; |
166 | dev->addr_len = ROSE_ADDR_LEN; | 166 | dev->addr_len = ROSE_ADDR_LEN; |
167 | dev->type = ARPHRD_ROSE; | 167 | dev->type = ARPHRD_ROSE; |
168 | dev->set_mac_address = rose_set_mac_address; | ||
169 | 168 | ||
170 | /* New-style flags. */ | 169 | /* New-style flags. */ |
171 | dev->flags = IFF_NOARP; | 170 | dev->flags = IFF_NOARP; |
172 | dev->get_stats = rose_get_stats; | ||
173 | } | 171 | } |
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 0fc4a18fd96f..32009793307b 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -444,6 +444,17 @@ out: | |||
444 | } | 444 | } |
445 | EXPORT_SYMBOL(qdisc_calculate_pkt_len); | 445 | EXPORT_SYMBOL(qdisc_calculate_pkt_len); |
446 | 446 | ||
447 | void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc) | ||
448 | { | ||
449 | if (!(qdisc->flags & TCQ_F_WARN_NONWC)) { | ||
450 | printk(KERN_WARNING | ||
451 | "%s: %s qdisc %X: is non-work-conserving?\n", | ||
452 | txt, qdisc->ops->id, qdisc->handle >> 16); | ||
453 | qdisc->flags |= TCQ_F_WARN_NONWC; | ||
454 | } | ||
455 | } | ||
456 | EXPORT_SYMBOL(qdisc_warn_nonwc); | ||
457 | |||
447 | static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer) | 458 | static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer) |
448 | { | 459 | { |
449 | struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog, | 460 | struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog, |
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 45c31b1a4e1d..74226b265528 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c | |||
@@ -887,8 +887,7 @@ qdisc_peek_len(struct Qdisc *sch) | |||
887 | 887 | ||
888 | skb = sch->ops->peek(sch); | 888 | skb = sch->ops->peek(sch); |
889 | if (skb == NULL) { | 889 | if (skb == NULL) { |
890 | if (net_ratelimit()) | 890 | qdisc_warn_nonwc("qdisc_peek_len", sch); |
891 | printk("qdisc_peek_len: non work-conserving qdisc ?\n"); | ||
892 | return 0; | 891 | return 0; |
893 | } | 892 | } |
894 | len = qdisc_pkt_len(skb); | 893 | len = qdisc_pkt_len(skb); |
@@ -1642,8 +1641,7 @@ hfsc_dequeue(struct Qdisc *sch) | |||
1642 | 1641 | ||
1643 | skb = qdisc_dequeue_peeked(cl->qdisc); | 1642 | skb = qdisc_dequeue_peeked(cl->qdisc); |
1644 | if (skb == NULL) { | 1643 | if (skb == NULL) { |
1645 | if (net_ratelimit()) | 1644 | qdisc_warn_nonwc("HFSC", cl->qdisc); |
1646 | printk("HFSC: Non-work-conserving qdisc ?\n"); | ||
1647 | return NULL; | 1645 | return NULL; |
1648 | } | 1646 | } |
1649 | 1647 | ||
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 2f0f0b04d3fb..355974f610c5 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/list.h> | 35 | #include <linux/list.h> |
36 | #include <linux/compiler.h> | 36 | #include <linux/compiler.h> |
37 | #include <linux/rbtree.h> | 37 | #include <linux/rbtree.h> |
38 | #include <linux/workqueue.h> | ||
38 | #include <net/netlink.h> | 39 | #include <net/netlink.h> |
39 | #include <net/pkt_sched.h> | 40 | #include <net/pkt_sched.h> |
40 | 41 | ||
@@ -114,8 +115,6 @@ struct htb_class { | |||
114 | struct tcf_proto *filter_list; | 115 | struct tcf_proto *filter_list; |
115 | int filter_cnt; | 116 | int filter_cnt; |
116 | 117 | ||
117 | int warned; /* only one warning about non work conserving .. */ | ||
118 | |||
119 | /* token bucket parameters */ | 118 | /* token bucket parameters */ |
120 | struct qdisc_rate_table *rate; /* rate table of the class itself */ | 119 | struct qdisc_rate_table *rate; /* rate table of the class itself */ |
121 | struct qdisc_rate_table *ceil; /* ceiling rate (limits borrows too) */ | 120 | struct qdisc_rate_table *ceil; /* ceiling rate (limits borrows too) */ |
@@ -155,6 +154,10 @@ struct htb_sched { | |||
155 | int direct_qlen; /* max qlen of above */ | 154 | int direct_qlen; /* max qlen of above */ |
156 | 155 | ||
157 | long direct_pkts; | 156 | long direct_pkts; |
157 | |||
158 | #define HTB_WARN_TOOMANYEVENTS 0x1 | ||
159 | unsigned int warned; /* only one warning */ | ||
160 | struct work_struct work; | ||
158 | }; | 161 | }; |
159 | 162 | ||
160 | /* find class in global hash table using given handle */ | 163 | /* find class in global hash table using given handle */ |
@@ -658,7 +661,7 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl, | |||
658 | * htb_do_events - make mode changes to classes at the level | 661 | * htb_do_events - make mode changes to classes at the level |
659 | * | 662 | * |
660 | * Scans event queue for pending events and applies them. Returns time of | 663 | * Scans event queue for pending events and applies them. Returns time of |
661 | * next pending event (0 for no event in pq). | 664 | * next pending event (0 for no event in pq, q->now for too many events). |
662 | * Note: Applied are events whose have cl->pq_key <= q->now. | 665 | * Note: Applied are events whose have cl->pq_key <= q->now. |
663 | */ | 666 | */ |
664 | static psched_time_t htb_do_events(struct htb_sched *q, int level, | 667 | static psched_time_t htb_do_events(struct htb_sched *q, int level, |
@@ -686,8 +689,14 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level, | |||
686 | if (cl->cmode != HTB_CAN_SEND) | 689 | if (cl->cmode != HTB_CAN_SEND) |
687 | htb_add_to_wait_tree(q, cl, diff); | 690 | htb_add_to_wait_tree(q, cl, diff); |
688 | } | 691 | } |
689 | /* too much load - let's continue on next jiffie (including above) */ | 692 | |
690 | return q->now + 2 * PSCHED_TICKS_PER_SEC / HZ; | 693 | /* too much load - let's continue after a break for scheduling */ |
694 | if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) { | ||
695 | printk(KERN_WARNING "htb: too many events!\n"); | ||
696 | q->warned |= HTB_WARN_TOOMANYEVENTS; | ||
697 | } | ||
698 | |||
699 | return q->now; | ||
691 | } | 700 | } |
692 | 701 | ||
693 | /* Returns class->node+prio from id-tree where classe's id is >= id. NULL | 702 | /* Returns class->node+prio from id-tree where classe's id is >= id. NULL |
@@ -809,13 +818,8 @@ next: | |||
809 | skb = cl->un.leaf.q->dequeue(cl->un.leaf.q); | 818 | skb = cl->un.leaf.q->dequeue(cl->un.leaf.q); |
810 | if (likely(skb != NULL)) | 819 | if (likely(skb != NULL)) |
811 | break; | 820 | break; |
812 | if (!cl->warned) { | ||
813 | printk(KERN_WARNING | ||
814 | "htb: class %X isn't work conserving ?!\n", | ||
815 | cl->common.classid); | ||
816 | cl->warned = 1; | ||
817 | } | ||
818 | 821 | ||
822 | qdisc_warn_nonwc("htb", cl->un.leaf.q); | ||
819 | htb_next_rb_node((level ? cl->parent->un.inner.ptr : q-> | 823 | htb_next_rb_node((level ? cl->parent->un.inner.ptr : q-> |
820 | ptr[0]) + prio); | 824 | ptr[0]) + prio); |
821 | cl = htb_lookup_leaf(q->row[level] + prio, prio, | 825 | cl = htb_lookup_leaf(q->row[level] + prio, prio, |
@@ -892,7 +896,10 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch) | |||
892 | } | 896 | } |
893 | } | 897 | } |
894 | sch->qstats.overlimits++; | 898 | sch->qstats.overlimits++; |
895 | qdisc_watchdog_schedule(&q->watchdog, next_event); | 899 | if (likely(next_event > q->now)) |
900 | qdisc_watchdog_schedule(&q->watchdog, next_event); | ||
901 | else | ||
902 | schedule_work(&q->work); | ||
896 | fin: | 903 | fin: |
897 | return skb; | 904 | return skb; |
898 | } | 905 | } |
@@ -962,6 +969,14 @@ static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = { | |||
962 | [TCA_HTB_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE }, | 969 | [TCA_HTB_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE }, |
963 | }; | 970 | }; |
964 | 971 | ||
972 | static void htb_work_func(struct work_struct *work) | ||
973 | { | ||
974 | struct htb_sched *q = container_of(work, struct htb_sched, work); | ||
975 | struct Qdisc *sch = q->watchdog.qdisc; | ||
976 | |||
977 | __netif_schedule(qdisc_root(sch)); | ||
978 | } | ||
979 | |||
965 | static int htb_init(struct Qdisc *sch, struct nlattr *opt) | 980 | static int htb_init(struct Qdisc *sch, struct nlattr *opt) |
966 | { | 981 | { |
967 | struct htb_sched *q = qdisc_priv(sch); | 982 | struct htb_sched *q = qdisc_priv(sch); |
@@ -996,6 +1011,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt) | |||
996 | INIT_LIST_HEAD(q->drops + i); | 1011 | INIT_LIST_HEAD(q->drops + i); |
997 | 1012 | ||
998 | qdisc_watchdog_init(&q->watchdog, sch); | 1013 | qdisc_watchdog_init(&q->watchdog, sch); |
1014 | INIT_WORK(&q->work, htb_work_func); | ||
999 | skb_queue_head_init(&q->direct_queue); | 1015 | skb_queue_head_init(&q->direct_queue); |
1000 | 1016 | ||
1001 | q->direct_qlen = qdisc_dev(sch)->tx_queue_len; | 1017 | q->direct_qlen = qdisc_dev(sch)->tx_queue_len; |
@@ -1188,7 +1204,6 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl) | |||
1188 | kfree(cl); | 1204 | kfree(cl); |
1189 | } | 1205 | } |
1190 | 1206 | ||
1191 | /* always caled under BH & queue lock */ | ||
1192 | static void htb_destroy(struct Qdisc *sch) | 1207 | static void htb_destroy(struct Qdisc *sch) |
1193 | { | 1208 | { |
1194 | struct htb_sched *q = qdisc_priv(sch); | 1209 | struct htb_sched *q = qdisc_priv(sch); |
@@ -1196,6 +1211,7 @@ static void htb_destroy(struct Qdisc *sch) | |||
1196 | struct htb_class *cl; | 1211 | struct htb_class *cl; |
1197 | unsigned int i; | 1212 | unsigned int i; |
1198 | 1213 | ||
1214 | cancel_work_sync(&q->work); | ||
1199 | qdisc_watchdog_cancel(&q->watchdog); | 1215 | qdisc_watchdog_cancel(&q->watchdog); |
1200 | /* This line used to be after htb_destroy_class call below | 1216 | /* This line used to be after htb_destroy_class call below |
1201 | and surprisingly it worked in 2.4. But it must precede it | 1217 | and surprisingly it worked in 2.4. But it must precede it |
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index ceaa4aa066ea..786227566696 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
@@ -97,8 +97,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev, | |||
97 | if (addr) { | 97 | if (addr) { |
98 | addr->a.v6.sin6_family = AF_INET6; | 98 | addr->a.v6.sin6_family = AF_INET6; |
99 | addr->a.v6.sin6_port = 0; | 99 | addr->a.v6.sin6_port = 0; |
100 | memcpy(&addr->a.v6.sin6_addr, &ifa->addr, | 100 | ipv6_addr_copy(&addr->a.v6.sin6_addr, &ifa->addr); |
101 | sizeof(struct in6_addr)); | ||
102 | addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex; | 101 | addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex; |
103 | addr->valid = 1; | 102 | addr->valid = 1; |
104 | spin_lock_bh(&sctp_local_addr_lock); | 103 | spin_lock_bh(&sctp_local_addr_lock); |
diff --git a/net/sctp/output.c b/net/sctp/output.c index 73639355157e..47bfba6c03ec 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c | |||
@@ -367,7 +367,7 @@ int sctp_packet_transmit(struct sctp_packet *packet) | |||
367 | struct sctp_transport *tp = packet->transport; | 367 | struct sctp_transport *tp = packet->transport; |
368 | struct sctp_association *asoc = tp->asoc; | 368 | struct sctp_association *asoc = tp->asoc; |
369 | struct sctphdr *sh; | 369 | struct sctphdr *sh; |
370 | __be32 crc32 = __constant_cpu_to_be32(0); | 370 | __be32 crc32 = cpu_to_be32(0); |
371 | struct sk_buff *nskb; | 371 | struct sk_buff *nskb; |
372 | struct sctp_chunk *chunk, *tmp; | 372 | struct sctp_chunk *chunk, *tmp; |
373 | struct sock *sk; | 373 | struct sock *sk; |
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index fd8acb48c3f2..b40e95f9851b 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -100,11 +100,11 @@ int sctp_chunk_iif(const struct sctp_chunk *chunk) | |||
100 | */ | 100 | */ |
101 | static const struct sctp_paramhdr ecap_param = { | 101 | static const struct sctp_paramhdr ecap_param = { |
102 | SCTP_PARAM_ECN_CAPABLE, | 102 | SCTP_PARAM_ECN_CAPABLE, |
103 | __constant_htons(sizeof(struct sctp_paramhdr)), | 103 | cpu_to_be16(sizeof(struct sctp_paramhdr)), |
104 | }; | 104 | }; |
105 | static const struct sctp_paramhdr prsctp_param = { | 105 | static const struct sctp_paramhdr prsctp_param = { |
106 | SCTP_PARAM_FWD_TSN_SUPPORT, | 106 | SCTP_PARAM_FWD_TSN_SUPPORT, |
107 | __constant_htons(sizeof(struct sctp_paramhdr)), | 107 | cpu_to_be16(sizeof(struct sctp_paramhdr)), |
108 | }; | 108 | }; |
109 | 109 | ||
110 | /* A helper to initialize to initialize an op error inside a | 110 | /* A helper to initialize to initialize an op error inside a |
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 9fc5b023d111..8f76f4009c24 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c | |||
@@ -1609,7 +1609,7 @@ static const struct proto_ops SOCKOPS_WRAPPED(x25_proto_ops) = { | |||
1609 | SOCKOPS_WRAP(x25_proto, AF_X25); | 1609 | SOCKOPS_WRAP(x25_proto, AF_X25); |
1610 | 1610 | ||
1611 | static struct packet_type x25_packet_type = { | 1611 | static struct packet_type x25_packet_type = { |
1612 | .type = __constant_htons(ETH_P_X25), | 1612 | .type = cpu_to_be16(ETH_P_X25), |
1613 | .func = x25_lapb_receive_frame, | 1613 | .func = x25_lapb_receive_frame, |
1614 | }; | 1614 | }; |
1615 | 1615 | ||