269 files changed, 7600 insertions, 6517 deletions
diff --git a/Documentation/filesystems/nfs/nfsroot.txt b/Documentation/filesystems/nfs/nfsroot.txt index 3ba0b945aaf8..f2430a7974e1 100644 --- a/Documentation/filesystems/nfs/nfsroot.txt +++ b/Documentation/filesystems/nfs/nfsroot.txt | |||
@@ -124,6 +124,8 @@ ip=<client-ip>:<server-ip>:<gw-ip>:<netmask>:<hostname>:<device>:<autoconf> | |||
124 | 124 | ||
125 | <hostname> Name of the client. May be supplied by autoconfiguration, | 125 | <hostname> Name of the client. May be supplied by autoconfiguration, |
126 | but its absence will not trigger autoconfiguration. | 126 | but its absence will not trigger autoconfiguration. |
127 | If specified and DHCP is used, the user-provided hostname will | ||
128 | be carried in the DHCP request so that the DNS record can be updated. | ||
127 | 129 | ||
128 | Default: Client IP address is used in ASCII notation. | 130 | Default: Client IP address is used in ASCII notation. |
129 | 131 | ||
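As an aside (not part of the patch): with DHCP autoconfiguration, a command line that leaves the addresses to be autoconfigured but still supplies a hostname to be carried in the DHCP request could look like the following, where "clienthost" and "eth0" are placeholder values:

    ip=::::clienthost:eth0:dhcp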
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt index 61f516b135b4..d0914781830e 100644 --- a/Documentation/networking/bonding.txt +++ b/Documentation/networking/bonding.txt | |||
@@ -49,6 +49,7 @@ Table of Contents | |||
49 | 3.3 Configuring Bonding Manually with Ifenslave | 49 | 3.3 Configuring Bonding Manually with Ifenslave |
50 | 3.3.1 Configuring Multiple Bonds Manually | 50 | 3.3.1 Configuring Multiple Bonds Manually |
51 | 3.4 Configuring Bonding Manually via Sysfs | 51 | 3.4 Configuring Bonding Manually via Sysfs |
52 | 3.5 Overriding Configuration for Special Cases | ||
52 | 53 | ||
53 | 4. Querying Bonding Configuration | 54 | 4. Querying Bonding Configuration |
54 | 4.1 Bonding Configuration | 55 | 4.1 Bonding Configuration |
@@ -1318,8 +1319,87 @@ echo 2000 > /sys/class/net/bond1/bonding/arp_interval | |||
1318 | echo +eth2 > /sys/class/net/bond1/bonding/slaves | 1319 | echo +eth2 > /sys/class/net/bond1/bonding/slaves |
1319 | echo +eth3 > /sys/class/net/bond1/bonding/slaves | 1320 | echo +eth3 > /sys/class/net/bond1/bonding/slaves |
1320 | 1321 | ||
1321 | 1322 | 3.5 Overriding Configuration for Special Cases | |
1322 | 4. Querying Bonding Configuration | 1323 | ---------------------------------------------- |
1324 | When using the bonding driver, the physical port which transmits a frame is | ||
1325 | typically selected by the bonding driver, and is not relevant to the user or | ||
1326 | system administrator. The output port is simply selected using the policies of | ||
1327 | the selected bonding mode. On occasion, however, it is helpful to direct certain | ||
1328 | classes of traffic to certain physical interfaces on output to implement | ||
1329 | slightly more complex policies. For example, to reach a web server over a | ||
1330 | bonded interface in which eth0 connects to a private network, while eth1 | ||
1331 | connects via a public network, it may be desirable to bias the bond to send that | ||
1332 | traffic over eth0 first, using eth1 only as a fall back, while all other traffic | ||
1333 | can safely be sent over either interface. Such configurations may be achieved | ||
1334 | using the traffic control utilities inherent in Linux. | ||
1335 | |||
1336 | By default the bonding driver is multiqueue aware and 16 queues are created | ||
1337 | when the driver initializes (see Documentation/networking/multiqueue.txt | ||
1338 | for details). If more or fewer queues are desired, the module parameter | ||
1339 | tx_queues can be used to change this value. There is no sysfs parameter | ||
1340 | available as the allocation is done at module init time. | ||
1341 | |||
1342 | The output of the file /proc/net/bonding/bondX has changed so the output Queue | ||
1343 | ID is now printed for each slave: | ||
1344 | |||
1345 | Bonding Mode: fault-tolerance (active-backup) | ||
1346 | Primary Slave: None | ||
1347 | Currently Active Slave: eth0 | ||
1348 | MII Status: up | ||
1349 | MII Polling Interval (ms): 0 | ||
1350 | Up Delay (ms): 0 | ||
1351 | Down Delay (ms): 0 | ||
1352 | |||
1353 | Slave Interface: eth0 | ||
1354 | MII Status: up | ||
1355 | Link Failure Count: 0 | ||
1356 | Permanent HW addr: 00:1a:a0:12:8f:cb | ||
1357 | Slave queue ID: 0 | ||
1358 | |||
1359 | Slave Interface: eth1 | ||
1360 | MII Status: up | ||
1361 | Link Failure Count: 0 | ||
1362 | Permanent HW addr: 00:1a:a0:12:8f:cc | ||
1363 | Slave queue ID: 2 | ||
1364 | |||
1365 | The queue_id for a slave can be set using the command: | ||
1366 | |||
1367 | # echo "eth1:2" > /sys/class/net/bond0/bonding/queue_id | ||
1368 | |||
1369 | Set the queue_id for each interface that needs one with repeated calls | ||
1370 | like the one above until all interfaces have the desired priorities. On | ||
1371 | distributions that allow configuration via initscripts, multiple 'queue_id' | ||
1372 | arguments can be added to BONDING_OPTS to set all needed slave queues. | ||
1373 | |||
1374 | These queue IDs can be used in conjunction with the tc utility to configure | ||
1375 | a multiqueue qdisc and filters to bias certain traffic to transmit on certain | ||
1376 | slave devices. For instance, say we wanted, in the above configuration, to | ||
1377 | force all traffic bound for 192.168.1.100 to use eth1 in the bond as its output | ||
1378 | device. The following commands would accomplish this: | ||
1379 | |||
1380 | # tc qdisc add dev bond0 handle 1 root multiq | ||
1381 | |||
1382 | # tc filter add dev bond0 protocol ip parent 1: prio 1 u32 match ip dst \ | ||
1383 | 192.168.1.100 action skbedit queue_mapping 2 | ||
1384 | |||
1385 | These commands tell the kernel to attach a multiqueue queue discipline to the | ||
1386 | bond0 interface and filter traffic enqueued to it, such that packets with a dst | ||
1387 | ip of 192.168.1.100 have their output queue mapping value overwritten to 2. | ||
1388 | This value is then passed into the driver, causing the normal output path | ||
1389 | selection policy to be overridden, selecting instead qid 2, which maps to eth1. | ||
1390 | |||
1391 | Note that qid values begin at 1. Qid 0 is reserved to indicate to the driver | ||
1392 | that normal output policy selection should take place. One benefit of simply | ||
1393 | leaving the qid for a slave at 0 is the multiqueue awareness now present in | ||
1394 | the bonding driver. This awareness allows tc filters to be placed on | ||
1395 | slave devices as well as bond devices and the bonding driver will simply act as | ||
1396 | a pass-through for selecting output queues on the slave device rather than | ||
1397 | output port selection. | ||
1398 | |||
1399 | This feature first appeared in bonding driver version 3.7.0 and support for | ||
1400 | output slave selection was limited to round-robin and active-backup modes. | ||
1401 | |||
1402 | 4. Querying Bonding Configuration | ||
1323 | ================================= | 1403 | ================================= |
1324 | 1404 | ||
1325 | 4.1 Bonding Configuration | 1405 | 4.1 Bonding Configuration |
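As a usage sketch (not part of the patch), assigning a queue ID to each slave of the example bond from section 3.5 is just a matter of repeating the echo shown there once per interface; the interface names below are placeholders:

    # echo "eth0:1" > /sys/class/net/bond0/bonding/queue_id
    # echo "eth1:2" > /sys/class/net/bond0/bonding/queue_id

The assigned values then appear in the per-slave "Slave queue ID" lines of /proc/net/bonding/bond0.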
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index d0536b5a4e01..f350c69b2bb4 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt | |||
@@ -903,7 +903,7 @@ arp_ignore - INTEGER | |||
903 | arp_notify - BOOLEAN | 903 | arp_notify - BOOLEAN |
904 | Define mode for notification of address and device changes. | 904 | Define mode for notification of address and device changes. |
905 | 0 - (default): do nothing | 905 | 0 - (default): do nothing |
906 | 1 - Generate gratuitous arp replies when device is brought up | 906 | 1 - Generate gratuitous arp requests when device is brought up |
907 | or hardware address changes. | 907 | or hardware address changes. |
908 | 908 | ||
909 | arp_accept - BOOLEAN | 909 | arp_accept - BOOLEAN |
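For illustration (not part of the patch), the arp_notify mode is selected per interface through the usual sysctl path; "eth0" is a placeholder:

    # sysctl -w net.ipv4.conf.eth0.arp_notify=1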
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt index 98f71a5cef00..2546aa4dc232 100644 --- a/Documentation/networking/packet_mmap.txt +++ b/Documentation/networking/packet_mmap.txt | |||
@@ -493,6 +493,32 @@ The user can also use poll() to check if a buffer is available: | |||
493 | pfd.events = POLLOUT; | 493 | pfd.events = POLLOUT; |
494 | retval = poll(&pfd, 1, timeout); | 494 | retval = poll(&pfd, 1, timeout); |
495 | 495 | ||
496 | ------------------------------------------------------------------------------- | ||
497 | + PACKET_TIMESTAMP | ||
498 | ------------------------------------------------------------------------------- | ||
499 | |||
500 | The PACKET_TIMESTAMP setting determines the source of the timestamp in | ||
501 | the packet meta information. If your NIC is capable of timestamping | ||
502 | packets in hardware, you can request those hardware timestamps to be used. | ||
503 | Note: you may need to enable the generation of hardware timestamps with | ||
504 | SIOCSHWTSTAMP. | ||
505 | |||
506 | PACKET_TIMESTAMP accepts the same integer bit field as | ||
507 | SO_TIMESTAMPING. However, only the SOF_TIMESTAMPING_SYS_HARDWARE | ||
508 | and SOF_TIMESTAMPING_RAW_HARDWARE values are recognized by | ||
509 | PACKET_TIMESTAMP. SOF_TIMESTAMPING_SYS_HARDWARE takes precedence over | ||
510 | SOF_TIMESTAMPING_RAW_HARDWARE if both bits are set. | ||
511 | |||
512 | int req = 0; | ||
513 | req |= SOF_TIMESTAMPING_SYS_HARDWARE; | ||
514 | setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, (void *) &req, sizeof(req)); | ||
515 | |||
516 | If PACKET_TIMESTAMP is not set, a software timestamp generated inside | ||
517 | the networking stack is used (the behavior before this setting was added). | ||
518 | |||
519 | See include/linux/net_tstamp.h and Documentation/networking/timestamping | ||
520 | for more information on hardware timestamps. | ||
521 | |||
496 | -------------------------------------------------------------------------------- | 522 | -------------------------------------------------------------------------------- |
497 | + THANKS | 523 | + THANKS |
498 | -------------------------------------------------------------------------------- | 524 | -------------------------------------------------------------------------------- |
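A self-contained sketch (an illustration, not part of the patch) expanding the snippet above; it assumes a process with CAP_NET_RAW and sets both hardware timestamp flags, with SOF_TIMESTAMPING_SYS_HARDWARE taking precedence as noted:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <arpa/inet.h>          /* htons() */
    #include <linux/if_ether.h>     /* ETH_P_ALL */
    #include <linux/if_packet.h>    /* PACKET_TIMESTAMP */
    #include <linux/net_tstamp.h>   /* SOF_TIMESTAMPING_* */

    int main(void)
    {
            int fd, req;

            fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
            if (fd < 0) {
                    perror("socket");       /* requires CAP_NET_RAW */
                    return 1;
            }

            req = SOF_TIMESTAMPING_SYS_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
            if (setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP,
                           (void *) &req, sizeof(req)) < 0) {
                    perror("setsockopt(PACKET_TIMESTAMP)");
                    return 1;
            }

            /* hardware timestamps (if enabled via SIOCSHWTSTAMP) are now
               reported in the packet meta information */
            return 0;
    }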
diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig index f1a0a00b3b07..be7461c9a87e 100644 --- a/drivers/atm/Kconfig +++ b/drivers/atm/Kconfig | |||
@@ -177,7 +177,7 @@ config ATM_ZATM_DEBUG | |||
177 | 177 | ||
178 | config ATM_NICSTAR | 178 | config ATM_NICSTAR |
179 | tristate "IDT 77201 (NICStAR) (ForeRunnerLE)" | 179 | tristate "IDT 77201 (NICStAR) (ForeRunnerLE)" |
180 | depends on PCI && !64BIT && VIRT_TO_BUS | 180 | depends on PCI |
181 | help | 181 | help |
182 | The NICStAR chipset family is used in a large number of ATM NICs for | 182 | The NICStAR chipset family is used in a large number of ATM NICs for |
183 | 25 and for 155 Mbps, including IDT cards and the Fore ForeRunnerLE | 183 | 25 and for 155 Mbps, including IDT cards and the Fore ForeRunnerLE |
diff --git a/drivers/atm/he.c b/drivers/atm/he.c index 56c2e99e458f..ea9cbe596a28 100644 --- a/drivers/atm/he.c +++ b/drivers/atm/he.c | |||
@@ -67,6 +67,7 @@ | |||
67 | #include <linux/timer.h> | 67 | #include <linux/timer.h> |
68 | #include <linux/interrupt.h> | 68 | #include <linux/interrupt.h> |
69 | #include <linux/dma-mapping.h> | 69 | #include <linux/dma-mapping.h> |
70 | #include <linux/bitmap.h> | ||
70 | #include <linux/slab.h> | 71 | #include <linux/slab.h> |
71 | #include <asm/io.h> | 72 | #include <asm/io.h> |
72 | #include <asm/byteorder.h> | 73 | #include <asm/byteorder.h> |
@@ -778,61 +779,39 @@ he_init_cs_block_rcm(struct he_dev *he_dev) | |||
778 | static int __devinit | 779 | static int __devinit |
779 | he_init_group(struct he_dev *he_dev, int group) | 780 | he_init_group(struct he_dev *he_dev, int group) |
780 | { | 781 | { |
782 | struct he_buff *heb, *next; | ||
783 | dma_addr_t mapping; | ||
781 | int i; | 784 | int i; |
782 | 785 | ||
783 | /* small buffer pool */ | 786 | he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32)); |
784 | he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev, | 787 | he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32)); |
785 | CONFIG_RBPS_BUFSIZE, 8, 0); | 788 | he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32)); |
786 | if (he_dev->rbps_pool == NULL) { | 789 | he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0), |
787 | hprintk("unable to create rbps pages\n"); | 790 | G0_RBPS_BS + (group * 32)); |
791 | |||
792 | /* bitmap table */ | ||
793 | he_dev->rbpl_table = kmalloc(BITS_TO_LONGS(RBPL_TABLE_SIZE) | ||
794 | * sizeof(unsigned long), GFP_KERNEL); | ||
795 | if (!he_dev->rbpl_table) { | ||
796 | hprintk("unable to allocate rbpl bitmap table\n"); | ||
788 | return -ENOMEM; | 797 | return -ENOMEM; |
789 | } | 798 | } |
799 | bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE); | ||
790 | 800 | ||
791 | he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev, | 801 | /* rbpl_virt 64-bit pointers */ |
792 | CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys); | 802 | he_dev->rbpl_virt = kmalloc(RBPL_TABLE_SIZE |
793 | if (he_dev->rbps_base == NULL) { | 803 | * sizeof(struct he_buff *), GFP_KERNEL); |
794 | hprintk("failed to alloc rbps_base\n"); | 804 | if (!he_dev->rbpl_virt) { |
795 | goto out_destroy_rbps_pool; | 805 | hprintk("unable to allocate rbpl virt table\n"); |
806 | goto out_free_rbpl_table; | ||
796 | } | 807 | } |
797 | memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp)); | ||
798 | he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL); | ||
799 | if (he_dev->rbps_virt == NULL) { | ||
800 | hprintk("failed to alloc rbps_virt\n"); | ||
801 | goto out_free_rbps_base; | ||
802 | } | ||
803 | |||
804 | for (i = 0; i < CONFIG_RBPS_SIZE; ++i) { | ||
805 | dma_addr_t dma_handle; | ||
806 | void *cpuaddr; | ||
807 | |||
808 | cpuaddr = pci_pool_alloc(he_dev->rbps_pool, GFP_KERNEL|GFP_DMA, &dma_handle); | ||
809 | if (cpuaddr == NULL) | ||
810 | goto out_free_rbps_virt; | ||
811 | |||
812 | he_dev->rbps_virt[i].virt = cpuaddr; | ||
813 | he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF); | ||
814 | he_dev->rbps_base[i].phys = dma_handle; | ||
815 | |||
816 | } | ||
817 | he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1]; | ||
818 | |||
819 | he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32)); | ||
820 | he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), | ||
821 | G0_RBPS_T + (group * 32)); | ||
822 | he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4, | ||
823 | G0_RBPS_BS + (group * 32)); | ||
824 | he_writel(he_dev, | ||
825 | RBP_THRESH(CONFIG_RBPS_THRESH) | | ||
826 | RBP_QSIZE(CONFIG_RBPS_SIZE - 1) | | ||
827 | RBP_INT_ENB, | ||
828 | G0_RBPS_QI + (group * 32)); | ||
829 | 808 | ||
830 | /* large buffer pool */ | 809 | /* large buffer pool */ |
831 | he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev, | 810 | he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev, |
832 | CONFIG_RBPL_BUFSIZE, 8, 0); | 811 | CONFIG_RBPL_BUFSIZE, 64, 0); |
833 | if (he_dev->rbpl_pool == NULL) { | 812 | if (he_dev->rbpl_pool == NULL) { |
834 | hprintk("unable to create rbpl pool\n"); | 813 | hprintk("unable to create rbpl pool\n"); |
835 | goto out_free_rbps_virt; | 814 | goto out_free_rbpl_virt; |
836 | } | 815 | } |
837 | 816 | ||
838 | he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev, | 817 | he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev, |
@@ -842,30 +821,29 @@ he_init_group(struct he_dev *he_dev, int group) | |||
842 | goto out_destroy_rbpl_pool; | 821 | goto out_destroy_rbpl_pool; |
843 | } | 822 | } |
844 | memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp)); | 823 | memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp)); |
845 | he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL); | 824 | |
846 | if (he_dev->rbpl_virt == NULL) { | 825 | INIT_LIST_HEAD(&he_dev->rbpl_outstanding); |
847 | hprintk("failed to alloc rbpl_virt\n"); | ||
848 | goto out_free_rbpl_base; | ||
849 | } | ||
850 | 826 | ||
851 | for (i = 0; i < CONFIG_RBPL_SIZE; ++i) { | 827 | for (i = 0; i < CONFIG_RBPL_SIZE; ++i) { |
852 | dma_addr_t dma_handle; | ||
853 | void *cpuaddr; | ||
854 | 828 | ||
855 | cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &dma_handle); | 829 | heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &mapping); |
856 | if (cpuaddr == NULL) | 830 | if (!heb) |
857 | goto out_free_rbpl_virt; | 831 | goto out_free_rbpl; |
832 | heb->mapping = mapping; | ||
833 | list_add(&heb->entry, &he_dev->rbpl_outstanding); | ||
858 | 834 | ||
859 | he_dev->rbpl_virt[i].virt = cpuaddr; | 835 | set_bit(i, he_dev->rbpl_table); |
860 | he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF); | 836 | he_dev->rbpl_virt[i] = heb; |
861 | he_dev->rbpl_base[i].phys = dma_handle; | 837 | he_dev->rbpl_hint = i + 1; |
838 | he_dev->rbpl_base[i].idx = i << RBP_IDX_OFFSET; | ||
839 | he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data); | ||
862 | } | 840 | } |
863 | he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1]; | 841 | he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1]; |
864 | 842 | ||
865 | he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32)); | 843 | he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32)); |
866 | he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), | 844 | he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), |
867 | G0_RBPL_T + (group * 32)); | 845 | G0_RBPL_T + (group * 32)); |
868 | he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4, | 846 | he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4, |
869 | G0_RBPL_BS + (group * 32)); | 847 | G0_RBPL_BS + (group * 32)); |
870 | he_writel(he_dev, | 848 | he_writel(he_dev, |
871 | RBP_THRESH(CONFIG_RBPL_THRESH) | | 849 | RBP_THRESH(CONFIG_RBPL_THRESH) | |
@@ -879,7 +857,7 @@ he_init_group(struct he_dev *he_dev, int group) | |||
879 | CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys); | 857 | CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys); |
880 | if (he_dev->rbrq_base == NULL) { | 858 | if (he_dev->rbrq_base == NULL) { |
881 | hprintk("failed to allocate rbrq\n"); | 859 | hprintk("failed to allocate rbrq\n"); |
882 | goto out_free_rbpl_virt; | 860 | goto out_free_rbpl; |
883 | } | 861 | } |
884 | memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq)); | 862 | memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq)); |
885 | 863 | ||
@@ -920,33 +898,20 @@ out_free_rbpq_base: | |||
920 | pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * | 898 | pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * |
921 | sizeof(struct he_rbrq), he_dev->rbrq_base, | 899 | sizeof(struct he_rbrq), he_dev->rbrq_base, |
922 | he_dev->rbrq_phys); | 900 | he_dev->rbrq_phys); |
923 | i = CONFIG_RBPL_SIZE; | 901 | out_free_rbpl: |
924 | out_free_rbpl_virt: | 902 | list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry) |
925 | while (i--) | 903 | pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping); |
926 | pci_pool_free(he_dev->rbpl_pool, he_dev->rbpl_virt[i].virt, | ||
927 | he_dev->rbpl_base[i].phys); | ||
928 | kfree(he_dev->rbpl_virt); | ||
929 | 904 | ||
930 | out_free_rbpl_base: | ||
931 | pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE * | 905 | pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE * |
932 | sizeof(struct he_rbp), he_dev->rbpl_base, | 906 | sizeof(struct he_rbp), he_dev->rbpl_base, |
933 | he_dev->rbpl_phys); | 907 | he_dev->rbpl_phys); |
934 | out_destroy_rbpl_pool: | 908 | out_destroy_rbpl_pool: |
935 | pci_pool_destroy(he_dev->rbpl_pool); | 909 | pci_pool_destroy(he_dev->rbpl_pool); |
910 | out_free_rbpl_virt: | ||
911 | kfree(he_dev->rbpl_virt); | ||
912 | out_free_rbpl_table: | ||
913 | kfree(he_dev->rbpl_table); | ||
936 | 914 | ||
937 | i = CONFIG_RBPS_SIZE; | ||
938 | out_free_rbps_virt: | ||
939 | while (i--) | ||
940 | pci_pool_free(he_dev->rbps_pool, he_dev->rbps_virt[i].virt, | ||
941 | he_dev->rbps_base[i].phys); | ||
942 | kfree(he_dev->rbps_virt); | ||
943 | |||
944 | out_free_rbps_base: | ||
945 | pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE * | ||
946 | sizeof(struct he_rbp), he_dev->rbps_base, | ||
947 | he_dev->rbps_phys); | ||
948 | out_destroy_rbps_pool: | ||
949 | pci_pool_destroy(he_dev->rbps_pool); | ||
950 | return -ENOMEM; | 915 | return -ENOMEM; |
951 | } | 916 | } |
952 | 917 | ||
@@ -1576,9 +1541,10 @@ he_start(struct atm_dev *dev) | |||
1576 | static void | 1541 | static void |
1577 | he_stop(struct he_dev *he_dev) | 1542 | he_stop(struct he_dev *he_dev) |
1578 | { | 1543 | { |
1579 | u16 command; | 1544 | struct he_buff *heb, *next; |
1580 | u32 gen_cntl_0, reg; | ||
1581 | struct pci_dev *pci_dev; | 1545 | struct pci_dev *pci_dev; |
1546 | u32 gen_cntl_0, reg; | ||
1547 | u16 command; | ||
1582 | 1548 | ||
1583 | pci_dev = he_dev->pci_dev; | 1549 | pci_dev = he_dev->pci_dev; |
1584 | 1550 | ||
@@ -1619,37 +1585,19 @@ he_stop(struct he_dev *he_dev) | |||
1619 | he_dev->hsp, he_dev->hsp_phys); | 1585 | he_dev->hsp, he_dev->hsp_phys); |
1620 | 1586 | ||
1621 | if (he_dev->rbpl_base) { | 1587 | if (he_dev->rbpl_base) { |
1622 | int i; | 1588 | list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry) |
1623 | 1589 | pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping); | |
1624 | for (i = 0; i < CONFIG_RBPL_SIZE; ++i) { | ||
1625 | void *cpuaddr = he_dev->rbpl_virt[i].virt; | ||
1626 | dma_addr_t dma_handle = he_dev->rbpl_base[i].phys; | ||
1627 | 1590 | ||
1628 | pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle); | ||
1629 | } | ||
1630 | pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE | 1591 | pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE |
1631 | * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys); | 1592 | * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys); |
1632 | } | 1593 | } |
1633 | 1594 | ||
1595 | kfree(he_dev->rbpl_virt); | ||
1596 | kfree(he_dev->rbpl_table); | ||
1597 | |||
1634 | if (he_dev->rbpl_pool) | 1598 | if (he_dev->rbpl_pool) |
1635 | pci_pool_destroy(he_dev->rbpl_pool); | 1599 | pci_pool_destroy(he_dev->rbpl_pool); |
1636 | 1600 | ||
1637 | if (he_dev->rbps_base) { | ||
1638 | int i; | ||
1639 | |||
1640 | for (i = 0; i < CONFIG_RBPS_SIZE; ++i) { | ||
1641 | void *cpuaddr = he_dev->rbps_virt[i].virt; | ||
1642 | dma_addr_t dma_handle = he_dev->rbps_base[i].phys; | ||
1643 | |||
1644 | pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle); | ||
1645 | } | ||
1646 | pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE | ||
1647 | * sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys); | ||
1648 | } | ||
1649 | |||
1650 | if (he_dev->rbps_pool) | ||
1651 | pci_pool_destroy(he_dev->rbps_pool); | ||
1652 | |||
1653 | if (he_dev->rbrq_base) | 1601 | if (he_dev->rbrq_base) |
1654 | pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), | 1602 | pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), |
1655 | he_dev->rbrq_base, he_dev->rbrq_phys); | 1603 | he_dev->rbrq_base, he_dev->rbrq_phys); |
@@ -1679,13 +1627,13 @@ static struct he_tpd * | |||
1679 | __alloc_tpd(struct he_dev *he_dev) | 1627 | __alloc_tpd(struct he_dev *he_dev) |
1680 | { | 1628 | { |
1681 | struct he_tpd *tpd; | 1629 | struct he_tpd *tpd; |
1682 | dma_addr_t dma_handle; | 1630 | dma_addr_t mapping; |
1683 | 1631 | ||
1684 | tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &dma_handle); | 1632 | tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &mapping); |
1685 | if (tpd == NULL) | 1633 | if (tpd == NULL) |
1686 | return NULL; | 1634 | return NULL; |
1687 | 1635 | ||
1688 | tpd->status = TPD_ADDR(dma_handle); | 1636 | tpd->status = TPD_ADDR(mapping); |
1689 | tpd->reserved = 0; | 1637 | tpd->reserved = 0; |
1690 | tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0; | 1638 | tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0; |
1691 | tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0; | 1639 | tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0; |
@@ -1714,13 +1662,12 @@ he_service_rbrq(struct he_dev *he_dev, int group) | |||
1714 | struct he_rbrq *rbrq_tail = (struct he_rbrq *) | 1662 | struct he_rbrq *rbrq_tail = (struct he_rbrq *) |
1715 | ((unsigned long)he_dev->rbrq_base | | 1663 | ((unsigned long)he_dev->rbrq_base | |
1716 | he_dev->hsp->group[group].rbrq_tail); | 1664 | he_dev->hsp->group[group].rbrq_tail); |
1717 | struct he_rbp *rbp = NULL; | ||
1718 | unsigned cid, lastcid = -1; | 1665 | unsigned cid, lastcid = -1; |
1719 | unsigned buf_len = 0; | ||
1720 | struct sk_buff *skb; | 1666 | struct sk_buff *skb; |
1721 | struct atm_vcc *vcc = NULL; | 1667 | struct atm_vcc *vcc = NULL; |
1722 | struct he_vcc *he_vcc; | 1668 | struct he_vcc *he_vcc; |
1723 | struct he_iovec *iov; | 1669 | struct he_buff *heb, *next; |
1670 | int i; | ||
1724 | int pdus_assembled = 0; | 1671 | int pdus_assembled = 0; |
1725 | int updated = 0; | 1672 | int updated = 0; |
1726 | 1673 | ||
@@ -1740,44 +1687,35 @@ he_service_rbrq(struct he_dev *he_dev, int group) | |||
1740 | RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "", | 1687 | RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "", |
1741 | RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : ""); | 1688 | RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : ""); |
1742 | 1689 | ||
1743 | if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF) | 1690 | i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET; |
1744 | rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))]; | 1691 | heb = he_dev->rbpl_virt[i]; |
1745 | else | ||
1746 | rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))]; | ||
1747 | |||
1748 | buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4; | ||
1749 | cid = RBRQ_CID(he_dev->rbrq_head); | ||
1750 | 1692 | ||
1693 | cid = RBRQ_CID(he_dev->rbrq_head); | ||
1751 | if (cid != lastcid) | 1694 | if (cid != lastcid) |
1752 | vcc = __find_vcc(he_dev, cid); | 1695 | vcc = __find_vcc(he_dev, cid); |
1753 | lastcid = cid; | 1696 | lastcid = cid; |
1754 | 1697 | ||
1755 | if (vcc == NULL) { | 1698 | if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) { |
1756 | hprintk("vcc == NULL (cid 0x%x)\n", cid); | 1699 | hprintk("vcc/he_vcc == NULL (cid 0x%x)\n", cid); |
1757 | if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) | 1700 | if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) { |
1758 | rbp->status &= ~RBP_LOANED; | 1701 | clear_bit(i, he_dev->rbpl_table); |
1702 | list_del(&heb->entry); | ||
1703 | pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping); | ||
1704 | } | ||
1759 | 1705 | ||
1760 | goto next_rbrq_entry; | 1706 | goto next_rbrq_entry; |
1761 | } | 1707 | } |
1762 | 1708 | ||
1763 | he_vcc = HE_VCC(vcc); | ||
1764 | if (he_vcc == NULL) { | ||
1765 | hprintk("he_vcc == NULL (cid 0x%x)\n", cid); | ||
1766 | if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) | ||
1767 | rbp->status &= ~RBP_LOANED; | ||
1768 | goto next_rbrq_entry; | ||
1769 | } | ||
1770 | |||
1771 | if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) { | 1709 | if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) { |
1772 | hprintk("HBUF_ERR! (cid 0x%x)\n", cid); | 1710 | hprintk("HBUF_ERR! (cid 0x%x)\n", cid); |
1773 | atomic_inc(&vcc->stats->rx_drop); | 1711 | atomic_inc(&vcc->stats->rx_drop); |
1774 | goto return_host_buffers; | 1712 | goto return_host_buffers; |
1775 | } | 1713 | } |
1776 | 1714 | ||
1777 | he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head); | 1715 | heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4; |
1778 | he_vcc->iov_tail->iov_len = buf_len; | 1716 | clear_bit(i, he_dev->rbpl_table); |
1779 | he_vcc->pdu_len += buf_len; | 1717 | list_move_tail(&heb->entry, &he_vcc->buffers); |
1780 | ++he_vcc->iov_tail; | 1718 | he_vcc->pdu_len += heb->len; |
1781 | 1719 | ||
1782 | if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) { | 1720 | if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) { |
1783 | lastcid = -1; | 1721 | lastcid = -1; |
@@ -1786,12 +1724,6 @@ he_service_rbrq(struct he_dev *he_dev, int group) | |||
1786 | goto return_host_buffers; | 1724 | goto return_host_buffers; |
1787 | } | 1725 | } |
1788 | 1726 | ||
1789 | #ifdef notdef | ||
1790 | if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) { | ||
1791 | hprintk("iovec full! cid 0x%x\n", cid); | ||
1792 | goto return_host_buffers; | ||
1793 | } | ||
1794 | #endif | ||
1795 | if (!RBRQ_END_PDU(he_dev->rbrq_head)) | 1727 | if (!RBRQ_END_PDU(he_dev->rbrq_head)) |
1796 | goto next_rbrq_entry; | 1728 | goto next_rbrq_entry; |
1797 | 1729 | ||
@@ -1819,15 +1751,8 @@ he_service_rbrq(struct he_dev *he_dev, int group) | |||
1819 | 1751 | ||
1820 | __net_timestamp(skb); | 1752 | __net_timestamp(skb); |
1821 | 1753 | ||
1822 | for (iov = he_vcc->iov_head; | 1754 | list_for_each_entry(heb, &he_vcc->buffers, entry) |
1823 | iov < he_vcc->iov_tail; ++iov) { | 1755 | memcpy(skb_put(skb, heb->len), &heb->data, heb->len); |
1824 | if (iov->iov_base & RBP_SMALLBUF) | ||
1825 | memcpy(skb_put(skb, iov->iov_len), | ||
1826 | he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len); | ||
1827 | else | ||
1828 | memcpy(skb_put(skb, iov->iov_len), | ||
1829 | he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len); | ||
1830 | } | ||
1831 | 1756 | ||
1832 | switch (vcc->qos.aal) { | 1757 | switch (vcc->qos.aal) { |
1833 | case ATM_AAL0: | 1758 | case ATM_AAL0: |
@@ -1867,17 +1792,9 @@ he_service_rbrq(struct he_dev *he_dev, int group) | |||
1867 | return_host_buffers: | 1792 | return_host_buffers: |
1868 | ++pdus_assembled; | 1793 | ++pdus_assembled; |
1869 | 1794 | ||
1870 | for (iov = he_vcc->iov_head; | 1795 | list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry) |
1871 | iov < he_vcc->iov_tail; ++iov) { | 1796 | pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping); |
1872 | if (iov->iov_base & RBP_SMALLBUF) | 1797 | INIT_LIST_HEAD(&he_vcc->buffers); |
1873 | rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)]; | ||
1874 | else | ||
1875 | rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)]; | ||
1876 | |||
1877 | rbp->status &= ~RBP_LOANED; | ||
1878 | } | ||
1879 | |||
1880 | he_vcc->iov_tail = he_vcc->iov_head; | ||
1881 | he_vcc->pdu_len = 0; | 1798 | he_vcc->pdu_len = 0; |
1882 | 1799 | ||
1883 | next_rbrq_entry: | 1800 | next_rbrq_entry: |
@@ -1978,59 +1895,51 @@ next_tbrq_entry: | |||
1978 | } | 1895 | } |
1979 | } | 1896 | } |
1980 | 1897 | ||
1981 | |||
1982 | static void | 1898 | static void |
1983 | he_service_rbpl(struct he_dev *he_dev, int group) | 1899 | he_service_rbpl(struct he_dev *he_dev, int group) |
1984 | { | 1900 | { |
1985 | struct he_rbp *newtail; | 1901 | struct he_rbp *new_tail; |
1986 | struct he_rbp *rbpl_head; | 1902 | struct he_rbp *rbpl_head; |
1903 | struct he_buff *heb; | ||
1904 | dma_addr_t mapping; | ||
1905 | int i; | ||
1987 | int moved = 0; | 1906 | int moved = 0; |
1988 | 1907 | ||
1989 | rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base | | 1908 | rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base | |
1990 | RBPL_MASK(he_readl(he_dev, G0_RBPL_S))); | 1909 | RBPL_MASK(he_readl(he_dev, G0_RBPL_S))); |
1991 | 1910 | ||
1992 | for (;;) { | 1911 | for (;;) { |
1993 | newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base | | 1912 | new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base | |
1994 | RBPL_MASK(he_dev->rbpl_tail+1)); | 1913 | RBPL_MASK(he_dev->rbpl_tail+1)); |
1995 | 1914 | ||
1996 | /* table 3.42 -- rbpl_tail should never be set to rbpl_head */ | 1915 | /* table 3.42 -- rbpl_tail should never be set to rbpl_head */ |
1997 | if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED)) | 1916 | if (new_tail == rbpl_head) |
1998 | break; | 1917 | break; |
1999 | 1918 | ||
2000 | newtail->status |= RBP_LOANED; | 1919 | i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint); |
2001 | he_dev->rbpl_tail = newtail; | 1920 | if (i > (RBPL_TABLE_SIZE - 1)) { |
2002 | ++moved; | 1921 | i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE); |
2003 | } | 1922 | if (i > (RBPL_TABLE_SIZE - 1)) |
2004 | 1923 | break; | |
2005 | if (moved) | 1924 | } |
2006 | he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T); | 1925 | he_dev->rbpl_hint = i + 1; |
2007 | } | ||
2008 | |||
2009 | static void | ||
2010 | he_service_rbps(struct he_dev *he_dev, int group) | ||
2011 | { | ||
2012 | struct he_rbp *newtail; | ||
2013 | struct he_rbp *rbps_head; | ||
2014 | int moved = 0; | ||
2015 | |||
2016 | rbps_head = (struct he_rbp *) ((unsigned long)he_dev->rbps_base | | ||
2017 | RBPS_MASK(he_readl(he_dev, G0_RBPS_S))); | ||
2018 | |||
2019 | for (;;) { | ||
2020 | newtail = (struct he_rbp *) ((unsigned long)he_dev->rbps_base | | ||
2021 | RBPS_MASK(he_dev->rbps_tail+1)); | ||
2022 | 1926 | ||
2023 | /* table 3.42 -- rbps_tail should never be set to rbps_head */ | 1927 | heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC|GFP_DMA, &mapping); |
2024 | if ((newtail == rbps_head) || (newtail->status & RBP_LOANED)) | 1928 | if (!heb) |
2025 | break; | 1929 | break; |
2026 | 1930 | heb->mapping = mapping; | |
2027 | newtail->status |= RBP_LOANED; | 1931 | list_add(&heb->entry, &he_dev->rbpl_outstanding); |
2028 | he_dev->rbps_tail = newtail; | 1932 | he_dev->rbpl_virt[i] = heb; |
1933 | set_bit(i, he_dev->rbpl_table); | ||
1934 | new_tail->idx = i << RBP_IDX_OFFSET; | ||
1935 | new_tail->phys = mapping + offsetof(struct he_buff, data); | ||
1936 | |||
1937 | he_dev->rbpl_tail = new_tail; | ||
2029 | ++moved; | 1938 | ++moved; |
2030 | } | 1939 | } |
2031 | 1940 | ||
2032 | if (moved) | 1941 | if (moved) |
2033 | he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T); | 1942 | he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T); |
2034 | } | 1943 | } |
2035 | 1944 | ||
2036 | static void | 1945 | static void |
@@ -2055,10 +1964,8 @@ he_tasklet(unsigned long data) | |||
2055 | HPRINTK("rbrq%d threshold\n", group); | 1964 | HPRINTK("rbrq%d threshold\n", group); |
2056 | /* fall through */ | 1965 | /* fall through */ |
2057 | case ITYPE_RBRQ_TIMER: | 1966 | case ITYPE_RBRQ_TIMER: |
2058 | if (he_service_rbrq(he_dev, group)) { | 1967 | if (he_service_rbrq(he_dev, group)) |
2059 | he_service_rbpl(he_dev, group); | 1968 | he_service_rbpl(he_dev, group); |
2060 | he_service_rbps(he_dev, group); | ||
2061 | } | ||
2062 | break; | 1969 | break; |
2063 | case ITYPE_TBRQ_THRESH: | 1970 | case ITYPE_TBRQ_THRESH: |
2064 | HPRINTK("tbrq%d threshold\n", group); | 1971 | HPRINTK("tbrq%d threshold\n", group); |
@@ -2070,7 +1977,7 @@ he_tasklet(unsigned long data) | |||
2070 | he_service_rbpl(he_dev, group); | 1977 | he_service_rbpl(he_dev, group); |
2071 | break; | 1978 | break; |
2072 | case ITYPE_RBPS_THRESH: | 1979 | case ITYPE_RBPS_THRESH: |
2073 | he_service_rbps(he_dev, group); | 1980 | /* shouldn't happen unless small buffers enabled */ |
2074 | break; | 1981 | break; |
2075 | case ITYPE_PHY: | 1982 | case ITYPE_PHY: |
2076 | HPRINTK("phy interrupt\n"); | 1983 | HPRINTK("phy interrupt\n"); |
@@ -2098,7 +2005,6 @@ he_tasklet(unsigned long data) | |||
2098 | 2005 | ||
2099 | he_service_rbrq(he_dev, 0); | 2006 | he_service_rbrq(he_dev, 0); |
2100 | he_service_rbpl(he_dev, 0); | 2007 | he_service_rbpl(he_dev, 0); |
2101 | he_service_rbps(he_dev, 0); | ||
2102 | he_service_tbrq(he_dev, 0); | 2008 | he_service_tbrq(he_dev, 0); |
2103 | break; | 2009 | break; |
2104 | default: | 2010 | default: |
@@ -2252,7 +2158,7 @@ he_open(struct atm_vcc *vcc) | |||
2252 | return -ENOMEM; | 2158 | return -ENOMEM; |
2253 | } | 2159 | } |
2254 | 2160 | ||
2255 | he_vcc->iov_tail = he_vcc->iov_head; | 2161 | INIT_LIST_HEAD(&he_vcc->buffers); |
2256 | he_vcc->pdu_len = 0; | 2162 | he_vcc->pdu_len = 0; |
2257 | he_vcc->rc_index = -1; | 2163 | he_vcc->rc_index = -1; |
2258 | 2164 | ||
@@ -2406,8 +2312,8 @@ he_open(struct atm_vcc *vcc) | |||
2406 | goto open_failed; | 2312 | goto open_failed; |
2407 | } | 2313 | } |
2408 | 2314 | ||
2409 | rsr1 = RSR1_GROUP(0); | 2315 | rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY; |
2410 | rsr4 = RSR4_GROUP(0); | 2316 | rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY; |
2411 | rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ? | 2317 | rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ? |
2412 | (RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0; | 2318 | (RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0; |
2413 | 2319 | ||
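A minimal, driver-independent sketch (not the driver code itself) of the bitmap-with-hint index allocation pattern used by the rbpl_table/rbpl_hint logic above: search from the last hint, wrap around once, and remember the slot after the winner for the next call.

    #include <linux/bitmap.h>
    #include <linux/bitops.h>

    /* return a free index from 'table' (of 'size' bits), or -1 if full */
    static int alloc_buf_idx(unsigned long *table, unsigned int size,
                             unsigned long *hint)
    {
            unsigned int i;

            i = find_next_zero_bit(table, size, *hint);
            if (i >= size) {
                    i = find_first_zero_bit(table, size);
                    if (i >= size)
                            return -1;      /* no free slot */
            }
            set_bit(i, table);
            *hint = i + 1;                  /* start the next search here */
            return i;
    }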
diff --git a/drivers/atm/he.h b/drivers/atm/he.h index c2983e0d4ec1..110a27d2ecfc 100644 --- a/drivers/atm/he.h +++ b/drivers/atm/he.h | |||
@@ -67,11 +67,6 @@ | |||
67 | #define CONFIG_RBPL_BUFSIZE 4096 | 67 | #define CONFIG_RBPL_BUFSIZE 4096 |
68 | #define RBPL_MASK(x) (((unsigned long)(x))&((CONFIG_RBPL_SIZE<<3)-1)) | 68 | #define RBPL_MASK(x) (((unsigned long)(x))&((CONFIG_RBPL_SIZE<<3)-1)) |
69 | 69 | ||
70 | #define CONFIG_RBPS_SIZE 1024 | ||
71 | #define CONFIG_RBPS_THRESH 64 | ||
72 | #define CONFIG_RBPS_BUFSIZE 128 | ||
73 | #define RBPS_MASK(x) (((unsigned long)(x))&((CONFIG_RBPS_SIZE<<3)-1)) | ||
74 | |||
75 | /* 5.1.3 initialize connection memory */ | 70 | /* 5.1.3 initialize connection memory */ |
76 | 71 | ||
77 | #define CONFIG_RSRA 0x00000 | 72 | #define CONFIG_RSRA 0x00000 |
@@ -203,36 +198,37 @@ struct he_hsp { | |||
203 | } group[HE_NUM_GROUPS]; | 198 | } group[HE_NUM_GROUPS]; |
204 | }; | 199 | }; |
205 | 200 | ||
206 | /* figure 2.9 receive buffer pools */ | 201 | /* |
202 | * figure 2.9 receive buffer pools | ||
203 | * | ||
204 | * since a virtual address might be more than 32 bits, we store an index | ||
205 | * in the virt member of he_rbp. NOTE: the lower six bits in the rbrq | ||
206 | * addr member are used for buffer status further limiting us to 26 bits. | ||
207 | */ | ||
207 | 208 | ||
208 | struct he_rbp { | 209 | struct he_rbp { |
209 | volatile u32 phys; | 210 | volatile u32 phys; |
210 | volatile u32 status; | 211 | volatile u32 idx; /* virt */ |
211 | }; | 212 | }; |
212 | 213 | ||
213 | /* NOTE: it is suggested that virt be the virtual address of the host | 214 | #define RBP_IDX_OFFSET 6 |
214 | buffer. on a 64-bit machine, this would not work. Instead, we | ||
215 | store the real virtual address in another list, and store an index | ||
216 | (and buffer status) in the virt member. | ||
217 | */ | ||
218 | 215 | ||
219 | #define RBP_INDEX_OFF 6 | 216 | /* |
220 | #define RBP_INDEX(x) (((long)(x) >> RBP_INDEX_OFF) & 0xffff) | 217 | * the he dma engine will try to hold an extra 16 buffers in its local |
221 | #define RBP_LOANED 0x80000000 | 218 | * caches, so add a couple of buffers for safety. |
222 | #define RBP_SMALLBUF 0x40000000 | 219 | */ |
223 | 220 | ||
224 | struct he_virt { | 221 | #define RBPL_TABLE_SIZE (CONFIG_RBPL_SIZE + 16 + 2) |
225 | void *virt; | ||
226 | }; | ||
227 | 222 | ||
228 | #define RBPL_ALIGNMENT CONFIG_RBPL_SIZE | 223 | struct he_buff { |
229 | #define RBPS_ALIGNMENT CONFIG_RBPS_SIZE | 224 | struct list_head entry; |
225 | dma_addr_t mapping; | ||
226 | unsigned long len; | ||
227 | u8 data[]; | ||
228 | }; | ||
230 | 229 | ||
231 | #ifdef notyet | 230 | #ifdef notyet |
232 | struct he_group { | 231 | struct he_group { |
233 | u32 rpbs_size, rpbs_qsize; | ||
234 | struct he_rbp rbps_ba; | ||
235 | |||
236 | u32 rpbl_size, rpbl_qsize; | 232 | u32 rpbl_size, rpbl_qsize; |
237 | struct he_rpb_entry *rbpl_ba; | 233 | struct he_rpb_entry *rbpl_ba; |
238 | }; | 234 | }; |
@@ -297,18 +293,15 @@ struct he_dev { | |||
297 | struct he_rbrq *rbrq_base, *rbrq_head; | 293 | struct he_rbrq *rbrq_base, *rbrq_head; |
298 | int rbrq_peak; | 294 | int rbrq_peak; |
299 | 295 | ||
296 | struct he_buff **rbpl_virt; | ||
297 | unsigned long *rbpl_table; | ||
298 | unsigned long rbpl_hint; | ||
300 | struct pci_pool *rbpl_pool; | 299 | struct pci_pool *rbpl_pool; |
301 | dma_addr_t rbpl_phys; | 300 | dma_addr_t rbpl_phys; |
302 | struct he_rbp *rbpl_base, *rbpl_tail; | 301 | struct he_rbp *rbpl_base, *rbpl_tail; |
303 | struct he_virt *rbpl_virt; | 302 | struct list_head rbpl_outstanding; |
304 | int rbpl_peak; | 303 | int rbpl_peak; |
305 | 304 | ||
306 | struct pci_pool *rbps_pool; | ||
307 | dma_addr_t rbps_phys; | ||
308 | struct he_rbp *rbps_base, *rbps_tail; | ||
309 | struct he_virt *rbps_virt; | ||
310 | int rbps_peak; | ||
311 | |||
312 | dma_addr_t tbrq_phys; | 305 | dma_addr_t tbrq_phys; |
313 | struct he_tbrq *tbrq_base, *tbrq_head; | 306 | struct he_tbrq *tbrq_base, *tbrq_head; |
314 | int tbrq_peak; | 307 | int tbrq_peak; |
@@ -321,20 +314,12 @@ struct he_dev { | |||
321 | struct he_dev *next; | 314 | struct he_dev *next; |
322 | }; | 315 | }; |
323 | 316 | ||
324 | struct he_iovec | ||
325 | { | ||
326 | u32 iov_base; | ||
327 | u32 iov_len; | ||
328 | }; | ||
329 | |||
330 | #define HE_MAXIOV 20 | 317 | #define HE_MAXIOV 20 |
331 | 318 | ||
332 | struct he_vcc | 319 | struct he_vcc |
333 | { | 320 | { |
334 | struct he_iovec iov_head[HE_MAXIOV]; | 321 | struct list_head buffers; |
335 | struct he_iovec *iov_tail; | ||
336 | int pdu_len; | 322 | int pdu_len; |
337 | |||
338 | int rc_index; | 323 | int rc_index; |
339 | 324 | ||
340 | wait_queue_head_t rx_waitq; | 325 | wait_queue_head_t rx_waitq; |
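To illustrate the encoding described in the new he.h comment (a sketch, not driver code): the table index lives above the six status bits of the rbp idx field and is recovered from the rbrq address by shifting back down.

    #include <linux/types.h>

    #define RBP_IDX_OFFSET  6       /* low six bits carry buffer status */

    static inline u32 rbp_idx_encode(unsigned int i)
    {
            return i << RBP_IDX_OFFSET;
    }

    static inline unsigned int rbp_idx_decode(u32 rbrq_addr)
    {
            return rbrq_addr >> RBP_IDX_OFFSET;
    }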
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c index b7473a6110a7..59876c66a92a 100644 --- a/drivers/atm/nicstar.c +++ b/drivers/atm/nicstar.c | |||
@@ -1,5 +1,4 @@ | |||
1 | /****************************************************************************** | 1 | /* |
2 | * | ||
3 | * nicstar.c | 2 | * nicstar.c |
4 | * | 3 | * |
5 | * Device driver supporting CBR for IDT 77201/77211 "NICStAR" based cards. | 4 | * Device driver supporting CBR for IDT 77201/77211 "NICStAR" based cards. |
@@ -16,12 +15,10 @@ | |||
16 | * | 15 | * |
17 | * | 16 | * |
18 | * (C) INESC 1999 | 17 | * (C) INESC 1999 |
19 | * | 18 | */ |
20 | * | ||
21 | ******************************************************************************/ | ||
22 | |||
23 | 19 | ||
24 | /**** IMPORTANT INFORMATION *************************************************** | 20 | /* |
21 | * IMPORTANT INFORMATION | ||
25 | * | 22 | * |
26 | * There are currently three types of spinlocks: | 23 | * There are currently three types of spinlocks: |
27 | * | 24 | * |
@@ -31,9 +28,9 @@ | |||
31 | * | 28 | * |
32 | * These must NEVER be grabbed in reverse order. | 29 | * These must NEVER be grabbed in reverse order. |
33 | * | 30 | * |
34 | ******************************************************************************/ | 31 | */ |
35 | 32 | ||
36 | /* Header files ***************************************************************/ | 33 | /* Header files */ |
37 | 34 | ||
38 | #include <linux/module.h> | 35 | #include <linux/module.h> |
39 | #include <linux/kernel.h> | 36 | #include <linux/kernel.h> |
@@ -41,6 +38,7 @@ | |||
41 | #include <linux/atmdev.h> | 38 | #include <linux/atmdev.h> |
42 | #include <linux/atm.h> | 39 | #include <linux/atm.h> |
43 | #include <linux/pci.h> | 40 | #include <linux/pci.h> |
41 | #include <linux/dma-mapping.h> | ||
44 | #include <linux/types.h> | 42 | #include <linux/types.h> |
45 | #include <linux/string.h> | 43 | #include <linux/string.h> |
46 | #include <linux/delay.h> | 44 | #include <linux/delay.h> |
@@ -50,6 +48,7 @@ | |||
50 | #include <linux/interrupt.h> | 48 | #include <linux/interrupt.h> |
51 | #include <linux/bitops.h> | 49 | #include <linux/bitops.h> |
52 | #include <linux/slab.h> | 50 | #include <linux/slab.h> |
51 | #include <linux/idr.h> | ||
53 | #include <asm/io.h> | 52 | #include <asm/io.h> |
54 | #include <asm/uaccess.h> | 53 | #include <asm/uaccess.h> |
55 | #include <asm/atomic.h> | 54 | #include <asm/atomic.h> |
@@ -61,16 +60,11 @@ | |||
61 | #include "idt77105.h" | 60 | #include "idt77105.h" |
62 | #endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */ | 61 | #endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */ |
63 | 62 | ||
64 | #if BITS_PER_LONG != 32 | 63 | /* Additional code */ |
65 | # error FIXME: this driver requires a 32-bit platform | ||
66 | #endif | ||
67 | |||
68 | /* Additional code ************************************************************/ | ||
69 | 64 | ||
70 | #include "nicstarmac.c" | 65 | #include "nicstarmac.c" |
71 | 66 | ||
72 | 67 | /* Configurable parameters */ | |
73 | /* Configurable parameters ****************************************************/ | ||
74 | 68 | ||
75 | #undef PHY_LOOPBACK | 69 | #undef PHY_LOOPBACK |
76 | #undef TX_DEBUG | 70 | #undef TX_DEBUG |
@@ -78,11 +72,10 @@ | |||
78 | #undef GENERAL_DEBUG | 72 | #undef GENERAL_DEBUG |
79 | #undef EXTRA_DEBUG | 73 | #undef EXTRA_DEBUG |
80 | 74 | ||
81 | #undef NS_USE_DESTRUCTORS /* For now keep this undefined unless you know | 75 | #undef NS_USE_DESTRUCTORS /* For now keep this undefined unless you know |
82 | you're going to use only raw ATM */ | 76 | you're going to use only raw ATM */ |
83 | 77 | ||
84 | 78 | /* Do not touch these */ | |
85 | /* Do not touch these *********************************************************/ | ||
86 | 79 | ||
87 | #ifdef TX_DEBUG | 80 | #ifdef TX_DEBUG |
88 | #define TXPRINTK(args...) printk(args) | 81 | #define TXPRINTK(args...) printk(args) |
@@ -108,2908 +101,2786 @@ | |||
108 | #define XPRINTK(args...) | 101 | #define XPRINTK(args...) |
109 | #endif /* EXTRA_DEBUG */ | 102 | #endif /* EXTRA_DEBUG */ |
110 | 103 | ||
111 | 104 | /* Macros */ | |
112 | /* Macros *********************************************************************/ | ||
113 | 105 | ||
114 | #define CMD_BUSY(card) (readl((card)->membase + STAT) & NS_STAT_CMDBZ) | 106 | #define CMD_BUSY(card) (readl((card)->membase + STAT) & NS_STAT_CMDBZ) |
115 | 107 | ||
116 | #define NS_DELAY mdelay(1) | 108 | #define NS_DELAY mdelay(1) |
117 | 109 | ||
118 | #define ALIGN_BUS_ADDR(addr, alignment) \ | 110 | #define PTR_DIFF(a, b) ((u32)((unsigned long)(a) - (unsigned long)(b))) |
119 | ((((u32) (addr)) + (((u32) (alignment)) - 1)) & ~(((u32) (alignment)) - 1)) | ||
120 | #define ALIGN_ADDRESS(addr, alignment) \ | ||
121 | bus_to_virt(ALIGN_BUS_ADDR(virt_to_bus(addr), alignment)) | ||
122 | |||
123 | #undef CEIL | ||
124 | 111 | ||
125 | #ifndef ATM_SKB | 112 | #ifndef ATM_SKB |
126 | #define ATM_SKB(s) (&(s)->atm) | 113 | #define ATM_SKB(s) (&(s)->atm) |
127 | #endif | 114 | #endif |
128 | 115 | ||
116 | #define scq_virt_to_bus(scq, p) \ | ||
117 | (scq->dma + ((unsigned long)(p) - (unsigned long)(scq)->org)) | ||
129 | 118 | ||
130 | /* Function declarations ******************************************************/ | 119 | /* Function declarations */ |
131 | 120 | ||
132 | static u32 ns_read_sram(ns_dev *card, u32 sram_address); | 121 | static u32 ns_read_sram(ns_dev * card, u32 sram_address); |
133 | static void ns_write_sram(ns_dev *card, u32 sram_address, u32 *value, int count); | 122 | static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value, |
123 | int count); | ||
134 | static int __devinit ns_init_card(int i, struct pci_dev *pcidev); | 124 | static int __devinit ns_init_card(int i, struct pci_dev *pcidev); |
135 | static void __devinit ns_init_card_error(ns_dev *card, int error); | 125 | static void __devinit ns_init_card_error(ns_dev * card, int error); |
136 | static scq_info *get_scq(int size, u32 scd); | 126 | static scq_info *get_scq(ns_dev *card, int size, u32 scd); |
137 | static void free_scq(scq_info *scq, struct atm_vcc *vcc); | 127 | static void free_scq(ns_dev *card, scq_info * scq, struct atm_vcc *vcc); |
138 | static void push_rxbufs(ns_dev *, struct sk_buff *); | 128 | static void push_rxbufs(ns_dev *, struct sk_buff *); |
139 | static irqreturn_t ns_irq_handler(int irq, void *dev_id); | 129 | static irqreturn_t ns_irq_handler(int irq, void *dev_id); |
140 | static int ns_open(struct atm_vcc *vcc); | 130 | static int ns_open(struct atm_vcc *vcc); |
141 | static void ns_close(struct atm_vcc *vcc); | 131 | static void ns_close(struct atm_vcc *vcc); |
142 | static void fill_tst(ns_dev *card, int n, vc_map *vc); | 132 | static void fill_tst(ns_dev * card, int n, vc_map * vc); |
143 | static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb); | 133 | static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb); |
144 | static int push_scqe(ns_dev *card, vc_map *vc, scq_info *scq, ns_scqe *tbd, | 134 | static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd, |
145 | struct sk_buff *skb); | 135 | struct sk_buff *skb); |
146 | static void process_tsq(ns_dev *card); | 136 | static void process_tsq(ns_dev * card); |
147 | static void drain_scq(ns_dev *card, scq_info *scq, int pos); | 137 | static void drain_scq(ns_dev * card, scq_info * scq, int pos); |
148 | static void process_rsq(ns_dev *card); | 138 | static void process_rsq(ns_dev * card); |
149 | static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe); | 139 | static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe); |
150 | #ifdef NS_USE_DESTRUCTORS | 140 | #ifdef NS_USE_DESTRUCTORS |
151 | static void ns_sb_destructor(struct sk_buff *sb); | 141 | static void ns_sb_destructor(struct sk_buff *sb); |
152 | static void ns_lb_destructor(struct sk_buff *lb); | 142 | static void ns_lb_destructor(struct sk_buff *lb); |
153 | static void ns_hb_destructor(struct sk_buff *hb); | 143 | static void ns_hb_destructor(struct sk_buff *hb); |
154 | #endif /* NS_USE_DESTRUCTORS */ | 144 | #endif /* NS_USE_DESTRUCTORS */ |
155 | static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb); | 145 | static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb); |
156 | static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count); | 146 | static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count); |
157 | static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb); | 147 | static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb); |
158 | static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb); | 148 | static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb); |
159 | static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb); | 149 | static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb); |
160 | static int ns_proc_read(struct atm_dev *dev, loff_t *pos, char *page); | 150 | static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page); |
161 | static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg); | 151 | static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg); |
162 | static void which_list(ns_dev *card, struct sk_buff *skb); | 152 | #ifdef EXTRA_DEBUG |
153 | static void which_list(ns_dev * card, struct sk_buff *skb); | ||
154 | #endif | ||
163 | static void ns_poll(unsigned long arg); | 155 | static void ns_poll(unsigned long arg); |
164 | static int ns_parse_mac(char *mac, unsigned char *esi); | 156 | static int ns_parse_mac(char *mac, unsigned char *esi); |
165 | static short ns_h2i(char c); | 157 | static short ns_h2i(char c); |
166 | static void ns_phy_put(struct atm_dev *dev, unsigned char value, | 158 | static void ns_phy_put(struct atm_dev *dev, unsigned char value, |
167 | unsigned long addr); | 159 | unsigned long addr); |
168 | static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr); | 160 | static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr); |
169 | 161 | ||
170 | 162 | /* Global variables */ | |
171 | |||
172 | /* Global variables ***********************************************************/ | ||
173 | 163 | ||
174 | static struct ns_dev *cards[NS_MAX_CARDS]; | 164 | static struct ns_dev *cards[NS_MAX_CARDS]; |
175 | static unsigned num_cards; | 165 | static unsigned num_cards; |
176 | static struct atmdev_ops atm_ops = | 166 | static struct atmdev_ops atm_ops = { |
177 | { | 167 | .open = ns_open, |
178 | .open = ns_open, | 168 | .close = ns_close, |
179 | .close = ns_close, | 169 | .ioctl = ns_ioctl, |
180 | .ioctl = ns_ioctl, | 170 | .send = ns_send, |
181 | .send = ns_send, | 171 | .phy_put = ns_phy_put, |
182 | .phy_put = ns_phy_put, | 172 | .phy_get = ns_phy_get, |
183 | .phy_get = ns_phy_get, | 173 | .proc_read = ns_proc_read, |
184 | .proc_read = ns_proc_read, | 174 | .owner = THIS_MODULE, |
185 | .owner = THIS_MODULE, | ||
186 | }; | 175 | }; |
176 | |||
187 | static struct timer_list ns_timer; | 177 | static struct timer_list ns_timer; |
188 | static char *mac[NS_MAX_CARDS]; | 178 | static char *mac[NS_MAX_CARDS]; |
189 | module_param_array(mac, charp, NULL, 0); | 179 | module_param_array(mac, charp, NULL, 0); |
190 | MODULE_LICENSE("GPL"); | 180 | MODULE_LICENSE("GPL"); |
191 | 181 | ||
192 | 182 | /* Functions */ | |
193 | /* Functions*******************************************************************/ | ||
194 | 183 | ||
195 | static int __devinit nicstar_init_one(struct pci_dev *pcidev, | 184 | static int __devinit nicstar_init_one(struct pci_dev *pcidev, |
196 | const struct pci_device_id *ent) | 185 | const struct pci_device_id *ent) |
197 | { | 186 | { |
198 | static int index = -1; | 187 | static int index = -1; |
199 | unsigned int error; | 188 | unsigned int error; |
200 | 189 | ||
201 | index++; | 190 | index++; |
202 | cards[index] = NULL; | 191 | cards[index] = NULL; |
203 | 192 | ||
204 | error = ns_init_card(index, pcidev); | 193 | error = ns_init_card(index, pcidev); |
205 | if (error) { | 194 | if (error) { |
206 | cards[index--] = NULL; /* don't increment index */ | 195 | cards[index--] = NULL; /* don't increment index */ |
207 | goto err_out; | 196 | goto err_out; |
208 | } | 197 | } |
209 | 198 | ||
210 | return 0; | 199 | return 0; |
211 | err_out: | 200 | err_out: |
212 | return -ENODEV; | 201 | return -ENODEV; |
213 | } | 202 | } |
214 | 203 | ||
215 | |||
216 | |||
217 | static void __devexit nicstar_remove_one(struct pci_dev *pcidev) | 204 | static void __devexit nicstar_remove_one(struct pci_dev *pcidev) |
218 | { | 205 | { |
219 | int i, j; | 206 | int i, j; |
220 | ns_dev *card = pci_get_drvdata(pcidev); | 207 | ns_dev *card = pci_get_drvdata(pcidev); |
221 | struct sk_buff *hb; | 208 | struct sk_buff *hb; |
222 | struct sk_buff *iovb; | 209 | struct sk_buff *iovb; |
223 | struct sk_buff *lb; | 210 | struct sk_buff *lb; |
224 | struct sk_buff *sb; | 211 | struct sk_buff *sb; |
225 | 212 | ||
226 | i = card->index; | 213 | i = card->index; |
227 | 214 | ||
228 | if (cards[i] == NULL) | 215 | if (cards[i] == NULL) |
229 | return; | 216 | return; |
230 | 217 | ||
231 | if (card->atmdev->phy && card->atmdev->phy->stop) | 218 | if (card->atmdev->phy && card->atmdev->phy->stop) |
232 | card->atmdev->phy->stop(card->atmdev); | 219 | card->atmdev->phy->stop(card->atmdev); |
233 | 220 | ||
234 | /* Stop everything */ | 221 | /* Stop everything */ |
235 | writel(0x00000000, card->membase + CFG); | 222 | writel(0x00000000, card->membase + CFG); |
236 | 223 | ||
237 | /* De-register device */ | 224 | /* De-register device */ |
238 | atm_dev_deregister(card->atmdev); | 225 | atm_dev_deregister(card->atmdev); |
239 | 226 | ||
240 | /* Disable PCI device */ | 227 | /* Disable PCI device */ |
241 | pci_disable_device(pcidev); | 228 | pci_disable_device(pcidev); |
242 | 229 | ||
243 | /* Free up resources */ | 230 | /* Free up resources */ |
244 | j = 0; | 231 | j = 0; |
245 | PRINTK("nicstar%d: freeing %d huge buffers.\n", i, card->hbpool.count); | 232 | PRINTK("nicstar%d: freeing %d huge buffers.\n", i, card->hbpool.count); |
246 | while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) | 233 | while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) { |
247 | { | 234 | dev_kfree_skb_any(hb); |
248 | dev_kfree_skb_any(hb); | 235 | j++; |
249 | j++; | 236 | } |
250 | } | 237 | PRINTK("nicstar%d: %d huge buffers freed.\n", i, j); |
251 | PRINTK("nicstar%d: %d huge buffers freed.\n", i, j); | 238 | j = 0; |
252 | j = 0; | 239 | PRINTK("nicstar%d: freeing %d iovec buffers.\n", i, |
253 | PRINTK("nicstar%d: freeing %d iovec buffers.\n", i, card->iovpool.count); | 240 | card->iovpool.count); |
254 | while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) | 241 | while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) { |
255 | { | 242 | dev_kfree_skb_any(iovb); |
256 | dev_kfree_skb_any(iovb); | 243 | j++; |
257 | j++; | 244 | } |
258 | } | 245 | PRINTK("nicstar%d: %d iovec buffers freed.\n", i, j); |
259 | PRINTK("nicstar%d: %d iovec buffers freed.\n", i, j); | 246 | while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL) |
260 | while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL) | 247 | dev_kfree_skb_any(lb); |
261 | dev_kfree_skb_any(lb); | 248 | while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL) |
262 | while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL) | 249 | dev_kfree_skb_any(sb); |
263 | dev_kfree_skb_any(sb); | 250 | free_scq(card, card->scq0, NULL); |
264 | free_scq(card->scq0, NULL); | 251 | for (j = 0; j < NS_FRSCD_NUM; j++) { |
265 | for (j = 0; j < NS_FRSCD_NUM; j++) | 252 | if (card->scd2vc[j] != NULL) |
266 | { | 253 | free_scq(card, card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc); |
267 | if (card->scd2vc[j] != NULL) | 254 | } |
268 | free_scq(card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc); | 255 | idr_remove_all(&card->idr); |
269 | } | 256 | idr_destroy(&card->idr); |
270 | kfree(card->rsq.org); | 257 | pci_free_consistent(card->pcidev, NS_RSQSIZE + NS_RSQ_ALIGNMENT, |
271 | kfree(card->tsq.org); | 258 | card->rsq.org, card->rsq.dma); |
272 | free_irq(card->pcidev->irq, card); | 259 | pci_free_consistent(card->pcidev, NS_TSQSIZE + NS_TSQ_ALIGNMENT, |
273 | iounmap(card->membase); | 260 | card->tsq.org, card->tsq.dma); |
274 | kfree(card); | 261 | free_irq(card->pcidev->irq, card); |
262 | iounmap(card->membase); | ||
263 | kfree(card); | ||
275 | } | 264 | } |
276 | 265 | ||
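The teardown above mirrors the allocation changes made later in ns_init_card(): the status queues that used to be kmalloc()'d and handed to the chip through virt_to_bus() are now DMA-coherent buffers, so they have to be released with pci_free_consistent() using the same size and the dma_addr_t remembered at allocation time. A minimal sketch of that pairing, using the legacy PCI DMA API of this kernel generation; dma_queue, queue_alloc and queue_free are illustrative names, not the driver's own:

#include <linux/pci.h>

struct dma_queue {			/* illustrative only */
	void *org;			/* CPU address returned by the allocator */
	dma_addr_t dma;			/* bus address programmed into the chip */
};

static int queue_alloc(struct pci_dev *pcidev, struct dma_queue *q, size_t size)
{
	q->org = pci_alloc_consistent(pcidev, size, &q->dma);
	return q->org ? 0 : -ENOMEM;
}

static void queue_free(struct pci_dev *pcidev, struct dma_queue *q, size_t size)
{
	/* size, CPU pointer and bus address must match the allocation */
	pci_free_consistent(pcidev, size, q->org, q->dma);
}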
277 | 266 | static struct pci_device_id nicstar_pci_tbl[] __devinitdata = { | |
278 | |||
279 | static struct pci_device_id nicstar_pci_tbl[] __devinitdata = | ||
280 | { | ||
281 | {PCI_VENDOR_ID_IDT, PCI_DEVICE_ID_IDT_IDT77201, | 267 | {PCI_VENDOR_ID_IDT, PCI_DEVICE_ID_IDT_IDT77201, |
282 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, | 268 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, |
283 | {0,} /* terminate list */ | 269 | {0,} /* terminate list */ |
284 | }; | 270 | }; |
285 | MODULE_DEVICE_TABLE(pci, nicstar_pci_tbl); | ||
286 | |||
287 | 271 | ||
272 | MODULE_DEVICE_TABLE(pci, nicstar_pci_tbl); | ||
288 | 273 | ||
289 | static struct pci_driver nicstar_driver = { | 274 | static struct pci_driver nicstar_driver = { |
290 | .name = "nicstar", | 275 | .name = "nicstar", |
291 | .id_table = nicstar_pci_tbl, | 276 | .id_table = nicstar_pci_tbl, |
292 | .probe = nicstar_init_one, | 277 | .probe = nicstar_init_one, |
293 | .remove = __devexit_p(nicstar_remove_one), | 278 | .remove = __devexit_p(nicstar_remove_one), |
294 | }; | 279 | }; |
295 | 280 | ||
296 | |||
297 | |||
298 | static int __init nicstar_init(void) | 281 | static int __init nicstar_init(void) |
299 | { | 282 | { |
300 | unsigned error = 0; /* Initialized to remove compile warning */ | 283 | unsigned error = 0; /* Initialized to remove compile warning */ |
284 | |||
285 | XPRINTK("nicstar: nicstar_init() called.\n"); | ||
301 | 286 | ||
302 | XPRINTK("nicstar: nicstar_init() called.\n"); | 287 | error = pci_register_driver(&nicstar_driver); |
303 | 288 | ||
304 | error = pci_register_driver(&nicstar_driver); | 289 | TXPRINTK("nicstar: TX debug enabled.\n"); |
305 | 290 | RXPRINTK("nicstar: RX debug enabled.\n"); | |
306 | TXPRINTK("nicstar: TX debug enabled.\n"); | 291 | PRINTK("nicstar: General debug enabled.\n"); |
307 | RXPRINTK("nicstar: RX debug enabled.\n"); | ||
308 | PRINTK("nicstar: General debug enabled.\n"); | ||
309 | #ifdef PHY_LOOPBACK | 292 | #ifdef PHY_LOOPBACK |
310 | printk("nicstar: using PHY loopback.\n"); | 293 | printk("nicstar: using PHY loopback.\n"); |
311 | #endif /* PHY_LOOPBACK */ | 294 | #endif /* PHY_LOOPBACK */ |
312 | XPRINTK("nicstar: nicstar_init() returned.\n"); | 295 | XPRINTK("nicstar: nicstar_init() returned.\n"); |
313 | |||
314 | if (!error) { | ||
315 | init_timer(&ns_timer); | ||
316 | ns_timer.expires = jiffies + NS_POLL_PERIOD; | ||
317 | ns_timer.data = 0UL; | ||
318 | ns_timer.function = ns_poll; | ||
319 | add_timer(&ns_timer); | ||
320 | } | ||
321 | |||
322 | return error; | ||
323 | } | ||
324 | 296 | ||
297 | if (!error) { | ||
298 | init_timer(&ns_timer); | ||
299 | ns_timer.expires = jiffies + NS_POLL_PERIOD; | ||
300 | ns_timer.data = 0UL; | ||
301 | ns_timer.function = ns_poll; | ||
302 | add_timer(&ns_timer); | ||
303 | } | ||
325 | 304 | ||
305 | return error; | ||
306 | } | ||
326 | 307 | ||
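nicstar_init() above arms a single global timer, ns_timer, to run ns_poll() every NS_POLL_PERIOD jiffies once pci_register_driver() has succeeded, and nicstar_cleanup() below deletes it before unregistering. For reference, this is the legacy timer API whose callback takes the unsigned long data field; a minimal sketch of the same idiom, where poll_fn and POLL_PERIOD are placeholders and the re-arm via mod_timer() is the usual pattern for a periodic callback (ns_poll() itself is not part of this hunk):

#include <linux/timer.h>
#include <linux/jiffies.h>

#define POLL_PERIOD (HZ / 10)		/* placeholder period */

static struct timer_list poll_timer;

static void poll_fn(unsigned long data)
{
	/* ... periodic housekeeping ... */

	/* re-arm for the next period */
	mod_timer(&poll_timer, jiffies + POLL_PERIOD);
}

static void start_polling(void)
{
	init_timer(&poll_timer);
	poll_timer.expires = jiffies + POLL_PERIOD;
	poll_timer.data = 0UL;
	poll_timer.function = poll_fn;
	add_timer(&poll_timer);
}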
327 | static void __exit nicstar_cleanup(void) | 308 | static void __exit nicstar_cleanup(void) |
328 | { | 309 | { |
329 | XPRINTK("nicstar: nicstar_cleanup() called.\n"); | 310 | XPRINTK("nicstar: nicstar_cleanup() called.\n"); |
330 | 311 | ||
331 | del_timer(&ns_timer); | 312 | del_timer(&ns_timer); |
332 | 313 | ||
333 | pci_unregister_driver(&nicstar_driver); | 314 | pci_unregister_driver(&nicstar_driver); |
334 | 315 | ||
335 | XPRINTK("nicstar: nicstar_cleanup() returned.\n"); | 316 | XPRINTK("nicstar: nicstar_cleanup() returned.\n"); |
336 | } | 317 | } |
337 | 318 | ||
338 | 319 | static u32 ns_read_sram(ns_dev * card, u32 sram_address) | |
339 | |||
340 | static u32 ns_read_sram(ns_dev *card, u32 sram_address) | ||
341 | { | 320 | { |
342 | unsigned long flags; | 321 | unsigned long flags; |
343 | u32 data; | 322 | u32 data; |
344 | sram_address <<= 2; | 323 | sram_address <<= 2; |
345 | sram_address &= 0x0007FFFC; /* address must be dword aligned */ | 324 | sram_address &= 0x0007FFFC; /* address must be dword aligned */ |
346 | sram_address |= 0x50000000; /* SRAM read command */ | 325 | sram_address |= 0x50000000; /* SRAM read command */ |
347 | spin_lock_irqsave(&card->res_lock, flags); | 326 | spin_lock_irqsave(&card->res_lock, flags); |
348 | while (CMD_BUSY(card)); | 327 | while (CMD_BUSY(card)) ; |
349 | writel(sram_address, card->membase + CMD); | 328 | writel(sram_address, card->membase + CMD); |
350 | while (CMD_BUSY(card)); | 329 | while (CMD_BUSY(card)) ; |
351 | data = readl(card->membase + DR0); | 330 | data = readl(card->membase + DR0); |
352 | spin_unlock_irqrestore(&card->res_lock, flags); | 331 | spin_unlock_irqrestore(&card->res_lock, flags); |
353 | return data; | 332 | return data; |
354 | } | 333 | } |
355 | 334 | ||
356 | 335 | static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value, | |
357 | 336 | int count) | |
358 | static void ns_write_sram(ns_dev *card, u32 sram_address, u32 *value, int count) | ||
359 | { | 337 | { |
360 | unsigned long flags; | 338 | unsigned long flags; |
361 | int i, c; | 339 | int i, c; |
362 | count--; /* count range now is 0..3 instead of 1..4 */ | 340 | count--; /* count range now is 0..3 instead of 1..4 */ |
363 | c = count; | 341 | c = count; |
364 | c <<= 2; /* to use increments of 4 */ | 342 | c <<= 2; /* to use increments of 4 */ |
365 | spin_lock_irqsave(&card->res_lock, flags); | 343 | spin_lock_irqsave(&card->res_lock, flags); |
366 | while (CMD_BUSY(card)); | 344 | while (CMD_BUSY(card)) ; |
367 | for (i = 0; i <= c; i += 4) | 345 | for (i = 0; i <= c; i += 4) |
368 | writel(*(value++), card->membase + i); | 346 | writel(*(value++), card->membase + i); |
369 | /* Note: DR# registers are the first 4 dwords in nicstar's memspace, | 347 | /* Note: DR# registers are the first 4 dwords in nicstar's memspace, |
370 | so card->membase + DR0 == card->membase */ | 348 | so card->membase + DR0 == card->membase */ |
371 | sram_address <<= 2; | 349 | sram_address <<= 2; |
372 | sram_address &= 0x0007FFFC; | 350 | sram_address &= 0x0007FFFC; |
373 | sram_address |= (0x40000000 | count); | 351 | sram_address |= (0x40000000 | count); |
374 | writel(sram_address, card->membase + CMD); | 352 | writel(sram_address, card->membase + CMD); |
375 | spin_unlock_irqrestore(&card->res_lock, flags); | 353 | spin_unlock_irqrestore(&card->res_lock, flags); |
376 | } | 354 | } |
377 | 355 | ||
378 | |||
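ns_write_sram() above batches one to four words per command: the values are written into DR0 through DR3 (the first four dwords of the register window, which is why the loop writes to card->membase + i), and the low bits of the command word carry count - 1. A trimmed, excerpt-style sketch of how ns_init_card() further down drives it, with entry_index and tst_slot as placeholder loop variables:

	u32 word;
	u32 rcte[4] = { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF };

	/* burst write: one receive-connection-table entry is four consecutive
	   SRAM words, so the whole entry goes out in a single command */
	ns_write_sram(card, entry_index * 4, rcte, 4);

	/* single-word write: count == 1 */
	word = NS_TST_OPCODE_VARIABLE;
	ns_write_sram(card, NS_TST0 + tst_slot, &word, 1);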
379 | static int __devinit ns_init_card(int i, struct pci_dev *pcidev) | 356 | static int __devinit ns_init_card(int i, struct pci_dev *pcidev) |
380 | { | 357 | { |
381 | int j; | 358 | int j; |
382 | struct ns_dev *card = NULL; | 359 | struct ns_dev *card = NULL; |
383 | unsigned char pci_latency; | 360 | unsigned char pci_latency; |
384 | unsigned error; | 361 | unsigned error; |
385 | u32 data; | 362 | u32 data; |
386 | u32 u32d[4]; | 363 | u32 u32d[4]; |
387 | u32 ns_cfg_rctsize; | 364 | u32 ns_cfg_rctsize; |
388 | int bcount; | 365 | int bcount; |
389 | unsigned long membase; | 366 | unsigned long membase; |
390 | 367 | ||
391 | error = 0; | 368 | error = 0; |
392 | 369 | ||
393 | if (pci_enable_device(pcidev)) | 370 | if (pci_enable_device(pcidev)) { |
394 | { | 371 | printk("nicstar%d: can't enable PCI device\n", i); |
395 | printk("nicstar%d: can't enable PCI device\n", i); | 372 | error = 2; |
396 | error = 2; | 373 | ns_init_card_error(card, error); |
397 | ns_init_card_error(card, error); | 374 | return error; |
398 | return error; | 375 | } |
399 | } | 376 | if ((pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0) || |
400 | 377 | (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0)) { | |
401 | if ((card = kmalloc(sizeof(ns_dev), GFP_KERNEL)) == NULL) | 378 | printk(KERN_WARNING |
402 | { | 379 | "nicstar%d: No suitable DMA available.\n", i); |
403 | printk("nicstar%d: can't allocate memory for device structure.\n", i); | 380 | error = 2; |
404 | error = 2; | 381 | ns_init_card_error(card, error); |
405 | ns_init_card_error(card, error); | 382 | return error; |
406 | return error; | 383 | } |
407 | } | 384 | |
408 | cards[i] = card; | 385 | if ((card = kmalloc(sizeof(ns_dev), GFP_KERNEL)) == NULL) { |
409 | spin_lock_init(&card->int_lock); | 386 | printk |
410 | spin_lock_init(&card->res_lock); | 387 | ("nicstar%d: can't allocate memory for device structure.\n", |
411 | 388 | i); | |
412 | pci_set_drvdata(pcidev, card); | 389 | error = 2; |
413 | 390 | ns_init_card_error(card, error); | |
414 | card->index = i; | 391 | return error; |
415 | card->atmdev = NULL; | 392 | } |
416 | card->pcidev = pcidev; | 393 | cards[i] = card; |
417 | membase = pci_resource_start(pcidev, 1); | 394 | spin_lock_init(&card->int_lock); |
418 | card->membase = ioremap(membase, NS_IOREMAP_SIZE); | 395 | spin_lock_init(&card->res_lock); |
419 | if (!card->membase) | 396 | |
420 | { | 397 | pci_set_drvdata(pcidev, card); |
421 | printk("nicstar%d: can't ioremap() membase.\n",i); | 398 | |
422 | error = 3; | 399 | card->index = i; |
423 | ns_init_card_error(card, error); | 400 | card->atmdev = NULL; |
424 | return error; | 401 | card->pcidev = pcidev; |
425 | } | 402 | membase = pci_resource_start(pcidev, 1); |
426 | PRINTK("nicstar%d: membase at 0x%x.\n", i, card->membase); | 403 | card->membase = ioremap(membase, NS_IOREMAP_SIZE); |
427 | 404 | if (!card->membase) { | |
428 | pci_set_master(pcidev); | 405 | printk("nicstar%d: can't ioremap() membase.\n", i); |
429 | 406 | error = 3; | |
430 | if (pci_read_config_byte(pcidev, PCI_LATENCY_TIMER, &pci_latency) != 0) | 407 | ns_init_card_error(card, error); |
431 | { | 408 | return error; |
432 | printk("nicstar%d: can't read PCI latency timer.\n", i); | 409 | } |
433 | error = 6; | 410 | PRINTK("nicstar%d: membase at 0x%p.\n", i, card->membase); |
434 | ns_init_card_error(card, error); | 411 | |
435 | return error; | 412 | pci_set_master(pcidev); |
436 | } | 413 | |
414 | if (pci_read_config_byte(pcidev, PCI_LATENCY_TIMER, &pci_latency) != 0) { | ||
415 | printk("nicstar%d: can't read PCI latency timer.\n", i); | ||
416 | error = 6; | ||
417 | ns_init_card_error(card, error); | ||
418 | return error; | ||
419 | } | ||
437 | #ifdef NS_PCI_LATENCY | 420 | #ifdef NS_PCI_LATENCY |
438 | if (pci_latency < NS_PCI_LATENCY) | 421 | if (pci_latency < NS_PCI_LATENCY) { |
439 | { | 422 | PRINTK("nicstar%d: setting PCI latency timer to %d.\n", i, |
440 | PRINTK("nicstar%d: setting PCI latency timer to %d.\n", i, NS_PCI_LATENCY); | 423 | NS_PCI_LATENCY); |
441 | for (j = 1; j < 4; j++) | 424 | for (j = 1; j < 4; j++) { |
442 | { | 425 | if (pci_write_config_byte |
443 | if (pci_write_config_byte(pcidev, PCI_LATENCY_TIMER, NS_PCI_LATENCY) != 0) | 426 | (pcidev, PCI_LATENCY_TIMER, NS_PCI_LATENCY) != 0) |
444 | break; | 427 | break; |
445 | } | 428 | } |
446 | if (j == 4) | 429 | if (j == 4) { |
447 | { | 430 | printk |
448 | printk("nicstar%d: can't set PCI latency timer to %d.\n", i, NS_PCI_LATENCY); | 431 | ("nicstar%d: can't set PCI latency timer to %d.\n", |
449 | error = 7; | 432 | i, NS_PCI_LATENCY); |
450 | ns_init_card_error(card, error); | 433 | error = 7; |
451 | return error; | 434 | ns_init_card_error(card, error); |
452 | } | 435 | return error; |
453 | } | 436 | } |
437 | } | ||
454 | #endif /* NS_PCI_LATENCY */ | 438 | #endif /* NS_PCI_LATENCY */ |
455 | 439 | ||
456 | /* Clear timer overflow */ | 440 | /* Clear timer overflow */ |
457 | data = readl(card->membase + STAT); | 441 | data = readl(card->membase + STAT); |
458 | if (data & NS_STAT_TMROF) | 442 | if (data & NS_STAT_TMROF) |
459 | writel(NS_STAT_TMROF, card->membase + STAT); | 443 | writel(NS_STAT_TMROF, card->membase + STAT); |
460 | 444 | ||
461 | /* Software reset */ | 445 | /* Software reset */ |
462 | writel(NS_CFG_SWRST, card->membase + CFG); | 446 | writel(NS_CFG_SWRST, card->membase + CFG); |
463 | NS_DELAY; | 447 | NS_DELAY; |
464 | writel(0x00000000, card->membase + CFG); | 448 | writel(0x00000000, card->membase + CFG); |
465 | 449 | ||
466 | /* PHY reset */ | 450 | /* PHY reset */ |
467 | writel(0x00000008, card->membase + GP); | 451 | writel(0x00000008, card->membase + GP); |
468 | NS_DELAY; | 452 | NS_DELAY; |
469 | writel(0x00000001, card->membase + GP); | 453 | writel(0x00000001, card->membase + GP); |
470 | NS_DELAY; | 454 | NS_DELAY; |
471 | while (CMD_BUSY(card)); | 455 | while (CMD_BUSY(card)) ; |
472 | writel(NS_CMD_WRITE_UTILITY | 0x00000100, card->membase + CMD); /* Sync UTOPIA with SAR clock */ | 456 | writel(NS_CMD_WRITE_UTILITY | 0x00000100, card->membase + CMD); /* Sync UTOPIA with SAR clock */ |
473 | NS_DELAY; | 457 | NS_DELAY; |
474 | 458 | ||
475 | /* Detect PHY type */ | 459 | /* Detect PHY type */ |
476 | while (CMD_BUSY(card)); | 460 | while (CMD_BUSY(card)) ; |
477 | writel(NS_CMD_READ_UTILITY | 0x00000200, card->membase + CMD); | 461 | writel(NS_CMD_READ_UTILITY | 0x00000200, card->membase + CMD); |
478 | while (CMD_BUSY(card)); | 462 | while (CMD_BUSY(card)) ; |
479 | data = readl(card->membase + DR0); | 463 | data = readl(card->membase + DR0); |
480 | switch(data) { | 464 | switch (data) { |
481 | case 0x00000009: | 465 | case 0x00000009: |
482 | printk("nicstar%d: PHY seems to be 25 Mbps.\n", i); | 466 | printk("nicstar%d: PHY seems to be 25 Mbps.\n", i); |
483 | card->max_pcr = ATM_25_PCR; | 467 | card->max_pcr = ATM_25_PCR; |
484 | while(CMD_BUSY(card)); | 468 | while (CMD_BUSY(card)) ; |
485 | writel(0x00000008, card->membase + DR0); | 469 | writel(0x00000008, card->membase + DR0); |
486 | writel(NS_CMD_WRITE_UTILITY | 0x00000200, card->membase + CMD); | 470 | writel(NS_CMD_WRITE_UTILITY | 0x00000200, card->membase + CMD); |
487 | /* Clear an eventual pending interrupt */ | 471 | /* Clear an eventual pending interrupt */ |
488 | writel(NS_STAT_SFBQF, card->membase + STAT); | 472 | writel(NS_STAT_SFBQF, card->membase + STAT); |
489 | #ifdef PHY_LOOPBACK | 473 | #ifdef PHY_LOOPBACK |
490 | while(CMD_BUSY(card)); | 474 | while (CMD_BUSY(card)) ; |
491 | writel(0x00000022, card->membase + DR0); | 475 | writel(0x00000022, card->membase + DR0); |
492 | writel(NS_CMD_WRITE_UTILITY | 0x00000202, card->membase + CMD); | 476 | writel(NS_CMD_WRITE_UTILITY | 0x00000202, card->membase + CMD); |
493 | #endif /* PHY_LOOPBACK */ | 477 | #endif /* PHY_LOOPBACK */ |
494 | break; | 478 | break; |
495 | case 0x00000030: | 479 | case 0x00000030: |
496 | case 0x00000031: | 480 | case 0x00000031: |
497 | printk("nicstar%d: PHY seems to be 155 Mbps.\n", i); | 481 | printk("nicstar%d: PHY seems to be 155 Mbps.\n", i); |
498 | card->max_pcr = ATM_OC3_PCR; | 482 | card->max_pcr = ATM_OC3_PCR; |
499 | #ifdef PHY_LOOPBACK | 483 | #ifdef PHY_LOOPBACK |
500 | while(CMD_BUSY(card)); | 484 | while (CMD_BUSY(card)) ; |
501 | writel(0x00000002, card->membase + DR0); | 485 | writel(0x00000002, card->membase + DR0); |
502 | writel(NS_CMD_WRITE_UTILITY | 0x00000205, card->membase + CMD); | 486 | writel(NS_CMD_WRITE_UTILITY | 0x00000205, card->membase + CMD); |
503 | #endif /* PHY_LOOPBACK */ | 487 | #endif /* PHY_LOOPBACK */ |
504 | break; | 488 | break; |
505 | default: | 489 | default: |
506 | printk("nicstar%d: unknown PHY type (0x%08X).\n", i, data); | 490 | printk("nicstar%d: unknown PHY type (0x%08X).\n", i, data); |
507 | error = 8; | 491 | error = 8; |
508 | ns_init_card_error(card, error); | 492 | ns_init_card_error(card, error); |
509 | return error; | 493 | return error; |
510 | } | 494 | } |
511 | writel(0x00000000, card->membase + GP); | 495 | writel(0x00000000, card->membase + GP); |
512 | 496 | ||
513 | /* Determine SRAM size */ | 497 | /* Determine SRAM size */ |
514 | data = 0x76543210; | 498 | data = 0x76543210; |
515 | ns_write_sram(card, 0x1C003, &data, 1); | 499 | ns_write_sram(card, 0x1C003, &data, 1); |
516 | data = 0x89ABCDEF; | 500 | data = 0x89ABCDEF; |
517 | ns_write_sram(card, 0x14003, &data, 1); | 501 | ns_write_sram(card, 0x14003, &data, 1); |
518 | if (ns_read_sram(card, 0x14003) == 0x89ABCDEF && | 502 | if (ns_read_sram(card, 0x14003) == 0x89ABCDEF && |
519 | ns_read_sram(card, 0x1C003) == 0x76543210) | 503 | ns_read_sram(card, 0x1C003) == 0x76543210) |
520 | card->sram_size = 128; | 504 | card->sram_size = 128; |
521 | else | 505 | else |
522 | card->sram_size = 32; | 506 | card->sram_size = 32; |
523 | PRINTK("nicstar%d: %dK x 32bit SRAM size.\n", i, card->sram_size); | 507 | PRINTK("nicstar%d: %dK x 32bit SRAM size.\n", i, card->sram_size); |
524 | 508 | ||
525 | card->rct_size = NS_MAX_RCTSIZE; | 509 | card->rct_size = NS_MAX_RCTSIZE; |
526 | 510 | ||
527 | #if (NS_MAX_RCTSIZE == 4096) | 511 | #if (NS_MAX_RCTSIZE == 4096) |
528 | if (card->sram_size == 128) | 512 | if (card->sram_size == 128) |
529 | printk("nicstar%d: limiting maximum VCI. See NS_MAX_RCTSIZE in nicstar.h\n", i); | 513 | printk |
514 | ("nicstar%d: limiting maximum VCI. See NS_MAX_RCTSIZE in nicstar.h\n", | ||
515 | i); | ||
530 | #elif (NS_MAX_RCTSIZE == 16384) | 516 | #elif (NS_MAX_RCTSIZE == 16384) |
531 | if (card->sram_size == 32) | 517 | if (card->sram_size == 32) { |
532 | { | 518 | printk |
533 | printk("nicstar%d: wasting memory. See NS_MAX_RCTSIZE in nicstar.h\n", i); | 519 | ("nicstar%d: wasting memory. See NS_MAX_RCTSIZE in nicstar.h\n", |
534 | card->rct_size = 4096; | 520 | i); |
535 | } | 521 | card->rct_size = 4096; |
522 | } | ||
536 | #else | 523 | #else |
537 | #error NS_MAX_RCTSIZE must be either 4096 or 16384 in nicstar.c | 524 | #error NS_MAX_RCTSIZE must be either 4096 or 16384 in nicstar.c |
538 | #endif | 525 | #endif |
539 | 526 | ||
540 | card->vpibits = NS_VPIBITS; | 527 | card->vpibits = NS_VPIBITS; |
541 | if (card->rct_size == 4096) | 528 | if (card->rct_size == 4096) |
542 | card->vcibits = 12 - NS_VPIBITS; | 529 | card->vcibits = 12 - NS_VPIBITS; |
543 | else /* card->rct_size == 16384 */ | 530 | else /* card->rct_size == 16384 */ |
544 | card->vcibits = 14 - NS_VPIBITS; | 531 | card->vcibits = 14 - NS_VPIBITS; |
545 | 532 | ||
546 | /* Initialize the nicstar eeprom/eprom stuff, for the MAC addr */ | 533 | /* Initialize the nicstar eeprom/eprom stuff, for the MAC addr */ |
547 | if (mac[i] == NULL) | 534 | if (mac[i] == NULL) |
548 | nicstar_init_eprom(card->membase); | 535 | nicstar_init_eprom(card->membase); |
549 | 536 | ||
550 | /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */ | 537 | /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */ |
551 | writel(0x00000000, card->membase + VPM); | 538 | writel(0x00000000, card->membase + VPM); |
552 | 539 | ||
553 | /* Initialize TSQ */ | 540 | /* Initialize TSQ */ |
554 | card->tsq.org = kmalloc(NS_TSQSIZE + NS_TSQ_ALIGNMENT, GFP_KERNEL); | 541 | card->tsq.org = pci_alloc_consistent(card->pcidev, |
555 | if (card->tsq.org == NULL) | 542 | NS_TSQSIZE + NS_TSQ_ALIGNMENT, |
556 | { | 543 | &card->tsq.dma); |
557 | printk("nicstar%d: can't allocate TSQ.\n", i); | 544 | if (card->tsq.org == NULL) { |
558 | error = 10; | 545 | printk("nicstar%d: can't allocate TSQ.\n", i); |
559 | ns_init_card_error(card, error); | 546 | error = 10; |
560 | return error; | 547 | ns_init_card_error(card, error); |
561 | } | 548 | return error; |
562 | card->tsq.base = (ns_tsi *) ALIGN_ADDRESS(card->tsq.org, NS_TSQ_ALIGNMENT); | 549 | } |
563 | card->tsq.next = card->tsq.base; | 550 | card->tsq.base = PTR_ALIGN(card->tsq.org, NS_TSQ_ALIGNMENT); |
564 | card->tsq.last = card->tsq.base + (NS_TSQ_NUM_ENTRIES - 1); | 551 | card->tsq.next = card->tsq.base; |
565 | for (j = 0; j < NS_TSQ_NUM_ENTRIES; j++) | 552 | card->tsq.last = card->tsq.base + (NS_TSQ_NUM_ENTRIES - 1); |
566 | ns_tsi_init(card->tsq.base + j); | 553 | for (j = 0; j < NS_TSQ_NUM_ENTRIES; j++) |
567 | writel(0x00000000, card->membase + TSQH); | 554 | ns_tsi_init(card->tsq.base + j); |
568 | writel((u32) virt_to_bus(card->tsq.base), card->membase + TSQB); | 555 | writel(0x00000000, card->membase + TSQH); |
569 | PRINTK("nicstar%d: TSQ base at 0x%x 0x%x 0x%x.\n", i, (u32) card->tsq.base, | 556 | writel(ALIGN(card->tsq.dma, NS_TSQ_ALIGNMENT), card->membase + TSQB); |
570 | (u32) virt_to_bus(card->tsq.base), readl(card->membase + TSQB)); | 557 | PRINTK("nicstar%d: TSQ base at 0x%p.\n", i, card->tsq.base); |
571 | 558 | ||
572 | /* Initialize RSQ */ | 559 | /* Initialize RSQ */ |
573 | card->rsq.org = kmalloc(NS_RSQSIZE + NS_RSQ_ALIGNMENT, GFP_KERNEL); | 560 | card->rsq.org = pci_alloc_consistent(card->pcidev, |
574 | if (card->rsq.org == NULL) | 561 | NS_RSQSIZE + NS_RSQ_ALIGNMENT, |
575 | { | 562 | &card->rsq.dma); |
576 | printk("nicstar%d: can't allocate RSQ.\n", i); | 563 | if (card->rsq.org == NULL) { |
577 | error = 11; | 564 | printk("nicstar%d: can't allocate RSQ.\n", i); |
578 | ns_init_card_error(card, error); | 565 | error = 11; |
579 | return error; | 566 | ns_init_card_error(card, error); |
580 | } | 567 | return error; |
581 | card->rsq.base = (ns_rsqe *) ALIGN_ADDRESS(card->rsq.org, NS_RSQ_ALIGNMENT); | 568 | } |
582 | card->rsq.next = card->rsq.base; | 569 | card->rsq.base = PTR_ALIGN(card->rsq.org, NS_RSQ_ALIGNMENT); |
583 | card->rsq.last = card->rsq.base + (NS_RSQ_NUM_ENTRIES - 1); | 570 | card->rsq.next = card->rsq.base; |
584 | for (j = 0; j < NS_RSQ_NUM_ENTRIES; j++) | 571 | card->rsq.last = card->rsq.base + (NS_RSQ_NUM_ENTRIES - 1); |
585 | ns_rsqe_init(card->rsq.base + j); | 572 | for (j = 0; j < NS_RSQ_NUM_ENTRIES; j++) |
586 | writel(0x00000000, card->membase + RSQH); | 573 | ns_rsqe_init(card->rsq.base + j); |
587 | writel((u32) virt_to_bus(card->rsq.base), card->membase + RSQB); | 574 | writel(0x00000000, card->membase + RSQH); |
588 | PRINTK("nicstar%d: RSQ base at 0x%x.\n", i, (u32) card->rsq.base); | 575 | writel(ALIGN(card->rsq.dma, NS_RSQ_ALIGNMENT), card->membase + RSQB); |
589 | 576 | PRINTK("nicstar%d: RSQ base at 0x%p.\n", i, card->rsq.base); | |
590 | /* Initialize SCQ0, the only VBR SCQ used */ | 577 | |
591 | card->scq1 = NULL; | 578 | /* Initialize SCQ0, the only VBR SCQ used */ |
592 | card->scq2 = NULL; | 579 | card->scq1 = NULL; |
593 | card->scq0 = get_scq(VBR_SCQSIZE, NS_VRSCD0); | 580 | card->scq2 = NULL; |
594 | if (card->scq0 == NULL) | 581 | card->scq0 = get_scq(card, VBR_SCQSIZE, NS_VRSCD0); |
595 | { | 582 | if (card->scq0 == NULL) { |
596 | printk("nicstar%d: can't get SCQ0.\n", i); | 583 | printk("nicstar%d: can't get SCQ0.\n", i); |
597 | error = 12; | 584 | error = 12; |
598 | ns_init_card_error(card, error); | 585 | ns_init_card_error(card, error); |
599 | return error; | 586 | return error; |
600 | } | 587 | } |
601 | u32d[0] = (u32) virt_to_bus(card->scq0->base); | 588 | u32d[0] = scq_virt_to_bus(card->scq0, card->scq0->base); |
602 | u32d[1] = (u32) 0x00000000; | 589 | u32d[1] = (u32) 0x00000000; |
603 | u32d[2] = (u32) 0xffffffff; | 590 | u32d[2] = (u32) 0xffffffff; |
604 | u32d[3] = (u32) 0x00000000; | 591 | u32d[3] = (u32) 0x00000000; |
605 | ns_write_sram(card, NS_VRSCD0, u32d, 4); | 592 | ns_write_sram(card, NS_VRSCD0, u32d, 4); |
606 | ns_write_sram(card, NS_VRSCD1, u32d, 4); /* These last two won't be used */ | 593 | ns_write_sram(card, NS_VRSCD1, u32d, 4); /* These last two won't be used */ |
607 | ns_write_sram(card, NS_VRSCD2, u32d, 4); /* but are initialized, just in case... */ | 594 | ns_write_sram(card, NS_VRSCD2, u32d, 4); /* but are initialized, just in case... */ |
608 | card->scq0->scd = NS_VRSCD0; | 595 | card->scq0->scd = NS_VRSCD0; |
609 | PRINTK("nicstar%d: VBR-SCQ0 base at 0x%x.\n", i, (u32) card->scq0->base); | 596 | PRINTK("nicstar%d: VBR-SCQ0 base at 0x%p.\n", i, card->scq0->base); |
610 | 597 | ||
611 | /* Initialize TSTs */ | 598 | /* Initialize TSTs */ |
612 | card->tst_addr = NS_TST0; | 599 | card->tst_addr = NS_TST0; |
613 | card->tst_free_entries = NS_TST_NUM_ENTRIES; | 600 | card->tst_free_entries = NS_TST_NUM_ENTRIES; |
614 | data = NS_TST_OPCODE_VARIABLE; | 601 | data = NS_TST_OPCODE_VARIABLE; |
615 | for (j = 0; j < NS_TST_NUM_ENTRIES; j++) | 602 | for (j = 0; j < NS_TST_NUM_ENTRIES; j++) |
616 | ns_write_sram(card, NS_TST0 + j, &data, 1); | 603 | ns_write_sram(card, NS_TST0 + j, &data, 1); |
617 | data = ns_tste_make(NS_TST_OPCODE_END, NS_TST0); | 604 | data = ns_tste_make(NS_TST_OPCODE_END, NS_TST0); |
618 | ns_write_sram(card, NS_TST0 + NS_TST_NUM_ENTRIES, &data, 1); | 605 | ns_write_sram(card, NS_TST0 + NS_TST_NUM_ENTRIES, &data, 1); |
619 | for (j = 0; j < NS_TST_NUM_ENTRIES; j++) | 606 | for (j = 0; j < NS_TST_NUM_ENTRIES; j++) |
620 | ns_write_sram(card, NS_TST1 + j, &data, 1); | 607 | ns_write_sram(card, NS_TST1 + j, &data, 1); |
621 | data = ns_tste_make(NS_TST_OPCODE_END, NS_TST1); | 608 | data = ns_tste_make(NS_TST_OPCODE_END, NS_TST1); |
622 | ns_write_sram(card, NS_TST1 + NS_TST_NUM_ENTRIES, &data, 1); | 609 | ns_write_sram(card, NS_TST1 + NS_TST_NUM_ENTRIES, &data, 1); |
623 | for (j = 0; j < NS_TST_NUM_ENTRIES; j++) | 610 | for (j = 0; j < NS_TST_NUM_ENTRIES; j++) |
624 | card->tste2vc[j] = NULL; | 611 | card->tste2vc[j] = NULL; |
625 | writel(NS_TST0 << 2, card->membase + TSTB); | 612 | writel(NS_TST0 << 2, card->membase + TSTB); |
626 | 613 | ||
627 | 614 | /* Initialize RCT. AAL type is set on opening the VC. */ | |
628 | /* Initialize RCT. AAL type is set on opening the VC. */ | ||
629 | #ifdef RCQ_SUPPORT | 615 | #ifdef RCQ_SUPPORT |
630 | u32d[0] = NS_RCTE_RAWCELLINTEN; | 616 | u32d[0] = NS_RCTE_RAWCELLINTEN; |
631 | #else | 617 | #else |
632 | u32d[0] = 0x00000000; | 618 | u32d[0] = 0x00000000; |
633 | #endif /* RCQ_SUPPORT */ | 619 | #endif /* RCQ_SUPPORT */ |
634 | u32d[1] = 0x00000000; | 620 | u32d[1] = 0x00000000; |
635 | u32d[2] = 0x00000000; | 621 | u32d[2] = 0x00000000; |
636 | u32d[3] = 0xFFFFFFFF; | 622 | u32d[3] = 0xFFFFFFFF; |
637 | for (j = 0; j < card->rct_size; j++) | 623 | for (j = 0; j < card->rct_size; j++) |
638 | ns_write_sram(card, j * 4, u32d, 4); | 624 | ns_write_sram(card, j * 4, u32d, 4); |
639 | 625 | ||
640 | memset(card->vcmap, 0, NS_MAX_RCTSIZE * sizeof(vc_map)); | 626 | memset(card->vcmap, 0, NS_MAX_RCTSIZE * sizeof(vc_map)); |
641 | 627 | ||
642 | for (j = 0; j < NS_FRSCD_NUM; j++) | 628 | for (j = 0; j < NS_FRSCD_NUM; j++) |
643 | card->scd2vc[j] = NULL; | 629 | card->scd2vc[j] = NULL; |
644 | 630 | ||
645 | /* Initialize buffer levels */ | 631 | /* Initialize buffer levels */ |
646 | card->sbnr.min = MIN_SB; | 632 | card->sbnr.min = MIN_SB; |
647 | card->sbnr.init = NUM_SB; | 633 | card->sbnr.init = NUM_SB; |
648 | card->sbnr.max = MAX_SB; | 634 | card->sbnr.max = MAX_SB; |
649 | card->lbnr.min = MIN_LB; | 635 | card->lbnr.min = MIN_LB; |
650 | card->lbnr.init = NUM_LB; | 636 | card->lbnr.init = NUM_LB; |
651 | card->lbnr.max = MAX_LB; | 637 | card->lbnr.max = MAX_LB; |
652 | card->iovnr.min = MIN_IOVB; | 638 | card->iovnr.min = MIN_IOVB; |
653 | card->iovnr.init = NUM_IOVB; | 639 | card->iovnr.init = NUM_IOVB; |
654 | card->iovnr.max = MAX_IOVB; | 640 | card->iovnr.max = MAX_IOVB; |
655 | card->hbnr.min = MIN_HB; | 641 | card->hbnr.min = MIN_HB; |
656 | card->hbnr.init = NUM_HB; | 642 | card->hbnr.init = NUM_HB; |
657 | card->hbnr.max = MAX_HB; | 643 | card->hbnr.max = MAX_HB; |
658 | 644 | ||
659 | card->sm_handle = 0x00000000; | 645 | card->sm_handle = 0x00000000; |
660 | card->sm_addr = 0x00000000; | 646 | card->sm_addr = 0x00000000; |
661 | card->lg_handle = 0x00000000; | 647 | card->lg_handle = 0x00000000; |
662 | card->lg_addr = 0x00000000; | 648 | card->lg_addr = 0x00000000; |
663 | 649 | ||
664 | card->efbie = 1; /* To prevent push_rxbufs from enabling the interrupt */ | 650 | card->efbie = 1; /* To prevent push_rxbufs from enabling the interrupt */ |
665 | 651 | ||
666 | /* Pre-allocate some huge buffers */ | 652 | idr_init(&card->idr); |
667 | skb_queue_head_init(&card->hbpool.queue); | 653 | |
668 | card->hbpool.count = 0; | 654 | /* Pre-allocate some huge buffers */ |
669 | for (j = 0; j < NUM_HB; j++) | 655 | skb_queue_head_init(&card->hbpool.queue); |
670 | { | 656 | card->hbpool.count = 0; |
671 | struct sk_buff *hb; | 657 | for (j = 0; j < NUM_HB; j++) { |
672 | hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); | 658 | struct sk_buff *hb; |
673 | if (hb == NULL) | 659 | hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); |
674 | { | 660 | if (hb == NULL) { |
675 | printk("nicstar%d: can't allocate %dth of %d huge buffers.\n", | 661 | printk |
676 | i, j, NUM_HB); | 662 | ("nicstar%d: can't allocate %dth of %d huge buffers.\n", |
677 | error = 13; | 663 | i, j, NUM_HB); |
678 | ns_init_card_error(card, error); | 664 | error = 13; |
679 | return error; | 665 | ns_init_card_error(card, error); |
680 | } | 666 | return error; |
681 | NS_SKB_CB(hb)->buf_type = BUF_NONE; | 667 | } |
682 | skb_queue_tail(&card->hbpool.queue, hb); | 668 | NS_PRV_BUFTYPE(hb) = BUF_NONE; |
683 | card->hbpool.count++; | 669 | skb_queue_tail(&card->hbpool.queue, hb); |
684 | } | 670 | card->hbpool.count++; |
685 | 671 | } | |
686 | 672 | ||
687 | /* Allocate large buffers */ | 673 | /* Allocate large buffers */ |
688 | skb_queue_head_init(&card->lbpool.queue); | 674 | skb_queue_head_init(&card->lbpool.queue); |
689 | card->lbpool.count = 0; /* Not used */ | 675 | card->lbpool.count = 0; /* Not used */ |
690 | for (j = 0; j < NUM_LB; j++) | 676 | for (j = 0; j < NUM_LB; j++) { |
691 | { | 677 | struct sk_buff *lb; |
692 | struct sk_buff *lb; | 678 | lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); |
693 | lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); | 679 | if (lb == NULL) { |
694 | if (lb == NULL) | 680 | printk |
695 | { | 681 | ("nicstar%d: can't allocate %dth of %d large buffers.\n", |
696 | printk("nicstar%d: can't allocate %dth of %d large buffers.\n", | 682 | i, j, NUM_LB); |
697 | i, j, NUM_LB); | 683 | error = 14; |
698 | error = 14; | 684 | ns_init_card_error(card, error); |
699 | ns_init_card_error(card, error); | 685 | return error; |
700 | return error; | 686 | } |
701 | } | 687 | NS_PRV_BUFTYPE(lb) = BUF_LG; |
702 | NS_SKB_CB(lb)->buf_type = BUF_LG; | 688 | skb_queue_tail(&card->lbpool.queue, lb); |
703 | skb_queue_tail(&card->lbpool.queue, lb); | 689 | skb_reserve(lb, NS_SMBUFSIZE); |
704 | skb_reserve(lb, NS_SMBUFSIZE); | 690 | push_rxbufs(card, lb); |
705 | push_rxbufs(card, lb); | 691 | /* Due to the implementation of push_rxbufs() this is 1, not 0 */ |
706 | /* Due to the implementation of push_rxbufs() this is 1, not 0 */ | 692 | if (j == 1) { |
707 | if (j == 1) | 693 | card->rcbuf = lb; |
708 | { | 694 | card->rawcell = (struct ns_rcqe *) lb->data; |
709 | card->rcbuf = lb; | 695 | card->rawch = NS_PRV_DMA(lb); |
710 | card->rawch = (u32) virt_to_bus(lb->data); | 696 | } |
711 | } | 697 | } |
712 | } | 698 | /* Test for strange behaviour which leads to crashes */ |
713 | /* Test for strange behaviour which leads to crashes */ | 699 | if ((bcount = |
714 | if ((bcount = ns_stat_lfbqc_get(readl(card->membase + STAT))) < card->lbnr.min) | 700 | ns_stat_lfbqc_get(readl(card->membase + STAT))) < card->lbnr.min) { |
715 | { | 701 | printk |
716 | printk("nicstar%d: Strange... Just allocated %d large buffers and lfbqc = %d.\n", | 702 | ("nicstar%d: Strange... Just allocated %d large buffers and lfbqc = %d.\n", |
717 | i, j, bcount); | 703 | i, j, bcount); |
718 | error = 14; | 704 | error = 14; |
719 | ns_init_card_error(card, error); | 705 | ns_init_card_error(card, error); |
720 | return error; | 706 | return error; |
721 | } | 707 | } |
722 | 708 | ||
723 | 709 | /* Allocate small buffers */ | |
724 | /* Allocate small buffers */ | 710 | skb_queue_head_init(&card->sbpool.queue); |
725 | skb_queue_head_init(&card->sbpool.queue); | 711 | card->sbpool.count = 0; /* Not used */ |
726 | card->sbpool.count = 0; /* Not used */ | 712 | for (j = 0; j < NUM_SB; j++) { |
727 | for (j = 0; j < NUM_SB; j++) | 713 | struct sk_buff *sb; |
728 | { | 714 | sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); |
729 | struct sk_buff *sb; | 715 | if (sb == NULL) { |
730 | sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); | 716 | printk |
731 | if (sb == NULL) | 717 | ("nicstar%d: can't allocate %dth of %d small buffers.\n", |
732 | { | 718 | i, j, NUM_SB); |
733 | printk("nicstar%d: can't allocate %dth of %d small buffers.\n", | 719 | error = 15; |
734 | i, j, NUM_SB); | 720 | ns_init_card_error(card, error); |
735 | error = 15; | 721 | return error; |
736 | ns_init_card_error(card, error); | 722 | } |
737 | return error; | 723 | NS_PRV_BUFTYPE(sb) = BUF_SM; |
738 | } | 724 | skb_queue_tail(&card->sbpool.queue, sb); |
739 | NS_SKB_CB(sb)->buf_type = BUF_SM; | 725 | skb_reserve(sb, NS_AAL0_HEADER); |
740 | skb_queue_tail(&card->sbpool.queue, sb); | 726 | push_rxbufs(card, sb); |
741 | skb_reserve(sb, NS_AAL0_HEADER); | 727 | } |
742 | push_rxbufs(card, sb); | 728 | /* Test for strange behaviour which leads to crashes */ |
743 | } | 729 | if ((bcount = |
744 | /* Test for strange behaviour which leads to crashes */ | 730 | ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min) { |
745 | if ((bcount = ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min) | 731 | printk |
746 | { | 732 | ("nicstar%d: Strange... Just allocated %d small buffers and sfbqc = %d.\n", |
747 | printk("nicstar%d: Strange... Just allocated %d small buffers and sfbqc = %d.\n", | 733 | i, j, bcount); |
748 | i, j, bcount); | 734 | error = 15; |
749 | error = 15; | 735 | ns_init_card_error(card, error); |
750 | ns_init_card_error(card, error); | 736 | return error; |
751 | return error; | 737 | } |
752 | } | 738 | |
753 | 739 | /* Allocate iovec buffers */ | |
754 | 740 | skb_queue_head_init(&card->iovpool.queue); | |
755 | /* Allocate iovec buffers */ | 741 | card->iovpool.count = 0; |
756 | skb_queue_head_init(&card->iovpool.queue); | 742 | for (j = 0; j < NUM_IOVB; j++) { |
757 | card->iovpool.count = 0; | 743 | struct sk_buff *iovb; |
758 | for (j = 0; j < NUM_IOVB; j++) | 744 | iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL); |
759 | { | 745 | if (iovb == NULL) { |
760 | struct sk_buff *iovb; | 746 | printk |
761 | iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL); | 747 | ("nicstar%d: can't allocate %dth of %d iovec buffers.\n", |
762 | if (iovb == NULL) | 748 | i, j, NUM_IOVB); |
763 | { | 749 | error = 16; |
764 | printk("nicstar%d: can't allocate %dth of %d iovec buffers.\n", | 750 | ns_init_card_error(card, error); |
765 | i, j, NUM_IOVB); | 751 | return error; |
766 | error = 16; | 752 | } |
767 | ns_init_card_error(card, error); | 753 | NS_PRV_BUFTYPE(iovb) = BUF_NONE; |
768 | return error; | 754 | skb_queue_tail(&card->iovpool.queue, iovb); |
769 | } | 755 | card->iovpool.count++; |
770 | NS_SKB_CB(iovb)->buf_type = BUF_NONE; | 756 | } |
771 | skb_queue_tail(&card->iovpool.queue, iovb); | 757 | |
772 | card->iovpool.count++; | 758 | /* Configure NICStAR */ |
773 | } | 759 | if (card->rct_size == 4096) |
774 | 760 | ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES; | |
775 | /* Configure NICStAR */ | 761 | else /* (card->rct_size == 16384) */ |
776 | if (card->rct_size == 4096) | 762 | ns_cfg_rctsize = NS_CFG_RCTSIZE_16384_ENTRIES; |
777 | ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES; | 763 | |
778 | else /* (card->rct_size == 16384) */ | 764 | card->efbie = 1; |
779 | ns_cfg_rctsize = NS_CFG_RCTSIZE_16384_ENTRIES; | 765 | |
780 | 766 | card->intcnt = 0; | |
781 | card->efbie = 1; | 767 | if (request_irq |
782 | 768 | (pcidev->irq, &ns_irq_handler, IRQF_DISABLED | IRQF_SHARED, | |
783 | card->intcnt = 0; | 769 | "nicstar", card) != 0) { |
784 | if (request_irq(pcidev->irq, &ns_irq_handler, IRQF_DISABLED | IRQF_SHARED, "nicstar", card) != 0) | 770 | printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq); |
785 | { | 771 | error = 9; |
786 | printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq); | 772 | ns_init_card_error(card, error); |
787 | error = 9; | 773 | return error; |
788 | ns_init_card_error(card, error); | 774 | } |
789 | return error; | 775 | |
790 | } | 776 | /* Register device */ |
791 | 777 | card->atmdev = atm_dev_register("nicstar", &atm_ops, -1, NULL); | |
792 | /* Register device */ | 778 | if (card->atmdev == NULL) { |
793 | card->atmdev = atm_dev_register("nicstar", &atm_ops, -1, NULL); | 779 | printk("nicstar%d: can't register device.\n", i); |
794 | if (card->atmdev == NULL) | 780 | error = 17; |
795 | { | 781 | ns_init_card_error(card, error); |
796 | printk("nicstar%d: can't register device.\n", i); | 782 | return error; |
797 | error = 17; | 783 | } |
798 | ns_init_card_error(card, error); | 784 | |
799 | return error; | 785 | if (ns_parse_mac(mac[i], card->atmdev->esi)) { |
800 | } | 786 | nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET, |
801 | 787 | card->atmdev->esi, 6); | |
802 | if (ns_parse_mac(mac[i], card->atmdev->esi)) { | 788 | if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) == |
803 | nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET, | 789 | 0) { |
804 | card->atmdev->esi, 6); | 790 | nicstar_read_eprom(card->membase, |
805 | if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) == 0) { | 791 | NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT, |
806 | nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT, | 792 | card->atmdev->esi, 6); |
807 | card->atmdev->esi, 6); | 793 | } |
808 | } | 794 | } |
809 | } | 795 | |
810 | 796 | printk("nicstar%d: MAC address %pM\n", i, card->atmdev->esi); | |
811 | printk("nicstar%d: MAC address %pM\n", i, card->atmdev->esi); | 797 | |
812 | 798 | card->atmdev->dev_data = card; | |
813 | card->atmdev->dev_data = card; | 799 | card->atmdev->ci_range.vpi_bits = card->vpibits; |
814 | card->atmdev->ci_range.vpi_bits = card->vpibits; | 800 | card->atmdev->ci_range.vci_bits = card->vcibits; |
815 | card->atmdev->ci_range.vci_bits = card->vcibits; | 801 | card->atmdev->link_rate = card->max_pcr; |
816 | card->atmdev->link_rate = card->max_pcr; | 802 | card->atmdev->phy = NULL; |
817 | card->atmdev->phy = NULL; | ||
818 | 803 | ||
819 | #ifdef CONFIG_ATM_NICSTAR_USE_SUNI | 804 | #ifdef CONFIG_ATM_NICSTAR_USE_SUNI |
820 | if (card->max_pcr == ATM_OC3_PCR) | 805 | if (card->max_pcr == ATM_OC3_PCR) |
821 | suni_init(card->atmdev); | 806 | suni_init(card->atmdev); |
822 | #endif /* CONFIG_ATM_NICSTAR_USE_SUNI */ | 807 | #endif /* CONFIG_ATM_NICSTAR_USE_SUNI */ |
823 | 808 | ||
824 | #ifdef CONFIG_ATM_NICSTAR_USE_IDT77105 | 809 | #ifdef CONFIG_ATM_NICSTAR_USE_IDT77105 |
825 | if (card->max_pcr == ATM_25_PCR) | 810 | if (card->max_pcr == ATM_25_PCR) |
826 | idt77105_init(card->atmdev); | 811 | idt77105_init(card->atmdev); |
827 | #endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */ | 812 | #endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */ |
828 | 813 | ||
829 | if (card->atmdev->phy && card->atmdev->phy->start) | 814 | if (card->atmdev->phy && card->atmdev->phy->start) |
830 | card->atmdev->phy->start(card->atmdev); | 815 | card->atmdev->phy->start(card->atmdev); |
831 | |||
832 | writel(NS_CFG_RXPATH | | ||
833 | NS_CFG_SMBUFSIZE | | ||
834 | NS_CFG_LGBUFSIZE | | ||
835 | NS_CFG_EFBIE | | ||
836 | NS_CFG_RSQSIZE | | ||
837 | NS_CFG_VPIBITS | | ||
838 | ns_cfg_rctsize | | ||
839 | NS_CFG_RXINT_NODELAY | | ||
840 | NS_CFG_RAWIE | /* Only enabled if RCQ_SUPPORT */ | ||
841 | NS_CFG_RSQAFIE | | ||
842 | NS_CFG_TXEN | | ||
843 | NS_CFG_TXIE | | ||
844 | NS_CFG_TSQFIE_OPT | /* Only enabled if ENABLE_TSQFIE */ | ||
845 | NS_CFG_PHYIE, | ||
846 | card->membase + CFG); | ||
847 | |||
848 | num_cards++; | ||
849 | |||
850 | return error; | ||
851 | } | ||
852 | 816 | ||
817 | writel(NS_CFG_RXPATH | NS_CFG_SMBUFSIZE | NS_CFG_LGBUFSIZE | NS_CFG_EFBIE | NS_CFG_RSQSIZE | NS_CFG_VPIBITS | ns_cfg_rctsize | NS_CFG_RXINT_NODELAY | NS_CFG_RAWIE | /* Only enabled if RCQ_SUPPORT */ | ||
818 | NS_CFG_RSQAFIE | NS_CFG_TXEN | NS_CFG_TXIE | NS_CFG_TSQFIE_OPT | /* Only enabled if ENABLE_TSQFIE */ | ||
819 | NS_CFG_PHYIE, card->membase + CFG); | ||
853 | 820 | ||
821 | num_cards++; | ||
854 | 822 | ||
855 | static void __devinit ns_init_card_error(ns_dev *card, int error) | 823 | return error; |
856 | { | ||
857 | if (error >= 17) | ||
858 | { | ||
859 | writel(0x00000000, card->membase + CFG); | ||
860 | } | ||
861 | if (error >= 16) | ||
862 | { | ||
863 | struct sk_buff *iovb; | ||
864 | while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) | ||
865 | dev_kfree_skb_any(iovb); | ||
866 | } | ||
867 | if (error >= 15) | ||
868 | { | ||
869 | struct sk_buff *sb; | ||
870 | while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL) | ||
871 | dev_kfree_skb_any(sb); | ||
872 | free_scq(card->scq0, NULL); | ||
873 | } | ||
874 | if (error >= 14) | ||
875 | { | ||
876 | struct sk_buff *lb; | ||
877 | while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL) | ||
878 | dev_kfree_skb_any(lb); | ||
879 | } | ||
880 | if (error >= 13) | ||
881 | { | ||
882 | struct sk_buff *hb; | ||
883 | while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) | ||
884 | dev_kfree_skb_any(hb); | ||
885 | } | ||
886 | if (error >= 12) | ||
887 | { | ||
888 | kfree(card->rsq.org); | ||
889 | } | ||
890 | if (error >= 11) | ||
891 | { | ||
892 | kfree(card->tsq.org); | ||
893 | } | ||
894 | if (error >= 10) | ||
895 | { | ||
896 | free_irq(card->pcidev->irq, card); | ||
897 | } | ||
898 | if (error >= 4) | ||
899 | { | ||
900 | iounmap(card->membase); | ||
901 | } | ||
902 | if (error >= 3) | ||
903 | { | ||
904 | pci_disable_device(card->pcidev); | ||
905 | kfree(card); | ||
906 | } | ||
907 | } | 824 | } |
908 | 825 | ||
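Because ns_init_card() above now obtains coherent memory for the TSQ, RSQ and SCQ and streams skb data through the DMA API instead of relying on virt_to_bus(), the probe checks early on that 32-bit DMA addressing is usable for both streaming and coherent mappings, and bails out before anything has been mapped. A minimal sketch of that check; the driver itself reports a numeric error code through ns_init_card_error() rather than returning -ENODEV directly:

	/* both masks matter: pci_map_single() uses the streaming mask,
	   pci_alloc_consistent() the coherent one */
	if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0 ||
	    pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_WARNING "nicstar%d: No suitable DMA available.\n", i);
		return -ENODEV;		/* nothing allocated or mapped yet */
	}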
909 | 826 | static void __devinit ns_init_card_error(ns_dev * card, int error) | |
910 | |||
911 | static scq_info *get_scq(int size, u32 scd) | ||
912 | { | 827 | { |
913 | scq_info *scq; | 828 | if (error >= 17) { |
914 | int i; | 829 | writel(0x00000000, card->membase + CFG); |
915 | 830 | } | |
916 | if (size != VBR_SCQSIZE && size != CBR_SCQSIZE) | 831 | if (error >= 16) { |
917 | return NULL; | 832 | struct sk_buff *iovb; |
918 | 833 | while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) | |
919 | scq = kmalloc(sizeof(scq_info), GFP_KERNEL); | 834 | dev_kfree_skb_any(iovb); |
920 | if (scq == NULL) | 835 | } |
921 | return NULL; | 836 | if (error >= 15) { |
922 | scq->org = kmalloc(2 * size, GFP_KERNEL); | 837 | struct sk_buff *sb; |
923 | if (scq->org == NULL) | 838 | while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL) |
924 | { | 839 | dev_kfree_skb_any(sb); |
925 | kfree(scq); | 840 | free_scq(card, card->scq0, NULL); |
926 | return NULL; | 841 | } |
927 | } | 842 | if (error >= 14) { |
928 | scq->skb = kmalloc(sizeof(struct sk_buff *) * | 843 | struct sk_buff *lb; |
929 | (size / NS_SCQE_SIZE), GFP_KERNEL); | 844 | while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL) |
930 | if (scq->skb == NULL) | 845 | dev_kfree_skb_any(lb); |
931 | { | 846 | } |
932 | kfree(scq->org); | 847 | if (error >= 13) { |
933 | kfree(scq); | 848 | struct sk_buff *hb; |
934 | return NULL; | 849 | while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) |
935 | } | 850 | dev_kfree_skb_any(hb); |
936 | scq->num_entries = size / NS_SCQE_SIZE; | 851 | } |
937 | scq->base = (ns_scqe *) ALIGN_ADDRESS(scq->org, size); | 852 | if (error >= 12) { |
938 | scq->next = scq->base; | 853 | kfree(card->rsq.org); |
939 | scq->last = scq->base + (scq->num_entries - 1); | 854 | } |
940 | scq->tail = scq->last; | 855 | if (error >= 11) { |
941 | scq->scd = scd; | 856 | kfree(card->tsq.org); |
942 | scq->num_entries = size / NS_SCQE_SIZE; | 857 | } |
943 | scq->tbd_count = 0; | 858 | if (error >= 10) { |
944 | init_waitqueue_head(&scq->scqfull_waitq); | 859 | free_irq(card->pcidev->irq, card); |
945 | scq->full = 0; | 860 | } |
946 | spin_lock_init(&scq->lock); | 861 | if (error >= 4) { |
947 | 862 | iounmap(card->membase); | |
948 | for (i = 0; i < scq->num_entries; i++) | 863 | } |
949 | scq->skb[i] = NULL; | 864 | if (error >= 3) { |
950 | 865 | pci_disable_device(card->pcidev); | |
951 | return scq; | 866 | kfree(card); |
867 | } | ||
952 | } | 868 | } |
953 | 869 | ||
954 | 870 | static scq_info *get_scq(ns_dev *card, int size, u32 scd) | |
871 | { | ||
872 | scq_info *scq; | ||
873 | int i; | ||
874 | |||
875 | if (size != VBR_SCQSIZE && size != CBR_SCQSIZE) | ||
876 | return NULL; | ||
877 | |||
878 | scq = kmalloc(sizeof(scq_info), GFP_KERNEL); | ||
879 | if (!scq) | ||
880 | return NULL; | ||
881 | scq->org = pci_alloc_consistent(card->pcidev, 2 * size, &scq->dma); | ||
882 | if (!scq->org) { | ||
883 | kfree(scq); | ||
884 | return NULL; | ||
885 | } | ||
886 | scq->skb = kmalloc(sizeof(struct sk_buff *) * | ||
887 | (size / NS_SCQE_SIZE), GFP_KERNEL); | ||
888 | if (!scq->skb) { | ||
889 | kfree(scq->org); | ||
890 | kfree(scq); | ||
891 | return NULL; | ||
892 | } | ||
893 | scq->num_entries = size / NS_SCQE_SIZE; | ||
894 | scq->base = PTR_ALIGN(scq->org, size); | ||
895 | scq->next = scq->base; | ||
896 | scq->last = scq->base + (scq->num_entries - 1); | ||
897 | scq->tail = scq->last; | ||
898 | scq->scd = scd; | ||
899 | scq->num_entries = size / NS_SCQE_SIZE; | ||
900 | scq->tbd_count = 0; | ||
901 | init_waitqueue_head(&scq->scqfull_waitq); | ||
902 | scq->full = 0; | ||
903 | spin_lock_init(&scq->lock); | ||
904 | |||
905 | for (i = 0; i < scq->num_entries; i++) | ||
906 | scq->skb[i] = NULL; | ||
907 | |||
908 | return scq; | ||
909 | } | ||
955 | 910 | ||
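get_scq() above over-allocates (2 * size bytes) and rounds the CPU pointer up with PTR_ALIGN() so the queue base sits on a size-aligned boundary, just as the old kmalloc()-based code did with ALIGN_ADDRESS(); the bus address handed to the SAR gets the same rounding, which is also why ns_init_card() programs ALIGN(card->tsq.dma, NS_TSQ_ALIGNMENT) into TSQB. A minimal sketch of the over-allocate-and-align idiom under the same assumption the patch makes, namely that the coherent allocation keeps the same sub-boundary offset in the CPU and bus views; the helper name is illustrative:

#include <linux/kernel.h>	/* PTR_ALIGN, ALIGN */
#include <linux/pci.h>

/* Allocate 'len' bytes of coherent memory whose usable start is aligned to
 * 'align' (a power of two). Callers must keep *raw and *raw_dma around,
 * since pci_free_consistent() needs the original pointer and bus address. */
static void *alloc_aligned_coherent(struct pci_dev *pcidev, size_t len,
				    size_t align, dma_addr_t *hw_base,
				    void **raw, dma_addr_t *raw_dma)
{
	/* over-allocate so the start can be rounded up to the boundary */
	*raw = pci_alloc_consistent(pcidev, len + align, raw_dma);
	if (*raw == NULL)
		return NULL;

	*hw_base = ALIGN(*raw_dma, align);	/* bus address given to the chip */
	return PTR_ALIGN(*raw, align);		/* CPU address used by the driver */
}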
956 | /* For variable rate SCQ vcc must be NULL */ | 911 | /* For variable rate SCQ vcc must be NULL */ |
957 | static void free_scq(scq_info *scq, struct atm_vcc *vcc) | 912 | static void free_scq(ns_dev *card, scq_info *scq, struct atm_vcc *vcc) |
958 | { | 913 | { |
959 | int i; | 914 | int i; |
960 | 915 | ||
961 | if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) | 916 | if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) |
962 | for (i = 0; i < scq->num_entries; i++) | 917 | for (i = 0; i < scq->num_entries; i++) { |
963 | { | 918 | if (scq->skb[i] != NULL) { |
964 | if (scq->skb[i] != NULL) | 919 | vcc = ATM_SKB(scq->skb[i])->vcc; |
965 | { | 920 | if (vcc->pop != NULL) |
966 | vcc = ATM_SKB(scq->skb[i])->vcc; | 921 | vcc->pop(vcc, scq->skb[i]); |
967 | if (vcc->pop != NULL) | 922 | else |
968 | vcc->pop(vcc, scq->skb[i]); | 923 | dev_kfree_skb_any(scq->skb[i]); |
969 | else | 924 | } |
970 | dev_kfree_skb_any(scq->skb[i]); | 925 | } else { /* vcc must be != NULL */ |
971 | } | 926 | |
972 | } | 927 | if (vcc == NULL) { |
973 | else /* vcc must be != NULL */ | 928 | printk |
974 | { | 929 | ("nicstar: free_scq() called with vcc == NULL for fixed rate scq."); |
975 | if (vcc == NULL) | 930 | for (i = 0; i < scq->num_entries; i++) |
976 | { | 931 | dev_kfree_skb_any(scq->skb[i]); |
977 | printk("nicstar: free_scq() called with vcc == NULL for fixed rate scq."); | 932 | } else |
978 | for (i = 0; i < scq->num_entries; i++) | 933 | for (i = 0; i < scq->num_entries; i++) { |
979 | dev_kfree_skb_any(scq->skb[i]); | 934 | if (scq->skb[i] != NULL) { |
980 | } | 935 | if (vcc->pop != NULL) |
981 | else | 936 | vcc->pop(vcc, scq->skb[i]); |
982 | for (i = 0; i < scq->num_entries; i++) | 937 | else |
983 | { | 938 | dev_kfree_skb_any(scq->skb[i]); |
984 | if (scq->skb[i] != NULL) | 939 | } |
985 | { | 940 | } |
986 | if (vcc->pop != NULL) | 941 | } |
987 | vcc->pop(vcc, scq->skb[i]); | 942 | kfree(scq->skb); |
988 | else | 943 | pci_free_consistent(card->pcidev, |
989 | dev_kfree_skb_any(scq->skb[i]); | 944 | 2 * (scq->num_entries == VBR_SCQ_NUM_ENTRIES ? |
990 | } | 945 | VBR_SCQSIZE : CBR_SCQSIZE), |
991 | } | 946 | scq->org, scq->dma); |
992 | } | 947 | kfree(scq); |
993 | kfree(scq->skb); | ||
994 | kfree(scq->org); | ||
995 | kfree(scq); | ||
996 | } | 948 | } |
997 | 949 | ||
998 | |||
999 | |||
1000 | /* The handles passed must be pointers to the sk_buff containing the small | 950 | /* The handles passed must be pointers to the sk_buff containing the small |
1001 | or large buffer(s) cast to u32. */ | 951 | or large buffer(s) cast to u32. */ |
1002 | static void push_rxbufs(ns_dev *card, struct sk_buff *skb) | 952 | static void push_rxbufs(ns_dev * card, struct sk_buff *skb) |
1003 | { | 953 | { |
1004 | struct ns_skb_cb *cb = NS_SKB_CB(skb); | 954 | struct sk_buff *handle1, *handle2; |
1005 | u32 handle1, addr1; | 955 | u32 id1 = 0, id2 = 0; |
1006 | u32 handle2, addr2; | 956 | u32 addr1, addr2; |
1007 | u32 stat; | 957 | u32 stat; |
1008 | unsigned long flags; | 958 | unsigned long flags; |
1009 | 959 | int err; | |
1010 | /* *BARF* */ | 960 | |
1011 | handle2 = addr2 = 0; | 961 | /* *BARF* */ |
1012 | handle1 = (u32)skb; | 962 | handle2 = NULL; |
1013 | addr1 = (u32)virt_to_bus(skb->data); | 963 | addr2 = 0; |
964 | handle1 = skb; | ||
965 | addr1 = pci_map_single(card->pcidev, | ||
966 | skb->data, | ||
967 | (NS_PRV_BUFTYPE(skb) == BUF_SM | ||
968 | ? NS_SMSKBSIZE : NS_LGSKBSIZE), | ||
969 | PCI_DMA_TODEVICE); | ||
970 | NS_PRV_DMA(skb) = addr1; /* save so we can unmap later */ | ||
1014 | 971 | ||
1015 | #ifdef GENERAL_DEBUG | 972 | #ifdef GENERAL_DEBUG |
1016 | if (!addr1) | 973 | if (!addr1) |
1017 | printk("nicstar%d: push_rxbufs called with addr1 = 0.\n", card->index); | 974 | printk("nicstar%d: push_rxbufs called with addr1 = 0.\n", |
975 | card->index); | ||
1018 | #endif /* GENERAL_DEBUG */ | 976 | #endif /* GENERAL_DEBUG */ |
1019 | 977 | ||
1020 | stat = readl(card->membase + STAT); | 978 | stat = readl(card->membase + STAT); |
1021 | card->sbfqc = ns_stat_sfbqc_get(stat); | 979 | card->sbfqc = ns_stat_sfbqc_get(stat); |
1022 | card->lbfqc = ns_stat_lfbqc_get(stat); | 980 | card->lbfqc = ns_stat_lfbqc_get(stat); |
1023 | if (cb->buf_type == BUF_SM) | 981 | if (NS_PRV_BUFTYPE(skb) == BUF_SM) { |
1024 | { | 982 | if (!addr2) { |
1025 | if (!addr2) | 983 | if (card->sm_addr) { |
1026 | { | 984 | addr2 = card->sm_addr; |
1027 | if (card->sm_addr) | 985 | handle2 = card->sm_handle; |
1028 | { | 986 | card->sm_addr = 0x00000000; |
1029 | addr2 = card->sm_addr; | 987 | card->sm_handle = 0x00000000; |
1030 | handle2 = card->sm_handle; | 988 | } else { /* (!sm_addr) */ |
1031 | card->sm_addr = 0x00000000; | 989 | |
1032 | card->sm_handle = 0x00000000; | 990 | card->sm_addr = addr1; |
1033 | } | 991 | card->sm_handle = handle1; |
1034 | else /* (!sm_addr) */ | 992 | } |
1035 | { | 993 | } |
1036 | card->sm_addr = addr1; | 994 | } else { /* buf_type == BUF_LG */ |
1037 | card->sm_handle = handle1; | 995 | |
1038 | } | 996 | if (!addr2) { |
1039 | } | 997 | if (card->lg_addr) { |
1040 | } | 998 | addr2 = card->lg_addr; |
1041 | else /* buf_type == BUF_LG */ | 999 | handle2 = card->lg_handle; |
1042 | { | 1000 | card->lg_addr = 0x00000000; |
1043 | if (!addr2) | 1001 | card->lg_handle = 0x00000000; |
1044 | { | 1002 | } else { /* (!lg_addr) */ |
1045 | if (card->lg_addr) | 1003 | |
1046 | { | 1004 | card->lg_addr = addr1; |
1047 | addr2 = card->lg_addr; | 1005 | card->lg_handle = handle1; |
1048 | handle2 = card->lg_handle; | 1006 | } |
1049 | card->lg_addr = 0x00000000; | 1007 | } |
1050 | card->lg_handle = 0x00000000; | 1008 | } |
1051 | } | 1009 | |
1052 | else /* (!lg_addr) */ | 1010 | if (addr2) { |
1053 | { | 1011 | if (NS_PRV_BUFTYPE(skb) == BUF_SM) { |
1054 | card->lg_addr = addr1; | 1012 | if (card->sbfqc >= card->sbnr.max) { |
1055 | card->lg_handle = handle1; | 1013 | skb_unlink(handle1, &card->sbpool.queue); |
1056 | } | 1014 | dev_kfree_skb_any(handle1); |
1057 | } | 1015 | skb_unlink(handle2, &card->sbpool.queue); |
1058 | } | 1016 | dev_kfree_skb_any(handle2); |
1059 | 1017 | return; | |
1060 | if (addr2) | 1018 | } else |
1061 | { | 1019 | card->sbfqc += 2; |
1062 | if (cb->buf_type == BUF_SM) | 1020 | } else { /* (buf_type == BUF_LG) */ |
1063 | { | 1021 | |
1064 | if (card->sbfqc >= card->sbnr.max) | 1022 | if (card->lbfqc >= card->lbnr.max) { |
1065 | { | 1023 | skb_unlink(handle1, &card->lbpool.queue); |
1066 | skb_unlink((struct sk_buff *) handle1, &card->sbpool.queue); | 1024 | dev_kfree_skb_any(handle1); |
1067 | dev_kfree_skb_any((struct sk_buff *) handle1); | 1025 | skb_unlink(handle2, &card->lbpool.queue); |
1068 | skb_unlink((struct sk_buff *) handle2, &card->sbpool.queue); | 1026 | dev_kfree_skb_any(handle2); |
1069 | dev_kfree_skb_any((struct sk_buff *) handle2); | 1027 | return; |
1070 | return; | 1028 | } else |
1071 | } | 1029 | card->lbfqc += 2; |
1072 | else | 1030 | } |
1073 | card->sbfqc += 2; | 1031 | |
1074 | } | 1032 | do { |
1075 | else /* (buf_type == BUF_LG) */ | 1033 | if (!idr_pre_get(&card->idr, GFP_ATOMIC)) { |
1076 | { | 1034 | printk(KERN_ERR |
1077 | if (card->lbfqc >= card->lbnr.max) | 1035 | "nicstar%d: no free memory for idr\n", |
1078 | { | 1036 | card->index); |
1079 | skb_unlink((struct sk_buff *) handle1, &card->lbpool.queue); | 1037 | goto out; |
1080 | dev_kfree_skb_any((struct sk_buff *) handle1); | 1038 | } |
1081 | skb_unlink((struct sk_buff *) handle2, &card->lbpool.queue); | 1039 | |
1082 | dev_kfree_skb_any((struct sk_buff *) handle2); | 1040 | if (!id1) |
1083 | return; | 1041 | err = idr_get_new_above(&card->idr, handle1, 0, &id1); |
1084 | } | 1042 | |
1085 | else | 1043 | if (!id2 && err == 0) |
1086 | card->lbfqc += 2; | 1044 | err = idr_get_new_above(&card->idr, handle2, 0, &id2); |
1087 | } | 1045 | |
1088 | 1046 | } while (err == -EAGAIN); | |
1089 | spin_lock_irqsave(&card->res_lock, flags); | 1047 | |
1090 | 1048 | if (err) | |
1091 | while (CMD_BUSY(card)); | 1049 | goto out; |
1092 | writel(addr2, card->membase + DR3); | 1050 | |
1093 | writel(handle2, card->membase + DR2); | 1051 | spin_lock_irqsave(&card->res_lock, flags); |
1094 | writel(addr1, card->membase + DR1); | 1052 | while (CMD_BUSY(card)) ; |
1095 | writel(handle1, card->membase + DR0); | 1053 | writel(addr2, card->membase + DR3); |
1096 | writel(NS_CMD_WRITE_FREEBUFQ | cb->buf_type, card->membase + CMD); | 1054 | writel(id2, card->membase + DR2); |
1097 | 1055 | writel(addr1, card->membase + DR1); | |
1098 | spin_unlock_irqrestore(&card->res_lock, flags); | 1056 | writel(id1, card->membase + DR0); |
1099 | 1057 | writel(NS_CMD_WRITE_FREEBUFQ | NS_PRV_BUFTYPE(skb), | |
1100 | XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n", card->index, | 1058 | card->membase + CMD); |
1101 | (cb->buf_type == BUF_SM ? "small" : "large"), addr1, addr2); | 1059 | spin_unlock_irqrestore(&card->res_lock, flags); |
1102 | } | 1060 | |
1103 | 1061 | XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n", | |
1104 | if (!card->efbie && card->sbfqc >= card->sbnr.min && | 1062 | card->index, |
1105 | card->lbfqc >= card->lbnr.min) | 1063 | (NS_PRV_BUFTYPE(skb) == BUF_SM ? "small" : "large"), |
1106 | { | 1064 | addr1, addr2); |
1107 | card->efbie = 1; | 1065 | } |
1108 | writel((readl(card->membase + CFG) | NS_CFG_EFBIE), card->membase + CFG); | 1066 | |
1109 | } | 1067 | if (!card->efbie && card->sbfqc >= card->sbnr.min && |
1110 | 1068 | card->lbfqc >= card->lbnr.min) { | |
1111 | return; | 1069 | card->efbie = 1; |
1070 | writel((readl(card->membase + CFG) | NS_CFG_EFBIE), | ||
1071 | card->membase + CFG); | ||
1072 | } | ||
1073 | |||
1074 | out: | ||
1075 | return; | ||
1112 | } | 1076 | } |
1113 | 1077 | ||
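push_rxbufs() above is the heart of the 64-bit fix: the SAR's free-buffer queue only stores 32-bit handles, so instead of casting the sk_buff pointer to u32 the driver now allocates a small integer id from the per-card idr and writes that id (DR0/DR2) together with the pci_map_single() bus address of the data (DR1/DR3), stashing the mapping in NS_PRV_DMA(skb) for the eventual unmap. The receive path, which is not part of this hunk, presumably turns the id back into the sk_buff with idr_find(). A minimal sketch of the old two-step idr API used here (this kernel predates idr_alloc()); ns_buf_to_id is a hypothetical helper, not a function in the driver:

#include <linux/idr.h>
#include <linux/skbuff.h>

/* Map an sk_buff to a 32-bit id the hardware can hand back later. */
static int ns_buf_to_id(struct idr *idr, struct sk_buff *skb, u32 *id_out)
{
	int id, err;

	do {
		/* pre-load the idr so the allocation below cannot sleep */
		if (!idr_pre_get(idr, GFP_ATOMIC))
			return -ENOMEM;
		err = idr_get_new_above(idr, skb, 0, &id);
	} while (err == -EAGAIN);
	if (err)
		return err;

	/* small non-negative integer, fits the handle register;
	   idr_find(idr, id) returns 'skb' again on the receive side */
	*id_out = id;
	return 0;
}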
1114 | |||
1115 | |||
1116 | static irqreturn_t ns_irq_handler(int irq, void *dev_id) | 1078 | static irqreturn_t ns_irq_handler(int irq, void *dev_id) |
1117 | { | 1079 | { |
1118 | u32 stat_r; | 1080 | u32 stat_r; |
1119 | ns_dev *card; | 1081 | ns_dev *card; |
1120 | struct atm_dev *dev; | 1082 | struct atm_dev *dev; |
1121 | unsigned long flags; | 1083 | unsigned long flags; |
1122 | 1084 | ||
1123 | card = (ns_dev *) dev_id; | 1085 | card = (ns_dev *) dev_id; |
1124 | dev = card->atmdev; | 1086 | dev = card->atmdev; |
1125 | card->intcnt++; | 1087 | card->intcnt++; |
1126 | 1088 | ||
1127 | PRINTK("nicstar%d: NICStAR generated an interrupt\n", card->index); | 1089 | PRINTK("nicstar%d: NICStAR generated an interrupt\n", card->index); |
1128 | 1090 | ||
1129 | spin_lock_irqsave(&card->int_lock, flags); | 1091 | spin_lock_irqsave(&card->int_lock, flags); |
1130 | 1092 | ||
1131 | stat_r = readl(card->membase + STAT); | 1093 | stat_r = readl(card->membase + STAT); |
1132 | 1094 | ||
1133 | /* Transmit Status Indicator has been written to T. S. Queue */ | 1095 | /* Transmit Status Indicator has been written to T. S. Queue */ |
1134 | if (stat_r & NS_STAT_TSIF) | 1096 | if (stat_r & NS_STAT_TSIF) { |
1135 | { | 1097 | TXPRINTK("nicstar%d: TSI interrupt\n", card->index); |
1136 | TXPRINTK("nicstar%d: TSI interrupt\n", card->index); | 1098 | process_tsq(card); |
1137 | process_tsq(card); | 1099 | writel(NS_STAT_TSIF, card->membase + STAT); |
1138 | writel(NS_STAT_TSIF, card->membase + STAT); | 1100 | } |
1139 | } | 1101 | |
1140 | 1102 | /* Incomplete CS-PDU has been transmitted */ | |
1141 | /* Incomplete CS-PDU has been transmitted */ | 1103 | if (stat_r & NS_STAT_TXICP) { |
1142 | if (stat_r & NS_STAT_TXICP) | 1104 | writel(NS_STAT_TXICP, card->membase + STAT); |
1143 | { | 1105 | TXPRINTK("nicstar%d: Incomplete CS-PDU transmitted.\n", |
1144 | writel(NS_STAT_TXICP, card->membase + STAT); | 1106 | card->index); |
1145 | TXPRINTK("nicstar%d: Incomplete CS-PDU transmitted.\n", | 1107 | } |
1146 | card->index); | 1108 | |
1147 | } | 1109 | /* Transmit Status Queue 7/8 full */ |
1148 | 1110 | if (stat_r & NS_STAT_TSQF) { | |
1149 | /* Transmit Status Queue 7/8 full */ | 1111 | writel(NS_STAT_TSQF, card->membase + STAT); |
1150 | if (stat_r & NS_STAT_TSQF) | 1112 | PRINTK("nicstar%d: TSQ full.\n", card->index); |
1151 | { | 1113 | process_tsq(card); |
1152 | writel(NS_STAT_TSQF, card->membase + STAT); | 1114 | } |
1153 | PRINTK("nicstar%d: TSQ full.\n", card->index); | 1115 | |
1154 | process_tsq(card); | 1116 | /* Timer overflow */ |
1155 | } | 1117 | if (stat_r & NS_STAT_TMROF) { |
1156 | 1118 | writel(NS_STAT_TMROF, card->membase + STAT); | |
1157 | /* Timer overflow */ | 1119 | PRINTK("nicstar%d: Timer overflow.\n", card->index); |
1158 | if (stat_r & NS_STAT_TMROF) | 1120 | } |
1159 | { | 1121 | |
1160 | writel(NS_STAT_TMROF, card->membase + STAT); | 1122 | /* PHY device interrupt signal active */ |
1161 | PRINTK("nicstar%d: Timer overflow.\n", card->index); | 1123 | if (stat_r & NS_STAT_PHYI) { |
1162 | } | 1124 | writel(NS_STAT_PHYI, card->membase + STAT); |
1163 | 1125 | PRINTK("nicstar%d: PHY interrupt.\n", card->index); | |
1164 | /* PHY device interrupt signal active */ | 1126 | if (dev->phy && dev->phy->interrupt) { |
1165 | if (stat_r & NS_STAT_PHYI) | 1127 | dev->phy->interrupt(dev); |
1166 | { | 1128 | } |
1167 | writel(NS_STAT_PHYI, card->membase + STAT); | 1129 | } |
1168 | PRINTK("nicstar%d: PHY interrupt.\n", card->index); | 1130 | |
1169 | if (dev->phy && dev->phy->interrupt) { | 1131 | /* Small Buffer Queue is full */ |
1170 | dev->phy->interrupt(dev); | 1132 | if (stat_r & NS_STAT_SFBQF) { |
1171 | } | 1133 | writel(NS_STAT_SFBQF, card->membase + STAT); |
1172 | } | 1134 | printk("nicstar%d: Small free buffer queue is full.\n", |
1173 | 1135 | card->index); | |
1174 | /* Small Buffer Queue is full */ | 1136 | } |
1175 | if (stat_r & NS_STAT_SFBQF) | 1137 | |
1176 | { | 1138 | /* Large Buffer Queue is full */ |
1177 | writel(NS_STAT_SFBQF, card->membase + STAT); | 1139 | if (stat_r & NS_STAT_LFBQF) { |
1178 | printk("nicstar%d: Small free buffer queue is full.\n", card->index); | 1140 | writel(NS_STAT_LFBQF, card->membase + STAT); |
1179 | } | 1141 | printk("nicstar%d: Large free buffer queue is full.\n", |
1180 | 1142 | card->index); | |
1181 | /* Large Buffer Queue is full */ | 1143 | } |
1182 | if (stat_r & NS_STAT_LFBQF) | 1144 | |
1183 | { | 1145 | /* Receive Status Queue is full */ |
1184 | writel(NS_STAT_LFBQF, card->membase + STAT); | 1146 | if (stat_r & NS_STAT_RSQF) { |
1185 | printk("nicstar%d: Large free buffer queue is full.\n", card->index); | 1147 | writel(NS_STAT_RSQF, card->membase + STAT); |
1186 | } | 1148 | printk("nicstar%d: RSQ full.\n", card->index); |
1187 | 1149 | process_rsq(card); | |
1188 | /* Receive Status Queue is full */ | 1150 | } |
1189 | if (stat_r & NS_STAT_RSQF) | 1151 | |
1190 | { | 1152 | /* Complete CS-PDU received */ |
1191 | writel(NS_STAT_RSQF, card->membase + STAT); | 1153 | if (stat_r & NS_STAT_EOPDU) { |
1192 | printk("nicstar%d: RSQ full.\n", card->index); | 1154 | RXPRINTK("nicstar%d: End of CS-PDU received.\n", card->index); |
1193 | process_rsq(card); | 1155 | process_rsq(card); |
1194 | } | 1156 | writel(NS_STAT_EOPDU, card->membase + STAT); |
1195 | 1157 | } | |
1196 | /* Complete CS-PDU received */ | 1158 | |
1197 | if (stat_r & NS_STAT_EOPDU) | 1159 | /* Raw cell received */ |
1198 | { | 1160 | if (stat_r & NS_STAT_RAWCF) { |
1199 | RXPRINTK("nicstar%d: End of CS-PDU received.\n", card->index); | 1161 | writel(NS_STAT_RAWCF, card->membase + STAT); |
1200 | process_rsq(card); | ||
1201 | writel(NS_STAT_EOPDU, card->membase + STAT); | ||
1202 | } | ||
1203 | |||
1204 | /* Raw cell received */ | ||
1205 | if (stat_r & NS_STAT_RAWCF) | ||
1206 | { | ||
1207 | writel(NS_STAT_RAWCF, card->membase + STAT); | ||
1208 | #ifndef RCQ_SUPPORT | 1162 | #ifndef RCQ_SUPPORT |
1209 | printk("nicstar%d: Raw cell received and no support yet...\n", | 1163 | printk("nicstar%d: Raw cell received and no support yet...\n", |
1210 | card->index); | 1164 | card->index); |
1211 | #endif /* RCQ_SUPPORT */ | 1165 | #endif /* RCQ_SUPPORT */ |
1212 | /* NOTE: the following procedure may keep a raw cell pending until the | 1166 | /* NOTE: the following procedure may keep a raw cell pending until the |
1213 | next interrupt. As this preliminary support is only meant to | 1167 | next interrupt. As this preliminary support is only meant to |
1214 | avoid buffer leakage, this is not an issue. */ | 1168 | avoid buffer leakage, this is not an issue. */ |
1215 | while (readl(card->membase + RAWCT) != card->rawch) | 1169 | while (readl(card->membase + RAWCT) != card->rawch) { |
1216 | { | 1170 | |
1217 | ns_rcqe *rawcell; | 1171 | if (ns_rcqe_islast(card->rawcell)) { |
1218 | 1172 | struct sk_buff *oldbuf; | |
1219 | rawcell = (ns_rcqe *) bus_to_virt(card->rawch); | 1173 | |
1220 | if (ns_rcqe_islast(rawcell)) | 1174 | oldbuf = card->rcbuf; |
1221 | { | 1175 | card->rcbuf = idr_find(&card->idr, |
1222 | struct sk_buff *oldbuf; | 1176 | ns_rcqe_nextbufhandle(card->rawcell)); |
1223 | 1177 | card->rawch = NS_PRV_DMA(card->rcbuf); | |
1224 | oldbuf = card->rcbuf; | 1178 | card->rawcell = (struct ns_rcqe *) |
1225 | card->rcbuf = (struct sk_buff *) ns_rcqe_nextbufhandle(rawcell); | 1179 | card->rcbuf->data; |
1226 | card->rawch = (u32) virt_to_bus(card->rcbuf->data); | 1180 | recycle_rx_buf(card, oldbuf); |
1227 | recycle_rx_buf(card, oldbuf); | 1181 | } else { |
1228 | } | 1182 | card->rawch += NS_RCQE_SIZE; |
1229 | else | 1183 | card->rawcell++; |
1230 | card->rawch += NS_RCQE_SIZE; | 1184 | } |
1231 | } | 1185 | } |
1232 | } | 1186 | } |
1233 | 1187 | ||
1234 | /* Small buffer queue is empty */ | 1188 | /* Small buffer queue is empty */ |
1235 | if (stat_r & NS_STAT_SFBQE) | 1189 | if (stat_r & NS_STAT_SFBQE) { |
1236 | { | 1190 | int i; |
1237 | int i; | 1191 | struct sk_buff *sb; |
1238 | struct sk_buff *sb; | 1192 | |
1239 | 1193 | writel(NS_STAT_SFBQE, card->membase + STAT); | |
1240 | writel(NS_STAT_SFBQE, card->membase + STAT); | 1194 | printk("nicstar%d: Small free buffer queue empty.\n", |
1241 | printk("nicstar%d: Small free buffer queue empty.\n", | 1195 | card->index); |
1242 | card->index); | 1196 | for (i = 0; i < card->sbnr.min; i++) { |
1243 | for (i = 0; i < card->sbnr.min; i++) | 1197 | sb = dev_alloc_skb(NS_SMSKBSIZE); |
1244 | { | 1198 | if (sb == NULL) { |
1245 | sb = dev_alloc_skb(NS_SMSKBSIZE); | 1199 | writel(readl(card->membase + CFG) & |
1246 | if (sb == NULL) | 1200 | ~NS_CFG_EFBIE, card->membase + CFG); |
1247 | { | 1201 | card->efbie = 0; |
1248 | writel(readl(card->membase + CFG) & ~NS_CFG_EFBIE, card->membase + CFG); | 1202 | break; |
1249 | card->efbie = 0; | 1203 | } |
1250 | break; | 1204 | NS_PRV_BUFTYPE(sb) = BUF_SM; |
1251 | } | 1205 | skb_queue_tail(&card->sbpool.queue, sb); |
1252 | NS_SKB_CB(sb)->buf_type = BUF_SM; | 1206 | skb_reserve(sb, NS_AAL0_HEADER); |
1253 | skb_queue_tail(&card->sbpool.queue, sb); | 1207 | push_rxbufs(card, sb); |
1254 | skb_reserve(sb, NS_AAL0_HEADER); | 1208 | } |
1255 | push_rxbufs(card, sb); | 1209 | card->sbfqc = i; |
1256 | } | 1210 | process_rsq(card); |
1257 | card->sbfqc = i; | 1211 | } |
1258 | process_rsq(card); | 1212 | |
1259 | } | 1213 | /* Large buffer queue empty */ |
1260 | 1214 | if (stat_r & NS_STAT_LFBQE) { | |
1261 | /* Large buffer queue empty */ | 1215 | int i; |
1262 | if (stat_r & NS_STAT_LFBQE) | 1216 | struct sk_buff *lb; |
1263 | { | 1217 | |
1264 | int i; | 1218 | writel(NS_STAT_LFBQE, card->membase + STAT); |
1265 | struct sk_buff *lb; | 1219 | printk("nicstar%d: Large free buffer queue empty.\n", |
1266 | 1220 | card->index); | |
1267 | writel(NS_STAT_LFBQE, card->membase + STAT); | 1221 | for (i = 0; i < card->lbnr.min; i++) { |
1268 | printk("nicstar%d: Large free buffer queue empty.\n", | 1222 | lb = dev_alloc_skb(NS_LGSKBSIZE); |
1269 | card->index); | 1223 | if (lb == NULL) { |
1270 | for (i = 0; i < card->lbnr.min; i++) | 1224 | writel(readl(card->membase + CFG) & |
1271 | { | 1225 | ~NS_CFG_EFBIE, card->membase + CFG); |
1272 | lb = dev_alloc_skb(NS_LGSKBSIZE); | 1226 | card->efbie = 0; |
1273 | if (lb == NULL) | 1227 | break; |
1274 | { | 1228 | } |
1275 | writel(readl(card->membase + CFG) & ~NS_CFG_EFBIE, card->membase + CFG); | 1229 | NS_PRV_BUFTYPE(lb) = BUF_LG; |
1276 | card->efbie = 0; | 1230 | skb_queue_tail(&card->lbpool.queue, lb); |
1277 | break; | 1231 | skb_reserve(lb, NS_SMBUFSIZE); |
1278 | } | 1232 | push_rxbufs(card, lb); |
1279 | NS_SKB_CB(lb)->buf_type = BUF_LG; | 1233 | } |
1280 | skb_queue_tail(&card->lbpool.queue, lb); | 1234 | card->lbfqc = i; |
1281 | skb_reserve(lb, NS_SMBUFSIZE); | 1235 | process_rsq(card); |
1282 | push_rxbufs(card, lb); | 1236 | } |
1283 | } | 1237 | |
1284 | card->lbfqc = i; | 1238 | /* Receive Status Queue is 7/8 full */ |
1285 | process_rsq(card); | 1239 | if (stat_r & NS_STAT_RSQAF) { |
1286 | } | 1240 | writel(NS_STAT_RSQAF, card->membase + STAT); |
1287 | 1241 | RXPRINTK("nicstar%d: RSQ almost full.\n", card->index); | |
1288 | /* Receive Status Queue is 7/8 full */ | 1242 | process_rsq(card); |
1289 | if (stat_r & NS_STAT_RSQAF) | 1243 | } |
1290 | { | 1244 | |
1291 | writel(NS_STAT_RSQAF, card->membase + STAT); | 1245 | spin_unlock_irqrestore(&card->int_lock, flags); |
1292 | RXPRINTK("nicstar%d: RSQ almost full.\n", card->index); | 1246 | PRINTK("nicstar%d: end of interrupt service\n", card->index); |
1293 | process_rsq(card); | 1247 | return IRQ_HANDLED; |
1294 | } | ||
1295 | |||
1296 | spin_unlock_irqrestore(&card->int_lock, flags); | ||
1297 | PRINTK("nicstar%d: end of interrupt service\n", card->index); | ||
1298 | return IRQ_HANDLED; | ||
1299 | } | 1248 | } |
1300 | 1249 | ||
1301 | |||
1302 | |||
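
[Editorial note on the hunk above, not part of the patch.] The interrupt handler follows one pattern throughout: read STAT once, test each status bit in the snapshot, service the condition, and acknowledge it by writing that same bit back to STAT. Below is a minimal, self-contained C sketch of that read/test/acknowledge flow; the bit names and the plain variable standing in for the memory-mapped register are invented for illustration and are not the driver's code.

    #include <stdint.h>
    #include <stdio.h>

    #define STAT_TSIF 0x01u  /* "transmit status" bit, invented for this sketch */
    #define STAT_RSQF 0x02u  /* "receive queue full" bit, invented for this sketch */

    static uint32_t fake_stat = STAT_TSIF | STAT_RSQF; /* stands in for readl(STAT) */

    static void ack(uint32_t bit)
    {
        /* Writing the bit back clears the condition, like writel(bit, STAT). */
        fake_stat &= ~bit;
    }

    static void irq_handler(void)
    {
        uint32_t stat_r = fake_stat;    /* one snapshot of the status register */

        if (stat_r & STAT_TSIF) {
            printf("TSI: process transmit status queue\n");
            ack(STAT_TSIF);
        }
        if (stat_r & STAT_RSQF) {
            printf("RSQF: process receive status queue\n");
            ack(STAT_RSQF);
        }
    }

    int main(void)
    {
        irq_handler();
        return 0;
    }
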
1303 | static int ns_open(struct atm_vcc *vcc) | 1250 | static int ns_open(struct atm_vcc *vcc) |
1304 | { | 1251 | { |
1305 | ns_dev *card; | 1252 | ns_dev *card; |
1306 | vc_map *vc; | 1253 | vc_map *vc; |
1307 | unsigned long tmpl, modl; | 1254 | unsigned long tmpl, modl; |
1308 | int tcr, tcra; /* target cell rate, and absolute value */ | 1255 | int tcr, tcra; /* target cell rate, and absolute value */ |
1309 | int n = 0; /* Number of entries in the TST. Initialized to remove | 1256 | int n = 0; /* Number of entries in the TST. Initialized to remove |
1310 | the compiler warning. */ | 1257 | the compiler warning. */ |
1311 | u32 u32d[4]; | 1258 | u32 u32d[4]; |
1312 | int frscdi = 0; /* Index of the SCD. Initialized to remove the compiler | 1259 | int frscdi = 0; /* Index of the SCD. Initialized to remove the compiler |
1313 | warning. How I wish compilers were clever enough to | 1260 | warning. How I wish compilers were clever enough to |
1314 | tell which variables can truly be used | 1261 | tell which variables can truly be used |
1315 | uninitialized... */ | 1262 | uninitialized... */ |
1316 | int inuse; /* tx or rx vc already in use by another vcc */ | 1263 | int inuse; /* tx or rx vc already in use by another vcc */ |
1317 | short vpi = vcc->vpi; | 1264 | short vpi = vcc->vpi; |
1318 | int vci = vcc->vci; | 1265 | int vci = vcc->vci; |
1319 | 1266 | ||
1320 | card = (ns_dev *) vcc->dev->dev_data; | 1267 | card = (ns_dev *) vcc->dev->dev_data; |
1321 | PRINTK("nicstar%d: opening vpi.vci %d.%d \n", card->index, (int) vpi, vci); | 1268 | PRINTK("nicstar%d: opening vpi.vci %d.%d \n", card->index, (int)vpi, |
1322 | if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) | 1269 | vci); |
1323 | { | 1270 | if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) { |
1324 | PRINTK("nicstar%d: unsupported AAL.\n", card->index); | 1271 | PRINTK("nicstar%d: unsupported AAL.\n", card->index); |
1325 | return -EINVAL; | 1272 | return -EINVAL; |
1326 | } | 1273 | } |
1327 | 1274 | ||
1328 | vc = &(card->vcmap[vpi << card->vcibits | vci]); | 1275 | vc = &(card->vcmap[vpi << card->vcibits | vci]); |
1329 | vcc->dev_data = vc; | 1276 | vcc->dev_data = vc; |
1330 | 1277 | ||
1331 | inuse = 0; | 1278 | inuse = 0; |
1332 | if (vcc->qos.txtp.traffic_class != ATM_NONE && vc->tx) | 1279 | if (vcc->qos.txtp.traffic_class != ATM_NONE && vc->tx) |
1333 | inuse = 1; | 1280 | inuse = 1; |
1334 | if (vcc->qos.rxtp.traffic_class != ATM_NONE && vc->rx) | 1281 | if (vcc->qos.rxtp.traffic_class != ATM_NONE && vc->rx) |
1335 | inuse += 2; | 1282 | inuse += 2; |
1336 | if (inuse) | 1283 | if (inuse) { |
1337 | { | 1284 | printk("nicstar%d: %s vci already in use.\n", card->index, |
1338 | printk("nicstar%d: %s vci already in use.\n", card->index, | 1285 | inuse == 1 ? "tx" : inuse == 2 ? "rx" : "tx and rx"); |
1339 | inuse == 1 ? "tx" : inuse == 2 ? "rx" : "tx and rx"); | 1286 | return -EINVAL; |
1340 | return -EINVAL; | 1287 | } |
1341 | } | 1288 | |
1342 | 1289 | set_bit(ATM_VF_ADDR, &vcc->flags); | |
1343 | set_bit(ATM_VF_ADDR,&vcc->flags); | 1290 | |
1344 | 1291 | /* NOTE: You are not allowed to modify an open connection's QOS. To change | |
1345 | /* NOTE: You are not allowed to modify an open connection's QOS. To change | 1292 | that, remove the ATM_VF_PARTIAL flag checking. There may be other changes |
1346 | that, remove the ATM_VF_PARTIAL flag checking. There may be other changes | 1293 | needed to do that. */ |
1347 | needed to do that. */ | 1294 | if (!test_bit(ATM_VF_PARTIAL, &vcc->flags)) { |
1348 | if (!test_bit(ATM_VF_PARTIAL,&vcc->flags)) | 1295 | scq_info *scq; |
1349 | { | 1296 | |
1350 | scq_info *scq; | 1297 | set_bit(ATM_VF_PARTIAL, &vcc->flags); |
1351 | 1298 | if (vcc->qos.txtp.traffic_class == ATM_CBR) { | |
1352 | set_bit(ATM_VF_PARTIAL,&vcc->flags); | 1299 | /* Check requested cell rate and availability of SCD */ |
1353 | if (vcc->qos.txtp.traffic_class == ATM_CBR) | 1300 | if (vcc->qos.txtp.max_pcr == 0 && vcc->qos.txtp.pcr == 0 |
1354 | { | 1301 | && vcc->qos.txtp.min_pcr == 0) { |
1355 | /* Check requested cell rate and availability of SCD */ | 1302 | PRINTK |
1356 | if (vcc->qos.txtp.max_pcr == 0 && vcc->qos.txtp.pcr == 0 && | 1303 | ("nicstar%d: trying to open a CBR vc with cell rate = 0 \n", |
1357 | vcc->qos.txtp.min_pcr == 0) | 1304 | card->index); |
1358 | { | 1305 | clear_bit(ATM_VF_PARTIAL, &vcc->flags); |
1359 | PRINTK("nicstar%d: trying to open a CBR vc with cell rate = 0 \n", | 1306 | clear_bit(ATM_VF_ADDR, &vcc->flags); |
1360 | card->index); | 1307 | return -EINVAL; |
1361 | clear_bit(ATM_VF_PARTIAL,&vcc->flags); | 1308 | } |
1362 | clear_bit(ATM_VF_ADDR,&vcc->flags); | 1309 | |
1363 | return -EINVAL; | 1310 | tcr = atm_pcr_goal(&(vcc->qos.txtp)); |
1364 | } | 1311 | tcra = tcr >= 0 ? tcr : -tcr; |
1365 | 1312 | ||
1366 | tcr = atm_pcr_goal(&(vcc->qos.txtp)); | 1313 | PRINTK("nicstar%d: target cell rate = %d.\n", |
1367 | tcra = tcr >= 0 ? tcr : -tcr; | 1314 | card->index, vcc->qos.txtp.max_pcr); |
1368 | 1315 | ||
1369 | PRINTK("nicstar%d: target cell rate = %d.\n", card->index, | 1316 | tmpl = |
1370 | vcc->qos.txtp.max_pcr); | 1317 | (unsigned long)tcra *(unsigned long) |
1371 | 1318 | NS_TST_NUM_ENTRIES; | |
1372 | tmpl = (unsigned long)tcra * (unsigned long)NS_TST_NUM_ENTRIES; | 1319 | modl = tmpl % card->max_pcr; |
1373 | modl = tmpl % card->max_pcr; | 1320 | |
1374 | 1321 | n = (int)(tmpl / card->max_pcr); | |
1375 | n = (int)(tmpl / card->max_pcr); | 1322 | if (tcr > 0) { |
1376 | if (tcr > 0) | 1323 | if (modl > 0) |
1377 | { | 1324 | n++; |
1378 | if (modl > 0) n++; | 1325 | } else if (tcr == 0) { |
1379 | } | 1326 | if ((n = |
1380 | else if (tcr == 0) | 1327 | (card->tst_free_entries - |
1381 | { | 1328 | NS_TST_RESERVED)) <= 0) { |
1382 | if ((n = (card->tst_free_entries - NS_TST_RESERVED)) <= 0) | 1329 | PRINTK |
1383 | { | 1330 | ("nicstar%d: no CBR bandwidth free.\n", |
1384 | PRINTK("nicstar%d: no CBR bandwidth free.\n", card->index); | 1331 | card->index); |
1385 | clear_bit(ATM_VF_PARTIAL,&vcc->flags); | 1332 | clear_bit(ATM_VF_PARTIAL, &vcc->flags); |
1386 | clear_bit(ATM_VF_ADDR,&vcc->flags); | 1333 | clear_bit(ATM_VF_ADDR, &vcc->flags); |
1387 | return -EINVAL; | 1334 | return -EINVAL; |
1388 | } | 1335 | } |
1389 | } | 1336 | } |
1390 | 1337 | ||
1391 | if (n == 0) | 1338 | if (n == 0) { |
1392 | { | 1339 | printk |
1393 | printk("nicstar%d: selected bandwidth < granularity.\n", card->index); | 1340 | ("nicstar%d: selected bandwidth < granularity.\n", |
1394 | clear_bit(ATM_VF_PARTIAL,&vcc->flags); | 1341 | card->index); |
1395 | clear_bit(ATM_VF_ADDR,&vcc->flags); | 1342 | clear_bit(ATM_VF_PARTIAL, &vcc->flags); |
1396 | return -EINVAL; | 1343 | clear_bit(ATM_VF_ADDR, &vcc->flags); |
1397 | } | 1344 | return -EINVAL; |
1398 | 1345 | } | |
1399 | if (n > (card->tst_free_entries - NS_TST_RESERVED)) | 1346 | |
1400 | { | 1347 | if (n > (card->tst_free_entries - NS_TST_RESERVED)) { |
1401 | PRINTK("nicstar%d: not enough free CBR bandwidth.\n", card->index); | 1348 | PRINTK |
1402 | clear_bit(ATM_VF_PARTIAL,&vcc->flags); | 1349 | ("nicstar%d: not enough free CBR bandwidth.\n", |
1403 | clear_bit(ATM_VF_ADDR,&vcc->flags); | 1350 | card->index); |
1404 | return -EINVAL; | 1351 | clear_bit(ATM_VF_PARTIAL, &vcc->flags); |
1405 | } | 1352 | clear_bit(ATM_VF_ADDR, &vcc->flags); |
1406 | else | 1353 | return -EINVAL; |
1407 | card->tst_free_entries -= n; | 1354 | } else |
1408 | 1355 | card->tst_free_entries -= n; | |
1409 | XPRINTK("nicstar%d: writing %d tst entries.\n", card->index, n); | 1356 | |
1410 | for (frscdi = 0; frscdi < NS_FRSCD_NUM; frscdi++) | 1357 | XPRINTK("nicstar%d: writing %d tst entries.\n", |
1411 | { | 1358 | card->index, n); |
1412 | if (card->scd2vc[frscdi] == NULL) | 1359 | for (frscdi = 0; frscdi < NS_FRSCD_NUM; frscdi++) { |
1413 | { | 1360 | if (card->scd2vc[frscdi] == NULL) { |
1414 | card->scd2vc[frscdi] = vc; | 1361 | card->scd2vc[frscdi] = vc; |
1415 | break; | 1362 | break; |
1416 | } | 1363 | } |
1417 | } | 1364 | } |
1418 | if (frscdi == NS_FRSCD_NUM) | 1365 | if (frscdi == NS_FRSCD_NUM) { |
1419 | { | 1366 | PRINTK |
1420 | PRINTK("nicstar%d: no SCD available for CBR channel.\n", card->index); | 1367 | ("nicstar%d: no SCD available for CBR channel.\n", |
1421 | card->tst_free_entries += n; | 1368 | card->index); |
1422 | clear_bit(ATM_VF_PARTIAL,&vcc->flags); | 1369 | card->tst_free_entries += n; |
1423 | clear_bit(ATM_VF_ADDR,&vcc->flags); | 1370 | clear_bit(ATM_VF_PARTIAL, &vcc->flags); |
1424 | return -EBUSY; | 1371 | clear_bit(ATM_VF_ADDR, &vcc->flags); |
1425 | } | 1372 | return -EBUSY; |
1426 | 1373 | } | |
1427 | vc->cbr_scd = NS_FRSCD + frscdi * NS_FRSCD_SIZE; | 1374 | |
1428 | 1375 | vc->cbr_scd = NS_FRSCD + frscdi * NS_FRSCD_SIZE; | |
1429 | scq = get_scq(CBR_SCQSIZE, vc->cbr_scd); | 1376 | |
1430 | if (scq == NULL) | 1377 | scq = get_scq(card, CBR_SCQSIZE, vc->cbr_scd); |
1431 | { | 1378 | if (scq == NULL) { |
1432 | PRINTK("nicstar%d: can't get fixed rate SCQ.\n", card->index); | 1379 | PRINTK("nicstar%d: can't get fixed rate SCQ.\n", |
1433 | card->scd2vc[frscdi] = NULL; | 1380 | card->index); |
1434 | card->tst_free_entries += n; | 1381 | card->scd2vc[frscdi] = NULL; |
1435 | clear_bit(ATM_VF_PARTIAL,&vcc->flags); | 1382 | card->tst_free_entries += n; |
1436 | clear_bit(ATM_VF_ADDR,&vcc->flags); | 1383 | clear_bit(ATM_VF_PARTIAL, &vcc->flags); |
1437 | return -ENOMEM; | 1384 | clear_bit(ATM_VF_ADDR, &vcc->flags); |
1438 | } | 1385 | return -ENOMEM; |
1439 | vc->scq = scq; | 1386 | } |
1440 | u32d[0] = (u32) virt_to_bus(scq->base); | 1387 | vc->scq = scq; |
1441 | u32d[1] = (u32) 0x00000000; | 1388 | u32d[0] = scq_virt_to_bus(scq, scq->base); |
1442 | u32d[2] = (u32) 0xffffffff; | 1389 | u32d[1] = (u32) 0x00000000; |
1443 | u32d[3] = (u32) 0x00000000; | 1390 | u32d[2] = (u32) 0xffffffff; |
1444 | ns_write_sram(card, vc->cbr_scd, u32d, 4); | 1391 | u32d[3] = (u32) 0x00000000; |
1445 | 1392 | ns_write_sram(card, vc->cbr_scd, u32d, 4); | |
1446 | fill_tst(card, n, vc); | 1393 | |
1447 | } | 1394 | fill_tst(card, n, vc); |
1448 | else if (vcc->qos.txtp.traffic_class == ATM_UBR) | 1395 | } else if (vcc->qos.txtp.traffic_class == ATM_UBR) { |
1449 | { | 1396 | vc->cbr_scd = 0x00000000; |
1450 | vc->cbr_scd = 0x00000000; | 1397 | vc->scq = card->scq0; |
1451 | vc->scq = card->scq0; | 1398 | } |
1452 | } | 1399 | |
1453 | 1400 | if (vcc->qos.txtp.traffic_class != ATM_NONE) { | |
1454 | if (vcc->qos.txtp.traffic_class != ATM_NONE) | 1401 | vc->tx = 1; |
1455 | { | 1402 | vc->tx_vcc = vcc; |
1456 | vc->tx = 1; | 1403 | vc->tbd_count = 0; |
1457 | vc->tx_vcc = vcc; | 1404 | } |
1458 | vc->tbd_count = 0; | 1405 | if (vcc->qos.rxtp.traffic_class != ATM_NONE) { |
1459 | } | 1406 | u32 status; |
1460 | if (vcc->qos.rxtp.traffic_class != ATM_NONE) | 1407 | |
1461 | { | 1408 | vc->rx = 1; |
1462 | u32 status; | 1409 | vc->rx_vcc = vcc; |
1463 | 1410 | vc->rx_iov = NULL; | |
1464 | vc->rx = 1; | 1411 | |
1465 | vc->rx_vcc = vcc; | 1412 | /* Open the connection in hardware */ |
1466 | vc->rx_iov = NULL; | 1413 | if (vcc->qos.aal == ATM_AAL5) |
1467 | 1414 | status = NS_RCTE_AAL5 | NS_RCTE_CONNECTOPEN; | |
1468 | /* Open the connection in hardware */ | 1415 | else /* vcc->qos.aal == ATM_AAL0 */ |
1469 | if (vcc->qos.aal == ATM_AAL5) | 1416 | status = NS_RCTE_AAL0 | NS_RCTE_CONNECTOPEN; |
1470 | status = NS_RCTE_AAL5 | NS_RCTE_CONNECTOPEN; | ||
1471 | else /* vcc->qos.aal == ATM_AAL0 */ | ||
1472 | status = NS_RCTE_AAL0 | NS_RCTE_CONNECTOPEN; | ||
1473 | #ifdef RCQ_SUPPORT | 1417 | #ifdef RCQ_SUPPORT |
1474 | status |= NS_RCTE_RAWCELLINTEN; | 1418 | status |= NS_RCTE_RAWCELLINTEN; |
1475 | #endif /* RCQ_SUPPORT */ | 1419 | #endif /* RCQ_SUPPORT */ |
1476 | ns_write_sram(card, NS_RCT + (vpi << card->vcibits | vci) * | 1420 | ns_write_sram(card, |
1477 | NS_RCT_ENTRY_SIZE, &status, 1); | 1421 | NS_RCT + |
1478 | } | 1422 | (vpi << card->vcibits | vci) * |
1479 | 1423 | NS_RCT_ENTRY_SIZE, &status, 1); | |
1480 | } | 1424 | } |
1481 | |||
1482 | set_bit(ATM_VF_READY,&vcc->flags); | ||
1483 | return 0; | ||
1484 | } | ||
1485 | 1425 | ||
1426 | } | ||
1486 | 1427 | ||
1428 | set_bit(ATM_VF_READY, &vcc->flags); | ||
1429 | return 0; | ||
1430 | } | ||
1487 | 1431 | ||
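
[Editorial note on ns_open() above, not part of the patch.] For CBR connections the driver converts the requested cell rate into a number of Transmit Schedule Table slots: n = tcra * NS_TST_NUM_ENTRIES / max_pcr, rounded up when the goal is a positive rate (tcr > 0). The stand-alone sketch below reproduces that arithmetic with made-up example values for the table size and link rate; the real values come from the card and the driver's headers.

    #include <stdio.h>

    /* Example values only; the driver reads max_pcr from the card and
     * NS_TST_NUM_ENTRIES from its headers. */
    #define TST_NUM_ENTRIES 4096
    #define MAX_PCR 353207   /* roughly the OC-3 cell rate, for illustration */

    /* Slots needed so that (n / TST_NUM_ENTRIES) * MAX_PCR >= target cell rate. */
    static int tst_entries_for_rate(long tcra)
    {
        unsigned long tmpl = (unsigned long)tcra * TST_NUM_ENTRIES;
        int n = (int)(tmpl / MAX_PCR);

        if (tmpl % MAX_PCR)   /* round up, as the driver does for tcr > 0 */
            n++;
        return n;
    }

    int main(void)
    {
        printf("10000 cells/s  -> %d TST entries\n", tst_entries_for_rate(10000));
        printf("353207 cells/s -> %d TST entries\n", tst_entries_for_rate(353207));
        return 0;
    }
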
1488 | static void ns_close(struct atm_vcc *vcc) | 1432 | static void ns_close(struct atm_vcc *vcc) |
1489 | { | 1433 | { |
1490 | vc_map *vc; | 1434 | vc_map *vc; |
1491 | ns_dev *card; | 1435 | ns_dev *card; |
1492 | u32 data; | 1436 | u32 data; |
1493 | int i; | 1437 | int i; |
1494 | 1438 | ||
1495 | vc = vcc->dev_data; | 1439 | vc = vcc->dev_data; |
1496 | card = vcc->dev->dev_data; | 1440 | card = vcc->dev->dev_data; |
1497 | PRINTK("nicstar%d: closing vpi.vci %d.%d \n", card->index, | 1441 | PRINTK("nicstar%d: closing vpi.vci %d.%d \n", card->index, |
1498 | (int) vcc->vpi, vcc->vci); | 1442 | (int)vcc->vpi, vcc->vci); |
1499 | 1443 | ||
1500 | clear_bit(ATM_VF_READY,&vcc->flags); | 1444 | clear_bit(ATM_VF_READY, &vcc->flags); |
1501 | 1445 | ||
1502 | if (vcc->qos.rxtp.traffic_class != ATM_NONE) | 1446 | if (vcc->qos.rxtp.traffic_class != ATM_NONE) { |
1503 | { | 1447 | u32 addr; |
1504 | u32 addr; | 1448 | unsigned long flags; |
1505 | unsigned long flags; | 1449 | |
1506 | 1450 | addr = | |
1507 | addr = NS_RCT + (vcc->vpi << card->vcibits | vcc->vci) * NS_RCT_ENTRY_SIZE; | 1451 | NS_RCT + |
1508 | spin_lock_irqsave(&card->res_lock, flags); | 1452 | (vcc->vpi << card->vcibits | vcc->vci) * NS_RCT_ENTRY_SIZE; |
1509 | while(CMD_BUSY(card)); | 1453 | spin_lock_irqsave(&card->res_lock, flags); |
1510 | writel(NS_CMD_CLOSE_CONNECTION | addr << 2, card->membase + CMD); | 1454 | while (CMD_BUSY(card)) ; |
1511 | spin_unlock_irqrestore(&card->res_lock, flags); | 1455 | writel(NS_CMD_CLOSE_CONNECTION | addr << 2, |
1512 | 1456 | card->membase + CMD); | |
1513 | vc->rx = 0; | 1457 | spin_unlock_irqrestore(&card->res_lock, flags); |
1514 | if (vc->rx_iov != NULL) | 1458 | |
1515 | { | 1459 | vc->rx = 0; |
1516 | struct sk_buff *iovb; | 1460 | if (vc->rx_iov != NULL) { |
1517 | u32 stat; | 1461 | struct sk_buff *iovb; |
1518 | 1462 | u32 stat; | |
1519 | stat = readl(card->membase + STAT); | 1463 | |
1520 | card->sbfqc = ns_stat_sfbqc_get(stat); | 1464 | stat = readl(card->membase + STAT); |
1521 | card->lbfqc = ns_stat_lfbqc_get(stat); | 1465 | card->sbfqc = ns_stat_sfbqc_get(stat); |
1522 | 1466 | card->lbfqc = ns_stat_lfbqc_get(stat); | |
1523 | PRINTK("nicstar%d: closing a VC with pending rx buffers.\n", | 1467 | |
1524 | card->index); | 1468 | PRINTK |
1525 | iovb = vc->rx_iov; | 1469 | ("nicstar%d: closing a VC with pending rx buffers.\n", |
1526 | recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, | 1470 | card->index); |
1527 | NS_SKB(iovb)->iovcnt); | 1471 | iovb = vc->rx_iov; |
1528 | NS_SKB(iovb)->iovcnt = 0; | 1472 | recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, |
1529 | NS_SKB(iovb)->vcc = NULL; | 1473 | NS_PRV_IOVCNT(iovb)); |
1530 | spin_lock_irqsave(&card->int_lock, flags); | 1474 | NS_PRV_IOVCNT(iovb) = 0; |
1531 | recycle_iov_buf(card, iovb); | 1475 | spin_lock_irqsave(&card->int_lock, flags); |
1532 | spin_unlock_irqrestore(&card->int_lock, flags); | 1476 | recycle_iov_buf(card, iovb); |
1533 | vc->rx_iov = NULL; | 1477 | spin_unlock_irqrestore(&card->int_lock, flags); |
1534 | } | 1478 | vc->rx_iov = NULL; |
1535 | } | 1479 | } |
1536 | 1480 | } | |
1537 | if (vcc->qos.txtp.traffic_class != ATM_NONE) | 1481 | |
1538 | { | 1482 | if (vcc->qos.txtp.traffic_class != ATM_NONE) { |
1539 | vc->tx = 0; | 1483 | vc->tx = 0; |
1540 | } | 1484 | } |
1541 | 1485 | ||
1542 | if (vcc->qos.txtp.traffic_class == ATM_CBR) | 1486 | if (vcc->qos.txtp.traffic_class == ATM_CBR) { |
1543 | { | 1487 | unsigned long flags; |
1544 | unsigned long flags; | 1488 | ns_scqe *scqep; |
1545 | ns_scqe *scqep; | 1489 | scq_info *scq; |
1546 | scq_info *scq; | 1490 | |
1547 | 1491 | scq = vc->scq; | |
1548 | scq = vc->scq; | 1492 | |
1549 | 1493 | for (;;) { | |
1550 | for (;;) | 1494 | spin_lock_irqsave(&scq->lock, flags); |
1551 | { | 1495 | scqep = scq->next; |
1552 | spin_lock_irqsave(&scq->lock, flags); | 1496 | if (scqep == scq->base) |
1553 | scqep = scq->next; | 1497 | scqep = scq->last; |
1554 | if (scqep == scq->base) | 1498 | else |
1555 | scqep = scq->last; | 1499 | scqep--; |
1556 | else | 1500 | if (scqep == scq->tail) { |
1557 | scqep--; | 1501 | spin_unlock_irqrestore(&scq->lock, flags); |
1558 | if (scqep == scq->tail) | 1502 | break; |
1559 | { | 1503 | } |
1560 | spin_unlock_irqrestore(&scq->lock, flags); | 1504 | /* If the last entry is not a TSR, place one in the SCQ in order to |
1561 | break; | 1505 | be able to completely drain it and then close. */ |
1562 | } | 1506 | if (!ns_scqe_is_tsr(scqep) && scq->tail != scq->next) { |
1563 | /* If the last entry is not a TSR, place one in the SCQ in order to | 1507 | ns_scqe tsr; |
1564 | be able to completely drain it and then close. */ | 1508 | u32 scdi, scqi; |
1565 | if (!ns_scqe_is_tsr(scqep) && scq->tail != scq->next) | 1509 | u32 data; |
1566 | { | 1510 | int index; |
1567 | ns_scqe tsr; | 1511 | |
1568 | u32 scdi, scqi; | 1512 | tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE); |
1569 | u32 data; | 1513 | scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE; |
1570 | int index; | 1514 | scqi = scq->next - scq->base; |
1571 | 1515 | tsr.word_2 = ns_tsr_mkword_2(scdi, scqi); | |
1572 | tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE); | 1516 | tsr.word_3 = 0x00000000; |
1573 | scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE; | 1517 | tsr.word_4 = 0x00000000; |
1574 | scqi = scq->next - scq->base; | 1518 | *scq->next = tsr; |
1575 | tsr.word_2 = ns_tsr_mkword_2(scdi, scqi); | 1519 | index = (int)scqi; |
1576 | tsr.word_3 = 0x00000000; | 1520 | scq->skb[index] = NULL; |
1577 | tsr.word_4 = 0x00000000; | 1521 | if (scq->next == scq->last) |
1578 | *scq->next = tsr; | 1522 | scq->next = scq->base; |
1579 | index = (int) scqi; | 1523 | else |
1580 | scq->skb[index] = NULL; | 1524 | scq->next++; |
1581 | if (scq->next == scq->last) | 1525 | data = scq_virt_to_bus(scq, scq->next); |
1582 | scq->next = scq->base; | 1526 | ns_write_sram(card, scq->scd, &data, 1); |
1583 | else | 1527 | } |
1584 | scq->next++; | 1528 | spin_unlock_irqrestore(&scq->lock, flags); |
1585 | data = (u32) virt_to_bus(scq->next); | 1529 | schedule(); |
1586 | ns_write_sram(card, scq->scd, &data, 1); | 1530 | } |
1587 | } | 1531 | |
1588 | spin_unlock_irqrestore(&scq->lock, flags); | 1532 | /* Free all TST entries */ |
1589 | schedule(); | 1533 | data = NS_TST_OPCODE_VARIABLE; |
1590 | } | 1534 | for (i = 0; i < NS_TST_NUM_ENTRIES; i++) { |
1591 | 1535 | if (card->tste2vc[i] == vc) { | |
1592 | /* Free all TST entries */ | 1536 | ns_write_sram(card, card->tst_addr + i, &data, |
1593 | data = NS_TST_OPCODE_VARIABLE; | 1537 | 1); |
1594 | for (i = 0; i < NS_TST_NUM_ENTRIES; i++) | 1538 | card->tste2vc[i] = NULL; |
1595 | { | 1539 | card->tst_free_entries++; |
1596 | if (card->tste2vc[i] == vc) | 1540 | } |
1597 | { | 1541 | } |
1598 | ns_write_sram(card, card->tst_addr + i, &data, 1); | 1542 | |
1599 | card->tste2vc[i] = NULL; | 1543 | card->scd2vc[(vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE] = NULL; |
1600 | card->tst_free_entries++; | 1544 | free_scq(card, vc->scq, vcc); |
1601 | } | 1545 | } |
1602 | } | 1546 | |
1603 | 1547 | /* remove all references to vcc before deleting it */ | |
1604 | card->scd2vc[(vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE] = NULL; | 1548 | if (vcc->qos.txtp.traffic_class != ATM_NONE) { |
1605 | free_scq(vc->scq, vcc); | 1549 | unsigned long flags; |
1606 | } | 1550 | scq_info *scq = card->scq0; |
1607 | 1551 | ||
1608 | /* remove all references to vcc before deleting it */ | 1552 | spin_lock_irqsave(&scq->lock, flags); |
1609 | if (vcc->qos.txtp.traffic_class != ATM_NONE) | 1553 | |
1610 | { | 1554 | for (i = 0; i < scq->num_entries; i++) { |
1611 | unsigned long flags; | 1555 | if (scq->skb[i] && ATM_SKB(scq->skb[i])->vcc == vcc) { |
1612 | scq_info *scq = card->scq0; | 1556 | ATM_SKB(scq->skb[i])->vcc = NULL; |
1613 | 1557 | atm_return(vcc, scq->skb[i]->truesize); | |
1614 | spin_lock_irqsave(&scq->lock, flags); | 1558 | PRINTK |
1615 | 1559 | ("nicstar: deleted pending vcc mapping\n"); | |
1616 | for(i = 0; i < scq->num_entries; i++) { | 1560 | } |
1617 | if(scq->skb[i] && ATM_SKB(scq->skb[i])->vcc == vcc) { | 1561 | } |
1618 | ATM_SKB(scq->skb[i])->vcc = NULL; | 1562 | |
1619 | atm_return(vcc, scq->skb[i]->truesize); | 1563 | spin_unlock_irqrestore(&scq->lock, flags); |
1620 | PRINTK("nicstar: deleted pending vcc mapping\n"); | 1564 | } |
1621 | } | 1565 | |
1622 | } | 1566 | vcc->dev_data = NULL; |
1623 | 1567 | clear_bit(ATM_VF_PARTIAL, &vcc->flags); | |
1624 | spin_unlock_irqrestore(&scq->lock, flags); | 1568 | clear_bit(ATM_VF_ADDR, &vcc->flags); |
1625 | } | ||
1626 | |||
1627 | vcc->dev_data = NULL; | ||
1628 | clear_bit(ATM_VF_PARTIAL,&vcc->flags); | ||
1629 | clear_bit(ATM_VF_ADDR,&vcc->flags); | ||
1630 | 1569 | ||
1631 | #ifdef RX_DEBUG | 1570 | #ifdef RX_DEBUG |
1632 | { | 1571 | { |
1633 | u32 stat, cfg; | 1572 | u32 stat, cfg; |
1634 | stat = readl(card->membase + STAT); | 1573 | stat = readl(card->membase + STAT); |
1635 | cfg = readl(card->membase + CFG); | 1574 | cfg = readl(card->membase + CFG); |
1636 | printk("STAT = 0x%08X CFG = 0x%08X \n", stat, cfg); | 1575 | printk("STAT = 0x%08X CFG = 0x%08X \n", stat, cfg); |
1637 | printk("TSQ: base = 0x%08X next = 0x%08X last = 0x%08X TSQT = 0x%08X \n", | 1576 | printk |
1638 | (u32) card->tsq.base, (u32) card->tsq.next,(u32) card->tsq.last, | 1577 | ("TSQ: base = 0x%p next = 0x%p last = 0x%p TSQT = 0x%08X \n", |
1639 | readl(card->membase + TSQT)); | 1578 | card->tsq.base, card->tsq.next, |
1640 | printk("RSQ: base = 0x%08X next = 0x%08X last = 0x%08X RSQT = 0x%08X \n", | 1579 | card->tsq.last, readl(card->membase + TSQT)); |
1641 | (u32) card->rsq.base, (u32) card->rsq.next,(u32) card->rsq.last, | 1580 | printk |
1642 | readl(card->membase + RSQT)); | 1581 | ("RSQ: base = 0x%p next = 0x%p last = 0x%p RSQT = 0x%08X \n", |
1643 | printk("Empty free buffer queue interrupt %s \n", | 1582 | card->rsq.base, card->rsq.next, |
1644 | card->efbie ? "enabled" : "disabled"); | 1583 | card->rsq.last, readl(card->membase + RSQT)); |
1645 | printk("SBCNT = %d count = %d LBCNT = %d count = %d \n", | 1584 | printk("Empty free buffer queue interrupt %s \n", |
1646 | ns_stat_sfbqc_get(stat), card->sbpool.count, | 1585 | card->efbie ? "enabled" : "disabled"); |
1647 | ns_stat_lfbqc_get(stat), card->lbpool.count); | 1586 | printk("SBCNT = %d count = %d LBCNT = %d count = %d \n", |
1648 | printk("hbpool.count = %d iovpool.count = %d \n", | 1587 | ns_stat_sfbqc_get(stat), card->sbpool.count, |
1649 | card->hbpool.count, card->iovpool.count); | 1588 | ns_stat_lfbqc_get(stat), card->lbpool.count); |
1650 | } | 1589 | printk("hbpool.count = %d iovpool.count = %d \n", |
1590 | card->hbpool.count, card->iovpool.count); | ||
1591 | } | ||
1651 | #endif /* RX_DEBUG */ | 1592 | #endif /* RX_DEBUG */ |
1652 | } | 1593 | } |
1653 | 1594 | ||
1654 | 1595 | static void fill_tst(ns_dev * card, int n, vc_map * vc) | |
1655 | |||
1656 | static void fill_tst(ns_dev *card, int n, vc_map *vc) | ||
1657 | { | 1596 | { |
1658 | u32 new_tst; | 1597 | u32 new_tst; |
1659 | unsigned long cl; | 1598 | unsigned long cl; |
1660 | int e, r; | 1599 | int e, r; |
1661 | u32 data; | 1600 | u32 data; |
1662 | 1601 | ||
1663 | /* It would be very complicated to keep the two TSTs synchronized while | 1602 | /* It would be very complicated to keep the two TSTs synchronized while |
1664 | assuring that writes are only made to the inactive TST. So, for now I | 1603 | assuring that writes are only made to the inactive TST. So, for now I |
1665 | will use only one TST. If problems occur, I will change this again */ | 1604 | will use only one TST. If problems occur, I will change this again */ |
1666 | 1605 | ||
1667 | new_tst = card->tst_addr; | 1606 | new_tst = card->tst_addr; |
1668 | 1607 | ||
1669 | /* Fill procedure */ | 1608 | /* Fill procedure */ |
1670 | 1609 | ||
1671 | for (e = 0; e < NS_TST_NUM_ENTRIES; e++) | 1610 | for (e = 0; e < NS_TST_NUM_ENTRIES; e++) { |
1672 | { | 1611 | if (card->tste2vc[e] == NULL) |
1673 | if (card->tste2vc[e] == NULL) | 1612 | break; |
1674 | break; | 1613 | } |
1675 | } | 1614 | if (e == NS_TST_NUM_ENTRIES) { |
1676 | if (e == NS_TST_NUM_ENTRIES) { | 1615 | printk("nicstar%d: No free TST entries found. \n", card->index); |
1677 | printk("nicstar%d: No free TST entries found. \n", card->index); | 1616 | return; |
1678 | return; | 1617 | } |
1679 | } | 1618 | |
1680 | 1619 | r = n; | |
1681 | r = n; | 1620 | cl = NS_TST_NUM_ENTRIES; |
1682 | cl = NS_TST_NUM_ENTRIES; | 1621 | data = ns_tste_make(NS_TST_OPCODE_FIXED, vc->cbr_scd); |
1683 | data = ns_tste_make(NS_TST_OPCODE_FIXED, vc->cbr_scd); | 1622 | |
1684 | 1623 | while (r > 0) { | |
1685 | while (r > 0) | 1624 | if (cl >= NS_TST_NUM_ENTRIES && card->tste2vc[e] == NULL) { |
1686 | { | 1625 | card->tste2vc[e] = vc; |
1687 | if (cl >= NS_TST_NUM_ENTRIES && card->tste2vc[e] == NULL) | 1626 | ns_write_sram(card, new_tst + e, &data, 1); |
1688 | { | 1627 | cl -= NS_TST_NUM_ENTRIES; |
1689 | card->tste2vc[e] = vc; | 1628 | r--; |
1690 | ns_write_sram(card, new_tst + e, &data, 1); | 1629 | } |
1691 | cl -= NS_TST_NUM_ENTRIES; | 1630 | |
1692 | r--; | 1631 | if (++e == NS_TST_NUM_ENTRIES) { |
1693 | } | 1632 | e = 0; |
1694 | 1633 | } | |
1695 | if (++e == NS_TST_NUM_ENTRIES) { | 1634 | cl += n; |
1696 | e = 0; | 1635 | } |
1697 | } | 1636 | |
1698 | cl += n; | 1637 | /* End of fill procedure */ |
1699 | } | 1638 | |
1700 | 1639 | data = ns_tste_make(NS_TST_OPCODE_END, new_tst); | |
1701 | /* End of fill procedure */ | 1640 | ns_write_sram(card, new_tst + NS_TST_NUM_ENTRIES, &data, 1); |
1702 | 1641 | ns_write_sram(card, card->tst_addr + NS_TST_NUM_ENTRIES, &data, 1); | |
1703 | data = ns_tste_make(NS_TST_OPCODE_END, new_tst); | 1642 | card->tst_addr = new_tst; |
1704 | ns_write_sram(card, new_tst + NS_TST_NUM_ENTRIES, &data, 1); | ||
1705 | ns_write_sram(card, card->tst_addr + NS_TST_NUM_ENTRIES, &data, 1); | ||
1706 | card->tst_addr = new_tst; | ||
1707 | } | 1643 | } |
1708 | 1644 | ||
1709 | |||
1710 | |||
1711 | static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) | 1645 | static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) |
1712 | { | 1646 | { |
1713 | ns_dev *card; | 1647 | ns_dev *card; |
1714 | vc_map *vc; | 1648 | vc_map *vc; |
1715 | scq_info *scq; | 1649 | scq_info *scq; |
1716 | unsigned long buflen; | 1650 | unsigned long buflen; |
1717 | ns_scqe scqe; | 1651 | ns_scqe scqe; |
1718 | u32 flags; /* TBD flags, not CPU flags */ | 1652 | u32 flags; /* TBD flags, not CPU flags */ |
1719 | 1653 | ||
1720 | card = vcc->dev->dev_data; | 1654 | card = vcc->dev->dev_data; |
1721 | TXPRINTK("nicstar%d: ns_send() called.\n", card->index); | 1655 | TXPRINTK("nicstar%d: ns_send() called.\n", card->index); |
1722 | if ((vc = (vc_map *) vcc->dev_data) == NULL) | 1656 | if ((vc = (vc_map *) vcc->dev_data) == NULL) { |
1723 | { | 1657 | printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", |
1724 | printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index); | 1658 | card->index); |
1725 | atomic_inc(&vcc->stats->tx_err); | 1659 | atomic_inc(&vcc->stats->tx_err); |
1726 | dev_kfree_skb_any(skb); | 1660 | dev_kfree_skb_any(skb); |
1727 | return -EINVAL; | 1661 | return -EINVAL; |
1728 | } | 1662 | } |
1729 | |||
1730 | if (!vc->tx) | ||
1731 | { | ||
1732 | printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index); | ||
1733 | atomic_inc(&vcc->stats->tx_err); | ||
1734 | dev_kfree_skb_any(skb); | ||
1735 | return -EINVAL; | ||
1736 | } | ||
1737 | |||
1738 | if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) | ||
1739 | { | ||
1740 | printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index); | ||
1741 | atomic_inc(&vcc->stats->tx_err); | ||
1742 | dev_kfree_skb_any(skb); | ||
1743 | return -EINVAL; | ||
1744 | } | ||
1745 | |||
1746 | if (skb_shinfo(skb)->nr_frags != 0) | ||
1747 | { | ||
1748 | printk("nicstar%d: No scatter-gather yet.\n", card->index); | ||
1749 | atomic_inc(&vcc->stats->tx_err); | ||
1750 | dev_kfree_skb_any(skb); | ||
1751 | return -EINVAL; | ||
1752 | } | ||
1753 | |||
1754 | ATM_SKB(skb)->vcc = vcc; | ||
1755 | |||
1756 | if (vcc->qos.aal == ATM_AAL5) | ||
1757 | { | ||
1758 | buflen = (skb->len + 47 + 8) / 48 * 48; /* Multiple of 48 */ | ||
1759 | flags = NS_TBD_AAL5; | ||
1760 | scqe.word_2 = cpu_to_le32((u32) virt_to_bus(skb->data)); | ||
1761 | scqe.word_3 = cpu_to_le32((u32) skb->len); | ||
1762 | scqe.word_4 = ns_tbd_mkword_4(0, (u32) vcc->vpi, (u32) vcc->vci, 0, | ||
1763 | ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ? 1 : 0); | ||
1764 | flags |= NS_TBD_EOPDU; | ||
1765 | } | ||
1766 | else /* (vcc->qos.aal == ATM_AAL0) */ | ||
1767 | { | ||
1768 | buflen = ATM_CELL_PAYLOAD; /* i.e., 48 bytes */ | ||
1769 | flags = NS_TBD_AAL0; | ||
1770 | scqe.word_2 = cpu_to_le32((u32) virt_to_bus(skb->data) + NS_AAL0_HEADER); | ||
1771 | scqe.word_3 = cpu_to_le32(0x00000000); | ||
1772 | if (*skb->data & 0x02) /* Payload type 1 - end of pdu */ | ||
1773 | flags |= NS_TBD_EOPDU; | ||
1774 | scqe.word_4 = cpu_to_le32(*((u32 *) skb->data) & ~NS_TBD_VC_MASK); | ||
1775 | /* Force the VPI/VCI to be the same as in VCC struct */ | ||
1776 | scqe.word_4 |= cpu_to_le32((((u32) vcc->vpi) << NS_TBD_VPI_SHIFT | | ||
1777 | ((u32) vcc->vci) << NS_TBD_VCI_SHIFT) & | ||
1778 | NS_TBD_VC_MASK); | ||
1779 | } | ||
1780 | |||
1781 | if (vcc->qos.txtp.traffic_class == ATM_CBR) | ||
1782 | { | ||
1783 | scqe.word_1 = ns_tbd_mkword_1_novbr(flags, (u32) buflen); | ||
1784 | scq = ((vc_map *) vcc->dev_data)->scq; | ||
1785 | } | ||
1786 | else | ||
1787 | { | ||
1788 | scqe.word_1 = ns_tbd_mkword_1(flags, (u32) 1, (u32) 1, (u32) buflen); | ||
1789 | scq = card->scq0; | ||
1790 | } | ||
1791 | |||
1792 | if (push_scqe(card, vc, scq, &scqe, skb) != 0) | ||
1793 | { | ||
1794 | atomic_inc(&vcc->stats->tx_err); | ||
1795 | dev_kfree_skb_any(skb); | ||
1796 | return -EIO; | ||
1797 | } | ||
1798 | atomic_inc(&vcc->stats->tx); | ||
1799 | |||
1800 | return 0; | ||
1801 | } | ||
1802 | |||
1803 | 1663 | ||
1664 | if (!vc->tx) { | ||
1665 | printk("nicstar%d: Trying to transmit on a non-tx VC.\n", | ||
1666 | card->index); | ||
1667 | atomic_inc(&vcc->stats->tx_err); | ||
1668 | dev_kfree_skb_any(skb); | ||
1669 | return -EINVAL; | ||
1670 | } | ||
1804 | 1671 | ||
1805 | static int push_scqe(ns_dev *card, vc_map *vc, scq_info *scq, ns_scqe *tbd, | 1672 | if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) { |
1806 | struct sk_buff *skb) | 1673 | printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", |
1807 | { | 1674 | card->index); |
1808 | unsigned long flags; | 1675 | atomic_inc(&vcc->stats->tx_err); |
1809 | ns_scqe tsr; | 1676 | dev_kfree_skb_any(skb); |
1810 | u32 scdi, scqi; | 1677 | return -EINVAL; |
1811 | int scq_is_vbr; | 1678 | } |
1812 | u32 data; | ||
1813 | int index; | ||
1814 | |||
1815 | spin_lock_irqsave(&scq->lock, flags); | ||
1816 | while (scq->tail == scq->next) | ||
1817 | { | ||
1818 | if (in_interrupt()) { | ||
1819 | spin_unlock_irqrestore(&scq->lock, flags); | ||
1820 | printk("nicstar%d: Error pushing TBD.\n", card->index); | ||
1821 | return 1; | ||
1822 | } | ||
1823 | |||
1824 | scq->full = 1; | ||
1825 | spin_unlock_irqrestore(&scq->lock, flags); | ||
1826 | interruptible_sleep_on_timeout(&scq->scqfull_waitq, SCQFULL_TIMEOUT); | ||
1827 | spin_lock_irqsave(&scq->lock, flags); | ||
1828 | |||
1829 | if (scq->full) { | ||
1830 | spin_unlock_irqrestore(&scq->lock, flags); | ||
1831 | printk("nicstar%d: Timeout pushing TBD.\n", card->index); | ||
1832 | return 1; | ||
1833 | } | ||
1834 | } | ||
1835 | *scq->next = *tbd; | ||
1836 | index = (int) (scq->next - scq->base); | ||
1837 | scq->skb[index] = skb; | ||
1838 | XPRINTK("nicstar%d: sending skb at 0x%x (pos %d).\n", | ||
1839 | card->index, (u32) skb, index); | ||
1840 | XPRINTK("nicstar%d: TBD written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%x.\n", | ||
1841 | card->index, le32_to_cpu(tbd->word_1), le32_to_cpu(tbd->word_2), | ||
1842 | le32_to_cpu(tbd->word_3), le32_to_cpu(tbd->word_4), | ||
1843 | (u32) scq->next); | ||
1844 | if (scq->next == scq->last) | ||
1845 | scq->next = scq->base; | ||
1846 | else | ||
1847 | scq->next++; | ||
1848 | |||
1849 | vc->tbd_count++; | ||
1850 | if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) | ||
1851 | { | ||
1852 | scq->tbd_count++; | ||
1853 | scq_is_vbr = 1; | ||
1854 | } | ||
1855 | else | ||
1856 | scq_is_vbr = 0; | ||
1857 | |||
1858 | if (vc->tbd_count >= MAX_TBD_PER_VC || scq->tbd_count >= MAX_TBD_PER_SCQ) | ||
1859 | { | ||
1860 | int has_run = 0; | ||
1861 | |||
1862 | while (scq->tail == scq->next) | ||
1863 | { | ||
1864 | if (in_interrupt()) { | ||
1865 | data = (u32) virt_to_bus(scq->next); | ||
1866 | ns_write_sram(card, scq->scd, &data, 1); | ||
1867 | spin_unlock_irqrestore(&scq->lock, flags); | ||
1868 | printk("nicstar%d: Error pushing TSR.\n", card->index); | ||
1869 | return 0; | ||
1870 | } | ||
1871 | |||
1872 | scq->full = 1; | ||
1873 | if (has_run++) break; | ||
1874 | spin_unlock_irqrestore(&scq->lock, flags); | ||
1875 | interruptible_sleep_on_timeout(&scq->scqfull_waitq, SCQFULL_TIMEOUT); | ||
1876 | spin_lock_irqsave(&scq->lock, flags); | ||
1877 | } | ||
1878 | |||
1879 | if (!scq->full) | ||
1880 | { | ||
1881 | tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE); | ||
1882 | if (scq_is_vbr) | ||
1883 | scdi = NS_TSR_SCDISVBR; | ||
1884 | else | ||
1885 | scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE; | ||
1886 | scqi = scq->next - scq->base; | ||
1887 | tsr.word_2 = ns_tsr_mkword_2(scdi, scqi); | ||
1888 | tsr.word_3 = 0x00000000; | ||
1889 | tsr.word_4 = 0x00000000; | ||
1890 | |||
1891 | *scq->next = tsr; | ||
1892 | index = (int) scqi; | ||
1893 | scq->skb[index] = NULL; | ||
1894 | XPRINTK("nicstar%d: TSR written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%x.\n", | ||
1895 | card->index, le32_to_cpu(tsr.word_1), le32_to_cpu(tsr.word_2), | ||
1896 | le32_to_cpu(tsr.word_3), le32_to_cpu(tsr.word_4), | ||
1897 | (u32) scq->next); | ||
1898 | if (scq->next == scq->last) | ||
1899 | scq->next = scq->base; | ||
1900 | else | ||
1901 | scq->next++; | ||
1902 | vc->tbd_count = 0; | ||
1903 | scq->tbd_count = 0; | ||
1904 | } | ||
1905 | else | ||
1906 | PRINTK("nicstar%d: Timeout pushing TSR.\n", card->index); | ||
1907 | } | ||
1908 | data = (u32) virt_to_bus(scq->next); | ||
1909 | ns_write_sram(card, scq->scd, &data, 1); | ||
1910 | |||
1911 | spin_unlock_irqrestore(&scq->lock, flags); | ||
1912 | |||
1913 | return 0; | ||
1914 | } | ||
1915 | 1679 | ||
1680 | if (skb_shinfo(skb)->nr_frags != 0) { | ||
1681 | printk("nicstar%d: No scatter-gather yet.\n", card->index); | ||
1682 | atomic_inc(&vcc->stats->tx_err); | ||
1683 | dev_kfree_skb_any(skb); | ||
1684 | return -EINVAL; | ||
1685 | } | ||
1686 | |||
1687 | ATM_SKB(skb)->vcc = vcc; | ||
1688 | |||
1689 | NS_PRV_DMA(skb) = pci_map_single(card->pcidev, skb->data, | ||
1690 | skb->len, PCI_DMA_TODEVICE); | ||
1691 | |||
1692 | if (vcc->qos.aal == ATM_AAL5) { | ||
1693 | buflen = (skb->len + 47 + 8) / 48 * 48; /* Multiple of 48 */ | ||
1694 | flags = NS_TBD_AAL5; | ||
1695 | scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb)); | ||
1696 | scqe.word_3 = cpu_to_le32(skb->len); | ||
1697 | scqe.word_4 = | ||
1698 | ns_tbd_mkword_4(0, (u32) vcc->vpi, (u32) vcc->vci, 0, | ||
1699 | ATM_SKB(skb)-> | ||
1700 | atm_options & ATM_ATMOPT_CLP ? 1 : 0); | ||
1701 | flags |= NS_TBD_EOPDU; | ||
1702 | } else { /* (vcc->qos.aal == ATM_AAL0) */ | ||
1703 | |||
1704 | buflen = ATM_CELL_PAYLOAD; /* i.e., 48 bytes */ | ||
1705 | flags = NS_TBD_AAL0; | ||
1706 | scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb) + NS_AAL0_HEADER); | ||
1707 | scqe.word_3 = cpu_to_le32(0x00000000); | ||
1708 | if (*skb->data & 0x02) /* Payload type 1 - end of pdu */ | ||
1709 | flags |= NS_TBD_EOPDU; | ||
1710 | scqe.word_4 = | ||
1711 | cpu_to_le32(*((u32 *) skb->data) & ~NS_TBD_VC_MASK); | ||
1712 | /* Force the VPI/VCI to be the same as in VCC struct */ | ||
1713 | scqe.word_4 |= | ||
1714 | cpu_to_le32((((u32) vcc-> | ||
1715 | vpi) << NS_TBD_VPI_SHIFT | ((u32) vcc-> | ||
1716 | vci) << | ||
1717 | NS_TBD_VCI_SHIFT) & NS_TBD_VC_MASK); | ||
1718 | } | ||
1719 | |||
1720 | if (vcc->qos.txtp.traffic_class == ATM_CBR) { | ||
1721 | scqe.word_1 = ns_tbd_mkword_1_novbr(flags, (u32) buflen); | ||
1722 | scq = ((vc_map *) vcc->dev_data)->scq; | ||
1723 | } else { | ||
1724 | scqe.word_1 = | ||
1725 | ns_tbd_mkword_1(flags, (u32) 1, (u32) 1, (u32) buflen); | ||
1726 | scq = card->scq0; | ||
1727 | } | ||
1728 | |||
1729 | if (push_scqe(card, vc, scq, &scqe, skb) != 0) { | ||
1730 | atomic_inc(&vcc->stats->tx_err); | ||
1731 | dev_kfree_skb_any(skb); | ||
1732 | return -EIO; | ||
1733 | } | ||
1734 | atomic_inc(&vcc->stats->tx); | ||
1916 | 1735 | ||
1736 | return 0; | ||
1737 | } | ||
1917 | 1738 | ||
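
[Editorial note on ns_send() above, not part of the patch.] The AAL5 branch computes buflen = (skb->len + 47 + 8) / 48 * 48, i.e. the payload plus the 8-byte AAL5 trailer rounded up to a whole number of 48-byte cell payloads. A quick stand-alone check of that arithmetic:

    #include <stdio.h>

    /* Bytes the card actually sends for an AAL5 PDU of 'len' payload bytes:
     * payload + 8-byte AAL5 trailer, padded up to a multiple of the 48-byte
     * ATM cell payload. */
    static unsigned long aal5_buflen(unsigned long len)
    {
        return (len + 47 + 8) / 48 * 48;
    }

    int main(void)
    {
        printf("40  -> %lu\n", aal5_buflen(40));   /* 48: payload + trailer fit one cell */
        printf("41  -> %lu\n", aal5_buflen(41));   /* 96: trailer spills into a second cell */
        printf("100 -> %lu\n", aal5_buflen(100));  /* 144: three cells */
        return 0;
    }
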
1918 | static void process_tsq(ns_dev *card) | 1739 | static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd, |
1740 | struct sk_buff *skb) | ||
1919 | { | 1741 | { |
1920 | u32 scdi; | 1742 | unsigned long flags; |
1921 | scq_info *scq; | 1743 | ns_scqe tsr; |
1922 | ns_tsi *previous = NULL, *one_ahead, *two_ahead; | 1744 | u32 scdi, scqi; |
1923 | int serviced_entries; /* flag indicating at least one entry was serviced */ | 1745 | int scq_is_vbr; |
1924 | 1746 | u32 data; | |
1925 | serviced_entries = 0; | 1747 | int index; |
1926 | 1748 | ||
1927 | if (card->tsq.next == card->tsq.last) | 1749 | spin_lock_irqsave(&scq->lock, flags); |
1928 | one_ahead = card->tsq.base; | 1750 | while (scq->tail == scq->next) { |
1929 | else | 1751 | if (in_interrupt()) { |
1930 | one_ahead = card->tsq.next + 1; | 1752 | spin_unlock_irqrestore(&scq->lock, flags); |
1931 | 1753 | printk("nicstar%d: Error pushing TBD.\n", card->index); | |
1932 | if (one_ahead == card->tsq.last) | 1754 | return 1; |
1933 | two_ahead = card->tsq.base; | 1755 | } |
1934 | else | 1756 | |
1935 | two_ahead = one_ahead + 1; | 1757 | scq->full = 1; |
1936 | 1758 | spin_unlock_irqrestore(&scq->lock, flags); | |
1937 | while (!ns_tsi_isempty(card->tsq.next) || !ns_tsi_isempty(one_ahead) || | 1759 | interruptible_sleep_on_timeout(&scq->scqfull_waitq, |
1938 | !ns_tsi_isempty(two_ahead)) | 1760 | SCQFULL_TIMEOUT); |
1939 | /* At most two empty, as stated in the 77201 errata */ | 1761 | spin_lock_irqsave(&scq->lock, flags); |
1940 | { | 1762 | |
1941 | serviced_entries = 1; | 1763 | if (scq->full) { |
1942 | 1764 | spin_unlock_irqrestore(&scq->lock, flags); | |
1943 | /* Skip the one or two possible empty entries */ | 1765 | printk("nicstar%d: Timeout pushing TBD.\n", |
1944 | while (ns_tsi_isempty(card->tsq.next)) { | 1766 | card->index); |
1945 | if (card->tsq.next == card->tsq.last) | 1767 | return 1; |
1946 | card->tsq.next = card->tsq.base; | 1768 | } |
1947 | else | 1769 | } |
1948 | card->tsq.next++; | 1770 | *scq->next = *tbd; |
1949 | } | 1771 | index = (int)(scq->next - scq->base); |
1950 | 1772 | scq->skb[index] = skb; | |
1951 | if (!ns_tsi_tmrof(card->tsq.next)) | 1773 | XPRINTK("nicstar%d: sending skb at 0x%p (pos %d).\n", |
1952 | { | 1774 | card->index, skb, index); |
1953 | scdi = ns_tsi_getscdindex(card->tsq.next); | 1775 | XPRINTK("nicstar%d: TBD written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%p.\n", |
1954 | if (scdi == NS_TSI_SCDISVBR) | 1776 | card->index, le32_to_cpu(tbd->word_1), le32_to_cpu(tbd->word_2), |
1955 | scq = card->scq0; | 1777 | le32_to_cpu(tbd->word_3), le32_to_cpu(tbd->word_4), |
1956 | else | 1778 | scq->next); |
1957 | { | 1779 | if (scq->next == scq->last) |
1958 | if (card->scd2vc[scdi] == NULL) | 1780 | scq->next = scq->base; |
1959 | { | 1781 | else |
1960 | printk("nicstar%d: could not find VC from SCD index.\n", | 1782 | scq->next++; |
1961 | card->index); | 1783 | |
1962 | ns_tsi_init(card->tsq.next); | 1784 | vc->tbd_count++; |
1963 | return; | 1785 | if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) { |
1964 | } | 1786 | scq->tbd_count++; |
1965 | scq = card->scd2vc[scdi]->scq; | 1787 | scq_is_vbr = 1; |
1966 | } | 1788 | } else |
1967 | drain_scq(card, scq, ns_tsi_getscqpos(card->tsq.next)); | 1789 | scq_is_vbr = 0; |
1968 | scq->full = 0; | 1790 | |
1969 | wake_up_interruptible(&(scq->scqfull_waitq)); | 1791 | if (vc->tbd_count >= MAX_TBD_PER_VC |
1970 | } | 1792 | || scq->tbd_count >= MAX_TBD_PER_SCQ) { |
1971 | 1793 | int has_run = 0; | |
1972 | ns_tsi_init(card->tsq.next); | 1794 | |
1973 | previous = card->tsq.next; | 1795 | while (scq->tail == scq->next) { |
1974 | if (card->tsq.next == card->tsq.last) | 1796 | if (in_interrupt()) { |
1975 | card->tsq.next = card->tsq.base; | 1797 | data = scq_virt_to_bus(scq, scq->next); |
1976 | else | 1798 | ns_write_sram(card, scq->scd, &data, 1); |
1977 | card->tsq.next++; | 1799 | spin_unlock_irqrestore(&scq->lock, flags); |
1978 | 1800 | printk("nicstar%d: Error pushing TSR.\n", | |
1979 | if (card->tsq.next == card->tsq.last) | 1801 | card->index); |
1980 | one_ahead = card->tsq.base; | 1802 | return 0; |
1981 | else | 1803 | } |
1982 | one_ahead = card->tsq.next + 1; | 1804 | |
1983 | 1805 | scq->full = 1; | |
1984 | if (one_ahead == card->tsq.last) | 1806 | if (has_run++) |
1985 | two_ahead = card->tsq.base; | 1807 | break; |
1986 | else | 1808 | spin_unlock_irqrestore(&scq->lock, flags); |
1987 | two_ahead = one_ahead + 1; | 1809 | interruptible_sleep_on_timeout(&scq->scqfull_waitq, |
1988 | } | 1810 | SCQFULL_TIMEOUT); |
1989 | 1811 | spin_lock_irqsave(&scq->lock, flags); | |
1990 | if (serviced_entries) { | 1812 | } |
1991 | writel((((u32) previous) - ((u32) card->tsq.base)), | 1813 | |
1992 | card->membase + TSQH); | 1814 | if (!scq->full) { |
1993 | } | 1815 | tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE); |
1816 | if (scq_is_vbr) | ||
1817 | scdi = NS_TSR_SCDISVBR; | ||
1818 | else | ||
1819 | scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE; | ||
1820 | scqi = scq->next - scq->base; | ||
1821 | tsr.word_2 = ns_tsr_mkword_2(scdi, scqi); | ||
1822 | tsr.word_3 = 0x00000000; | ||
1823 | tsr.word_4 = 0x00000000; | ||
1824 | |||
1825 | *scq->next = tsr; | ||
1826 | index = (int)scqi; | ||
1827 | scq->skb[index] = NULL; | ||
1828 | XPRINTK | ||
1829 | ("nicstar%d: TSR written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%p.\n", | ||
1830 | card->index, le32_to_cpu(tsr.word_1), | ||
1831 | le32_to_cpu(tsr.word_2), le32_to_cpu(tsr.word_3), | ||
1832 | le32_to_cpu(tsr.word_4), scq->next); | ||
1833 | if (scq->next == scq->last) | ||
1834 | scq->next = scq->base; | ||
1835 | else | ||
1836 | scq->next++; | ||
1837 | vc->tbd_count = 0; | ||
1838 | scq->tbd_count = 0; | ||
1839 | } else | ||
1840 | PRINTK("nicstar%d: Timeout pushing TSR.\n", | ||
1841 | card->index); | ||
1842 | } | ||
1843 | data = scq_virt_to_bus(scq, scq->next); | ||
1844 | ns_write_sram(card, scq->scd, &data, 1); | ||
1845 | |||
1846 | spin_unlock_irqrestore(&scq->lock, flags); | ||
1847 | |||
1848 | return 0; | ||
1994 | } | 1849 | } |
1995 | 1850 | ||
1996 | 1851 | static void process_tsq(ns_dev * card) | |
1997 | |||
1998 | static void drain_scq(ns_dev *card, scq_info *scq, int pos) | ||
1999 | { | 1852 | { |
2000 | struct atm_vcc *vcc; | 1853 | u32 scdi; |
2001 | struct sk_buff *skb; | 1854 | scq_info *scq; |
2002 | int i; | 1855 | ns_tsi *previous = NULL, *one_ahead, *two_ahead; |
2003 | unsigned long flags; | 1856 | int serviced_entries; /* flag indicating at least on entry was serviced */ |
2004 | 1857 | ||
2005 | XPRINTK("nicstar%d: drain_scq() called, scq at 0x%x, pos %d.\n", | 1858 | serviced_entries = 0; |
2006 | card->index, (u32) scq, pos); | 1859 | |
2007 | if (pos >= scq->num_entries) | 1860 | if (card->tsq.next == card->tsq.last) |
2008 | { | 1861 | one_ahead = card->tsq.base; |
2009 | printk("nicstar%d: Bad index on drain_scq().\n", card->index); | 1862 | else |
2010 | return; | 1863 | one_ahead = card->tsq.next + 1; |
2011 | } | 1864 | |
2012 | 1865 | if (one_ahead == card->tsq.last) | |
2013 | spin_lock_irqsave(&scq->lock, flags); | 1866 | two_ahead = card->tsq.base; |
2014 | i = (int) (scq->tail - scq->base); | 1867 | else |
2015 | if (++i == scq->num_entries) | 1868 | two_ahead = one_ahead + 1; |
2016 | i = 0; | 1869 | |
2017 | while (i != pos) | 1870 | while (!ns_tsi_isempty(card->tsq.next) || !ns_tsi_isempty(one_ahead) || |
2018 | { | 1871 | !ns_tsi_isempty(two_ahead)) |
2019 | skb = scq->skb[i]; | 1872 | /* At most two empty, as stated in the 77201 errata */ |
2020 | XPRINTK("nicstar%d: freeing skb at 0x%x (index %d).\n", | 1873 | { |
2021 | card->index, (u32) skb, i); | 1874 | serviced_entries = 1; |
2022 | if (skb != NULL) | 1875 | |
2023 | { | 1876 | /* Skip the one or two possible empty entries */ |
2024 | vcc = ATM_SKB(skb)->vcc; | 1877 | while (ns_tsi_isempty(card->tsq.next)) { |
2025 | if (vcc && vcc->pop != NULL) { | 1878 | if (card->tsq.next == card->tsq.last) |
2026 | vcc->pop(vcc, skb); | 1879 | card->tsq.next = card->tsq.base; |
2027 | } else { | 1880 | else |
2028 | dev_kfree_skb_irq(skb); | 1881 | card->tsq.next++; |
2029 | } | 1882 | } |
2030 | scq->skb[i] = NULL; | 1883 | |
2031 | } | 1884 | if (!ns_tsi_tmrof(card->tsq.next)) { |
2032 | if (++i == scq->num_entries) | 1885 | scdi = ns_tsi_getscdindex(card->tsq.next); |
2033 | i = 0; | 1886 | if (scdi == NS_TSI_SCDISVBR) |
2034 | } | 1887 | scq = card->scq0; |
2035 | scq->tail = scq->base + pos; | 1888 | else { |
2036 | spin_unlock_irqrestore(&scq->lock, flags); | 1889 | if (card->scd2vc[scdi] == NULL) { |
1890 | printk | ||
1891 | ("nicstar%d: could not find VC from SCD index.\n", | ||
1892 | card->index); | ||
1893 | ns_tsi_init(card->tsq.next); | ||
1894 | return; | ||
1895 | } | ||
1896 | scq = card->scd2vc[scdi]->scq; | ||
1897 | } | ||
1898 | drain_scq(card, scq, ns_tsi_getscqpos(card->tsq.next)); | ||
1899 | scq->full = 0; | ||
1900 | wake_up_interruptible(&(scq->scqfull_waitq)); | ||
1901 | } | ||
1902 | |||
1903 | ns_tsi_init(card->tsq.next); | ||
1904 | previous = card->tsq.next; | ||
1905 | if (card->tsq.next == card->tsq.last) | ||
1906 | card->tsq.next = card->tsq.base; | ||
1907 | else | ||
1908 | card->tsq.next++; | ||
1909 | |||
1910 | if (card->tsq.next == card->tsq.last) | ||
1911 | one_ahead = card->tsq.base; | ||
1912 | else | ||
1913 | one_ahead = card->tsq.next + 1; | ||
1914 | |||
1915 | if (one_ahead == card->tsq.last) | ||
1916 | two_ahead = card->tsq.base; | ||
1917 | else | ||
1918 | two_ahead = one_ahead + 1; | ||
1919 | } | ||
1920 | |||
1921 | if (serviced_entries) | ||
1922 | writel(PTR_DIFF(previous, card->tsq.base), | ||
1923 | card->membase + TSQH); | ||
2037 | } | 1924 | } |
2038 | 1925 | ||
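
[Editorial note on the queue-handling code around this point, not part of the patch.] process_tsq(), drain_scq() and process_rsq() all advance a pointer through a fixed-size ring and wrap from the last entry back to the base. The hedged sketch below shows that wrap logic on a plain array, mirroring the driver's "if (p == last) p = base; else p++" shape; the entry type and queue size are invented for the example.

    #include <stdio.h>

    #define RING_ENTRIES 8   /* invented size; the driver's queues differ */

    struct ring {
        int slots[RING_ENTRIES];
        int *base, *last, *next;
    };

    /* Advance 'next' one entry, wrapping from the last slot back to the base,
     * like the driver's "if (p == last) p = base; else p++" pattern. */
    static void ring_advance(struct ring *r)
    {
        if (r->next == r->last)
            r->next = r->base;
        else
            r->next++;
    }

    int main(void)
    {
        struct ring r;
        int i;

        r.base = r.next = &r.slots[0];
        r.last = &r.slots[RING_ENTRIES - 1];

        for (i = 0; i < RING_ENTRIES + 2; i++) {   /* walk past the end to show the wrap */
            printf("step %d: index %ld\n", i, (long)(r.next - r.base));
            ring_advance(&r);
        }
        return 0;
    }
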
1926 | static void drain_scq(ns_dev * card, scq_info * scq, int pos) | ||
1927 | { | ||
1928 | struct atm_vcc *vcc; | ||
1929 | struct sk_buff *skb; | ||
1930 | int i; | ||
1931 | unsigned long flags; | ||
1932 | |||
1933 | XPRINTK("nicstar%d: drain_scq() called, scq at 0x%p, pos %d.\n", | ||
1934 | card->index, scq, pos); | ||
1935 | if (pos >= scq->num_entries) { | ||
1936 | printk("nicstar%d: Bad index on drain_scq().\n", card->index); | ||
1937 | return; | ||
1938 | } | ||
1939 | |||
1940 | spin_lock_irqsave(&scq->lock, flags); | ||
1941 | i = (int)(scq->tail - scq->base); | ||
1942 | if (++i == scq->num_entries) | ||
1943 | i = 0; | ||
1944 | while (i != pos) { | ||
1945 | skb = scq->skb[i]; | ||
1946 | XPRINTK("nicstar%d: freeing skb at 0x%p (index %d).\n", | ||
1947 | card->index, skb, i); | ||
1948 | if (skb != NULL) { | ||
1949 | pci_unmap_single(card->pcidev, | ||
1950 | NS_PRV_DMA(skb), | ||
1951 | skb->len, | ||
1952 | PCI_DMA_TODEVICE); | ||
1953 | vcc = ATM_SKB(skb)->vcc; | ||
1954 | if (vcc && vcc->pop != NULL) { | ||
1955 | vcc->pop(vcc, skb); | ||
1956 | } else { | ||
1957 | dev_kfree_skb_irq(skb); | ||
1958 | } | ||
1959 | scq->skb[i] = NULL; | ||
1960 | } | ||
1961 | if (++i == scq->num_entries) | ||
1962 | i = 0; | ||
1963 | } | ||
1964 | scq->tail = scq->base + pos; | ||
1965 | spin_unlock_irqrestore(&scq->lock, flags); | ||
1966 | } | ||
2039 | 1967 | ||
2040 | 1968 | static void process_rsq(ns_dev * card) | |
2041 | static void process_rsq(ns_dev *card) | ||
2042 | { | 1969 | { |
2043 | ns_rsqe *previous; | 1970 | ns_rsqe *previous; |
2044 | 1971 | ||
2045 | if (!ns_rsqe_valid(card->rsq.next)) | 1972 | if (!ns_rsqe_valid(card->rsq.next)) |
2046 | return; | 1973 | return; |
2047 | do { | 1974 | do { |
2048 | dequeue_rx(card, card->rsq.next); | 1975 | dequeue_rx(card, card->rsq.next); |
2049 | ns_rsqe_init(card->rsq.next); | 1976 | ns_rsqe_init(card->rsq.next); |
2050 | previous = card->rsq.next; | 1977 | previous = card->rsq.next; |
2051 | if (card->rsq.next == card->rsq.last) | 1978 | if (card->rsq.next == card->rsq.last) |
2052 | card->rsq.next = card->rsq.base; | 1979 | card->rsq.next = card->rsq.base; |
2053 | else | 1980 | else |
2054 | card->rsq.next++; | 1981 | card->rsq.next++; |
2055 | } while (ns_rsqe_valid(card->rsq.next)); | 1982 | } while (ns_rsqe_valid(card->rsq.next)); |
2056 | writel((((u32) previous) - ((u32) card->rsq.base)), | 1983 | writel(PTR_DIFF(previous, card->rsq.base), card->membase + RSQH); |
2057 | card->membase + RSQH); | ||
2058 | } | 1984 | } |
2059 | 1985 | ||
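A recurring change in the hunks above is that the queue positions written to the TSQH/RSQH registers now go through a PTR_DIFF() helper instead of subtracting two pointers that were each cast to u32. The value the hardware wants is just the byte offset of the serviced entry from the queue base, which fits in 32 bits even when the pointers themselves do not. PTR_DIFF() is presumably defined elsewhere in the patch and is not shown here; a hedged sketch of what such a helper would compute:

#include <stdint.h>

/* Presumed equivalent of the PTR_DIFF() used above: the byte offset of
 * a queue entry from the queue base.  Subtracting char pointers keeps
 * the arithmetic valid on 64-bit hosts, where casting each pointer to
 * u32 (as the old code did) would truncate it. */
static inline uint32_t queue_byte_offset(const void *entry, const void *base)
{
        return (uint32_t)((const char *)entry - (const char *)base);
}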
1986 | static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) | ||
1987 | { | ||
1988 | u32 vpi, vci; | ||
1989 | vc_map *vc; | ||
1990 | struct sk_buff *iovb; | ||
1991 | struct iovec *iov; | ||
1992 | struct atm_vcc *vcc; | ||
1993 | struct sk_buff *skb; | ||
1994 | unsigned short aal5_len; | ||
1995 | int len; | ||
1996 | u32 stat; | ||
1997 | u32 id; | ||
1998 | |||
1999 | stat = readl(card->membase + STAT); | ||
2000 | card->sbfqc = ns_stat_sfbqc_get(stat); | ||
2001 | card->lbfqc = ns_stat_lfbqc_get(stat); | ||
2002 | |||
2003 | id = le32_to_cpu(rsqe->buffer_handle); | ||
2004 | skb = idr_find(&card->idr, id); | ||
2005 | if (!skb) { | ||
2006 | RXPRINTK(KERN_ERR | ||
2007 | "nicstar%d: idr_find() failed!\n", card->index); | ||
2008 | return; | ||
2009 | } | ||
2010 | idr_remove(&card->idr, id); | ||
2011 | pci_dma_sync_single_for_cpu(card->pcidev, | ||
2012 | NS_PRV_DMA(skb), | ||
2013 | (NS_PRV_BUFTYPE(skb) == BUF_SM | ||
2014 | ? NS_SMSKBSIZE : NS_LGSKBSIZE), | ||
2015 | PCI_DMA_FROMDEVICE); | ||
2016 | pci_unmap_single(card->pcidev, | ||
2017 | NS_PRV_DMA(skb), | ||
2018 | (NS_PRV_BUFTYPE(skb) == BUF_SM | ||
2019 | ? NS_SMSKBSIZE : NS_LGSKBSIZE), | ||
2020 | PCI_DMA_FROMDEVICE); | ||
2021 | vpi = ns_rsqe_vpi(rsqe); | ||
2022 | vci = ns_rsqe_vci(rsqe); | ||
2023 | if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits) { | ||
2024 | printk("nicstar%d: SDU received for out-of-range vc %d.%d.\n", | ||
2025 | card->index, vpi, vci); | ||
2026 | recycle_rx_buf(card, skb); | ||
2027 | return; | ||
2028 | } | ||
2029 | |||
2030 | vc = &(card->vcmap[vpi << card->vcibits | vci]); | ||
2031 | if (!vc->rx) { | ||
2032 | RXPRINTK("nicstar%d: SDU received on non-rx vc %d.%d.\n", | ||
2033 | card->index, vpi, vci); | ||
2034 | recycle_rx_buf(card, skb); | ||
2035 | return; | ||
2036 | } | ||
2037 | |||
2038 | vcc = vc->rx_vcc; | ||
2039 | |||
2040 | if (vcc->qos.aal == ATM_AAL0) { | ||
2041 | struct sk_buff *sb; | ||
2042 | unsigned char *cell; | ||
2043 | int i; | ||
2044 | |||
2045 | cell = skb->data; | ||
2046 | for (i = ns_rsqe_cellcount(rsqe); i; i--) { | ||
2047 | if ((sb = dev_alloc_skb(NS_SMSKBSIZE)) == NULL) { | ||
2048 | printk | ||
2049 | ("nicstar%d: Can't allocate buffers for aal0.\n", | ||
2050 | card->index); | ||
2051 | atomic_add(i, &vcc->stats->rx_drop); | ||
2052 | break; | ||
2053 | } | ||
2054 | if (!atm_charge(vcc, sb->truesize)) { | ||
2055 | RXPRINTK | ||
2056 | ("nicstar%d: atm_charge() dropped aal0 packets.\n", | ||
2057 | card->index); | ||
2058 | atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */ | ||
2059 | dev_kfree_skb_any(sb); | ||
2060 | break; | ||
2061 | } | ||
2062 | /* Rebuild the header */ | ||
2063 | *((u32 *) sb->data) = le32_to_cpu(rsqe->word_1) << 4 | | ||
2064 | (ns_rsqe_clp(rsqe) ? 0x00000001 : 0x00000000); | ||
2065 | if (i == 1 && ns_rsqe_eopdu(rsqe)) | ||
2066 | *((u32 *) sb->data) |= 0x00000002; | ||
2067 | skb_put(sb, NS_AAL0_HEADER); | ||
2068 | memcpy(skb_tail_pointer(sb), cell, ATM_CELL_PAYLOAD); | ||
2069 | skb_put(sb, ATM_CELL_PAYLOAD); | ||
2070 | ATM_SKB(sb)->vcc = vcc; | ||
2071 | __net_timestamp(sb); | ||
2072 | vcc->push(vcc, sb); | ||
2073 | atomic_inc(&vcc->stats->rx); | ||
2074 | cell += ATM_CELL_PAYLOAD; | ||
2075 | } | ||
2076 | |||
2077 | recycle_rx_buf(card, skb); | ||
2078 | return; | ||
2079 | } | ||
2080 | |||
2081 | /* To reach this point, the AAL layer can only be AAL5 */ | ||
2082 | |||
2083 | if ((iovb = vc->rx_iov) == NULL) { | ||
2084 | iovb = skb_dequeue(&(card->iovpool.queue)); | ||
2085 | if (iovb == NULL) { /* No buffers in the queue */ | ||
2086 | iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC); | ||
2087 | if (iovb == NULL) { | ||
2088 | printk("nicstar%d: Out of iovec buffers.\n", | ||
2089 | card->index); | ||
2090 | atomic_inc(&vcc->stats->rx_drop); | ||
2091 | recycle_rx_buf(card, skb); | ||
2092 | return; | ||
2093 | } | ||
2094 | NS_PRV_BUFTYPE(iovb) = BUF_NONE; | ||
2095 | } else if (--card->iovpool.count < card->iovnr.min) { | ||
2096 | struct sk_buff *new_iovb; | ||
2097 | if ((new_iovb = | ||
2098 | alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL) { | ||
2099 | NS_PRV_BUFTYPE(iovb) = BUF_NONE; | ||
2100 | skb_queue_tail(&card->iovpool.queue, new_iovb); | ||
2101 | card->iovpool.count++; | ||
2102 | } | ||
2103 | } | ||
2104 | vc->rx_iov = iovb; | ||
2105 | NS_PRV_IOVCNT(iovb) = 0; | ||
2106 | iovb->len = 0; | ||
2107 | iovb->data = iovb->head; | ||
2108 | skb_reset_tail_pointer(iovb); | ||
2109 | /* IMPORTANT: a pointer to the sk_buff containing the small or large | ||
2110 | buffer is stored as iovec base, NOT a pointer to the | ||
2111 | small or large buffer itself. */ | ||
2112 | } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) { | ||
2113 | printk("nicstar%d: received too big AAL5 SDU.\n", card->index); | ||
2114 | atomic_inc(&vcc->stats->rx_err); | ||
2115 | recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, | ||
2116 | NS_MAX_IOVECS); | ||
2117 | NS_PRV_IOVCNT(iovb) = 0; | ||
2118 | iovb->len = 0; | ||
2119 | iovb->data = iovb->head; | ||
2120 | skb_reset_tail_pointer(iovb); | ||
2121 | } | ||
2122 | iov = &((struct iovec *)iovb->data)[NS_PRV_IOVCNT(iovb)++]; | ||
2123 | iov->iov_base = (void *)skb; | ||
2124 | iov->iov_len = ns_rsqe_cellcount(rsqe) * 48; | ||
2125 | iovb->len += iov->iov_len; | ||
2060 | 2126 | ||
2127 | #ifdef EXTRA_DEBUG | ||
2128 | if (NS_PRV_IOVCNT(iovb) == 1) { | ||
2129 | if (NS_PRV_BUFTYPE(skb) != BUF_SM) { | ||
2130 | printk | ||
2131 | ("nicstar%d: Expected a small buffer, and this is not one.\n", | ||
2132 | card->index); | ||
2133 | which_list(card, skb); | ||
2134 | atomic_inc(&vcc->stats->rx_err); | ||
2135 | recycle_rx_buf(card, skb); | ||
2136 | vc->rx_iov = NULL; | ||
2137 | recycle_iov_buf(card, iovb); | ||
2138 | return; | ||
2139 | } | ||
2140 | } else { /* NS_PRV_IOVCNT(iovb) >= 2 */ | ||
2141 | |||
2142 | if (NS_PRV_BUFTYPE(skb) != BUF_LG) { | ||
2143 | printk | ||
2144 | ("nicstar%d: Expected a large buffer, and this is not one.\n", | ||
2145 | card->index); | ||
2146 | which_list(card, skb); | ||
2147 | atomic_inc(&vcc->stats->rx_err); | ||
2148 | recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, | ||
2149 | NS_PRV_IOVCNT(iovb)); | ||
2150 | vc->rx_iov = NULL; | ||
2151 | recycle_iov_buf(card, iovb); | ||
2152 | return; | ||
2153 | } | ||
2154 | } | ||
2155 | #endif /* EXTRA_DEBUG */ | ||
2061 | 2156 | ||
2062 | static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) | 2157 | if (ns_rsqe_eopdu(rsqe)) { |
2063 | { | 2158 | /* This works correctly regardless of the endianness of the host */ |
2064 | u32 vpi, vci; | 2159 | unsigned char *L1L2 = (unsigned char *) |
2065 | vc_map *vc; | 2160 | (skb->data + iov->iov_len - 6); |
2066 | struct sk_buff *iovb; | 2161 | aal5_len = L1L2[0] << 8 | L1L2[1]; |
2067 | struct iovec *iov; | 2162 | len = (aal5_len == 0x0000) ? 0x10000 : aal5_len; |
2068 | struct atm_vcc *vcc; | 2163 | if (ns_rsqe_crcerr(rsqe) || |
2069 | struct sk_buff *skb; | 2164 | len + 8 > iovb->len || len + (47 + 8) < iovb->len) { |
2070 | unsigned short aal5_len; | 2165 | printk("nicstar%d: AAL5 CRC error", card->index); |
2071 | int len; | 2166 | if (len + 8 > iovb->len || len + (47 + 8) < iovb->len) |
2072 | u32 stat; | 2167 | printk(" - PDU size mismatch.\n"); |
2073 | 2168 | else | |
2074 | stat = readl(card->membase + STAT); | 2169 | printk(".\n"); |
2075 | card->sbfqc = ns_stat_sfbqc_get(stat); | 2170 | atomic_inc(&vcc->stats->rx_err); |
2076 | card->lbfqc = ns_stat_lfbqc_get(stat); | 2171 | recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, |
2077 | 2172 | NS_PRV_IOVCNT(iovb)); | |
2078 | skb = (struct sk_buff *) le32_to_cpu(rsqe->buffer_handle); | 2173 | vc->rx_iov = NULL; |
2079 | vpi = ns_rsqe_vpi(rsqe); | 2174 | recycle_iov_buf(card, iovb); |
2080 | vci = ns_rsqe_vci(rsqe); | 2175 | return; |
2081 | if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits) | 2176 | } |
2082 | { | 2177 | |
2083 | printk("nicstar%d: SDU received for out-of-range vc %d.%d.\n", | 2178 | /* By this point we (hopefully) have a complete SDU without errors. */ |
2084 | card->index, vpi, vci); | 2179 | |
2085 | recycle_rx_buf(card, skb); | 2180 | if (NS_PRV_IOVCNT(iovb) == 1) { /* Just a small buffer */ |
2086 | return; | 2181 | /* skb points to a small buffer */ |
2087 | } | 2182 | if (!atm_charge(vcc, skb->truesize)) { |
2088 | 2183 | push_rxbufs(card, skb); | |
2089 | vc = &(card->vcmap[vpi << card->vcibits | vci]); | 2184 | atomic_inc(&vcc->stats->rx_drop); |
2090 | if (!vc->rx) | 2185 | } else { |
2091 | { | 2186 | skb_put(skb, len); |
2092 | RXPRINTK("nicstar%d: SDU received on non-rx vc %d.%d.\n", | 2187 | dequeue_sm_buf(card, skb); |
2093 | card->index, vpi, vci); | ||
2094 | recycle_rx_buf(card, skb); | ||
2095 | return; | ||
2096 | } | ||
2097 | |||
2098 | vcc = vc->rx_vcc; | ||
2099 | |||
2100 | if (vcc->qos.aal == ATM_AAL0) | ||
2101 | { | ||
2102 | struct sk_buff *sb; | ||
2103 | unsigned char *cell; | ||
2104 | int i; | ||
2105 | |||
2106 | cell = skb->data; | ||
2107 | for (i = ns_rsqe_cellcount(rsqe); i; i--) | ||
2108 | { | ||
2109 | if ((sb = dev_alloc_skb(NS_SMSKBSIZE)) == NULL) | ||
2110 | { | ||
2111 | printk("nicstar%d: Can't allocate buffers for aal0.\n", | ||
2112 | card->index); | ||
2113 | atomic_add(i,&vcc->stats->rx_drop); | ||
2114 | break; | ||
2115 | } | ||
2116 | if (!atm_charge(vcc, sb->truesize)) | ||
2117 | { | ||
2118 | RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n", | ||
2119 | card->index); | ||
2120 | atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */ | ||
2121 | dev_kfree_skb_any(sb); | ||
2122 | break; | ||
2123 | } | ||
2124 | /* Rebuild the header */ | ||
2125 | *((u32 *) sb->data) = le32_to_cpu(rsqe->word_1) << 4 | | ||
2126 | (ns_rsqe_clp(rsqe) ? 0x00000001 : 0x00000000); | ||
2127 | if (i == 1 && ns_rsqe_eopdu(rsqe)) | ||
2128 | *((u32 *) sb->data) |= 0x00000002; | ||
2129 | skb_put(sb, NS_AAL0_HEADER); | ||
2130 | memcpy(skb_tail_pointer(sb), cell, ATM_CELL_PAYLOAD); | ||
2131 | skb_put(sb, ATM_CELL_PAYLOAD); | ||
2132 | ATM_SKB(sb)->vcc = vcc; | ||
2133 | __net_timestamp(sb); | ||
2134 | vcc->push(vcc, sb); | ||
2135 | atomic_inc(&vcc->stats->rx); | ||
2136 | cell += ATM_CELL_PAYLOAD; | ||
2137 | } | ||
2138 | |||
2139 | recycle_rx_buf(card, skb); | ||
2140 | return; | ||
2141 | } | ||
2142 | |||
2143 | /* To reach this point, the AAL layer can only be AAL5 */ | ||
2144 | |||
2145 | if ((iovb = vc->rx_iov) == NULL) | ||
2146 | { | ||
2147 | iovb = skb_dequeue(&(card->iovpool.queue)); | ||
2148 | if (iovb == NULL) /* No buffers in the queue */ | ||
2149 | { | ||
2150 | iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC); | ||
2151 | if (iovb == NULL) | ||
2152 | { | ||
2153 | printk("nicstar%d: Out of iovec buffers.\n", card->index); | ||
2154 | atomic_inc(&vcc->stats->rx_drop); | ||
2155 | recycle_rx_buf(card, skb); | ||
2156 | return; | ||
2157 | } | ||
2158 | NS_SKB_CB(iovb)->buf_type = BUF_NONE; | ||
2159 | } | ||
2160 | else | ||
2161 | if (--card->iovpool.count < card->iovnr.min) | ||
2162 | { | ||
2163 | struct sk_buff *new_iovb; | ||
2164 | if ((new_iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL) | ||
2165 | { | ||
2166 | NS_SKB_CB(iovb)->buf_type = BUF_NONE; | ||
2167 | skb_queue_tail(&card->iovpool.queue, new_iovb); | ||
2168 | card->iovpool.count++; | ||
2169 | } | ||
2170 | } | ||
2171 | vc->rx_iov = iovb; | ||
2172 | NS_SKB(iovb)->iovcnt = 0; | ||
2173 | iovb->len = 0; | ||
2174 | iovb->data = iovb->head; | ||
2175 | skb_reset_tail_pointer(iovb); | ||
2176 | NS_SKB(iovb)->vcc = vcc; | ||
2177 | /* IMPORTANT: a pointer to the sk_buff containing the small or large | ||
2178 | buffer is stored as iovec base, NOT a pointer to the | ||
2179 | small or large buffer itself. */ | ||
2180 | } | ||
2181 | else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS) | ||
2182 | { | ||
2183 | printk("nicstar%d: received too big AAL5 SDU.\n", card->index); | ||
2184 | atomic_inc(&vcc->stats->rx_err); | ||
2185 | recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS); | ||
2186 | NS_SKB(iovb)->iovcnt = 0; | ||
2187 | iovb->len = 0; | ||
2188 | iovb->data = iovb->head; | ||
2189 | skb_reset_tail_pointer(iovb); | ||
2190 | NS_SKB(iovb)->vcc = vcc; | ||
2191 | } | ||
2192 | iov = &((struct iovec *) iovb->data)[NS_SKB(iovb)->iovcnt++]; | ||
2193 | iov->iov_base = (void *) skb; | ||
2194 | iov->iov_len = ns_rsqe_cellcount(rsqe) * 48; | ||
2195 | iovb->len += iov->iov_len; | ||
2196 | |||
2197 | if (NS_SKB(iovb)->iovcnt == 1) | ||
2198 | { | ||
2199 | if (NS_SKB_CB(skb)->buf_type != BUF_SM) | ||
2200 | { | ||
2201 | printk("nicstar%d: Expected a small buffer, and this is not one.\n", | ||
2202 | card->index); | ||
2203 | which_list(card, skb); | ||
2204 | atomic_inc(&vcc->stats->rx_err); | ||
2205 | recycle_rx_buf(card, skb); | ||
2206 | vc->rx_iov = NULL; | ||
2207 | recycle_iov_buf(card, iovb); | ||
2208 | return; | ||
2209 | } | ||
2210 | } | ||
2211 | else /* NS_SKB(iovb)->iovcnt >= 2 */ | ||
2212 | { | ||
2213 | if (NS_SKB_CB(skb)->buf_type != BUF_LG) | ||
2214 | { | ||
2215 | printk("nicstar%d: Expected a large buffer, and this is not one.\n", | ||
2216 | card->index); | ||
2217 | which_list(card, skb); | ||
2218 | atomic_inc(&vcc->stats->rx_err); | ||
2219 | recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, | ||
2220 | NS_SKB(iovb)->iovcnt); | ||
2221 | vc->rx_iov = NULL; | ||
2222 | recycle_iov_buf(card, iovb); | ||
2223 | return; | ||
2224 | } | ||
2225 | } | ||
2226 | |||
2227 | if (ns_rsqe_eopdu(rsqe)) | ||
2228 | { | ||
2229 | /* This works correctly regardless of the endianness of the host */ | ||
2230 | unsigned char *L1L2 = (unsigned char *)((u32)skb->data + | ||
2231 | iov->iov_len - 6); | ||
2232 | aal5_len = L1L2[0] << 8 | L1L2[1]; | ||
2233 | len = (aal5_len == 0x0000) ? 0x10000 : aal5_len; | ||
2234 | if (ns_rsqe_crcerr(rsqe) || | ||
2235 | len + 8 > iovb->len || len + (47 + 8) < iovb->len) | ||
2236 | { | ||
2237 | printk("nicstar%d: AAL5 CRC error", card->index); | ||
2238 | if (len + 8 > iovb->len || len + (47 + 8) < iovb->len) | ||
2239 | printk(" - PDU size mismatch.\n"); | ||
2240 | else | ||
2241 | printk(".\n"); | ||
2242 | atomic_inc(&vcc->stats->rx_err); | ||
2243 | recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, | ||
2244 | NS_SKB(iovb)->iovcnt); | ||
2245 | vc->rx_iov = NULL; | ||
2246 | recycle_iov_buf(card, iovb); | ||
2247 | return; | ||
2248 | } | ||
2249 | |||
2250 | /* By this point we (hopefully) have a complete SDU without errors. */ | ||
2251 | |||
2252 | if (NS_SKB(iovb)->iovcnt == 1) /* Just a small buffer */ | ||
2253 | { | ||
2254 | /* skb points to a small buffer */ | ||
2255 | if (!atm_charge(vcc, skb->truesize)) | ||
2256 | { | ||
2257 | push_rxbufs(card, skb); | ||
2258 | atomic_inc(&vcc->stats->rx_drop); | ||
2259 | } | ||
2260 | else | ||
2261 | { | ||
2262 | skb_put(skb, len); | ||
2263 | dequeue_sm_buf(card, skb); | ||
2264 | #ifdef NS_USE_DESTRUCTORS | 2188 | #ifdef NS_USE_DESTRUCTORS |
2265 | skb->destructor = ns_sb_destructor; | 2189 | skb->destructor = ns_sb_destructor; |
2266 | #endif /* NS_USE_DESTRUCTORS */ | 2190 | #endif /* NS_USE_DESTRUCTORS */ |
2267 | ATM_SKB(skb)->vcc = vcc; | 2191 | ATM_SKB(skb)->vcc = vcc; |
2268 | __net_timestamp(skb); | 2192 | __net_timestamp(skb); |
2269 | vcc->push(vcc, skb); | 2193 | vcc->push(vcc, skb); |
2270 | atomic_inc(&vcc->stats->rx); | 2194 | atomic_inc(&vcc->stats->rx); |
2271 | } | 2195 | } |
2272 | } | 2196 | } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */ |
2273 | else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */ | 2197 | struct sk_buff *sb; |
2274 | { | 2198 | |
2275 | struct sk_buff *sb; | 2199 | sb = (struct sk_buff *)(iov - 1)->iov_base; |
2276 | 2200 | /* skb points to a large buffer */ | |
2277 | sb = (struct sk_buff *) (iov - 1)->iov_base; | 2201 | |
2278 | /* skb points to a large buffer */ | 2202 | if (len <= NS_SMBUFSIZE) { |
2279 | 2203 | if (!atm_charge(vcc, sb->truesize)) { | |
2280 | if (len <= NS_SMBUFSIZE) | 2204 | push_rxbufs(card, sb); |
2281 | { | 2205 | atomic_inc(&vcc->stats->rx_drop); |
2282 | if (!atm_charge(vcc, sb->truesize)) | 2206 | } else { |
2283 | { | 2207 | skb_put(sb, len); |
2284 | push_rxbufs(card, sb); | 2208 | dequeue_sm_buf(card, sb); |
2285 | atomic_inc(&vcc->stats->rx_drop); | ||
2286 | } | ||
2287 | else | ||
2288 | { | ||
2289 | skb_put(sb, len); | ||
2290 | dequeue_sm_buf(card, sb); | ||
2291 | #ifdef NS_USE_DESTRUCTORS | 2209 | #ifdef NS_USE_DESTRUCTORS |
2292 | sb->destructor = ns_sb_destructor; | 2210 | sb->destructor = ns_sb_destructor; |
2293 | #endif /* NS_USE_DESTRUCTORS */ | 2211 | #endif /* NS_USE_DESTRUCTORS */ |
2294 | ATM_SKB(sb)->vcc = vcc; | 2212 | ATM_SKB(sb)->vcc = vcc; |
2295 | __net_timestamp(sb); | 2213 | __net_timestamp(sb); |
2296 | vcc->push(vcc, sb); | 2214 | vcc->push(vcc, sb); |
2297 | atomic_inc(&vcc->stats->rx); | 2215 | atomic_inc(&vcc->stats->rx); |
2298 | } | 2216 | } |
2299 | 2217 | ||
2300 | push_rxbufs(card, skb); | 2218 | push_rxbufs(card, skb); |
2301 | 2219 | ||
2302 | } | 2220 | } else { /* len > NS_SMBUFSIZE, the usual case */ |
2303 | else /* len > NS_SMBUFSIZE, the usual case */ | 2221 | |
2304 | { | 2222 | if (!atm_charge(vcc, skb->truesize)) { |
2305 | if (!atm_charge(vcc, skb->truesize)) | 2223 | push_rxbufs(card, skb); |
2306 | { | 2224 | atomic_inc(&vcc->stats->rx_drop); |
2307 | push_rxbufs(card, skb); | 2225 | } else { |
2308 | atomic_inc(&vcc->stats->rx_drop); | 2226 | dequeue_lg_buf(card, skb); |
2309 | } | ||
2310 | else | ||
2311 | { | ||
2312 | dequeue_lg_buf(card, skb); | ||
2313 | #ifdef NS_USE_DESTRUCTORS | 2227 | #ifdef NS_USE_DESTRUCTORS |
2314 | skb->destructor = ns_lb_destructor; | 2228 | skb->destructor = ns_lb_destructor; |
2315 | #endif /* NS_USE_DESTRUCTORS */ | 2229 | #endif /* NS_USE_DESTRUCTORS */ |
2316 | skb_push(skb, NS_SMBUFSIZE); | 2230 | skb_push(skb, NS_SMBUFSIZE); |
2317 | skb_copy_from_linear_data(sb, skb->data, NS_SMBUFSIZE); | 2231 | skb_copy_from_linear_data(sb, skb->data, |
2318 | skb_put(skb, len - NS_SMBUFSIZE); | 2232 | NS_SMBUFSIZE); |
2319 | ATM_SKB(skb)->vcc = vcc; | 2233 | skb_put(skb, len - NS_SMBUFSIZE); |
2320 | __net_timestamp(skb); | 2234 | ATM_SKB(skb)->vcc = vcc; |
2321 | vcc->push(vcc, skb); | 2235 | __net_timestamp(skb); |
2322 | atomic_inc(&vcc->stats->rx); | 2236 | vcc->push(vcc, skb); |
2323 | } | 2237 | atomic_inc(&vcc->stats->rx); |
2324 | 2238 | } | |
2325 | push_rxbufs(card, sb); | 2239 | |
2326 | 2240 | push_rxbufs(card, sb); | |
2327 | } | 2241 | |
2328 | 2242 | } | |
2329 | } | 2243 | |
2330 | else /* Must push a huge buffer */ | 2244 | } else { /* Must push a huge buffer */ |
2331 | { | 2245 | |
2332 | struct sk_buff *hb, *sb, *lb; | 2246 | struct sk_buff *hb, *sb, *lb; |
2333 | int remaining, tocopy; | 2247 | int remaining, tocopy; |
2334 | int j; | 2248 | int j; |
2335 | 2249 | ||
2336 | hb = skb_dequeue(&(card->hbpool.queue)); | 2250 | hb = skb_dequeue(&(card->hbpool.queue)); |
2337 | if (hb == NULL) /* No buffers in the queue */ | 2251 | if (hb == NULL) { /* No buffers in the queue */ |
2338 | { | 2252 | |
2339 | 2253 | hb = dev_alloc_skb(NS_HBUFSIZE); | |
2340 | hb = dev_alloc_skb(NS_HBUFSIZE); | 2254 | if (hb == NULL) { |
2341 | if (hb == NULL) | 2255 | printk |
2342 | { | 2256 | ("nicstar%d: Out of huge buffers.\n", |
2343 | printk("nicstar%d: Out of huge buffers.\n", card->index); | 2257 | card->index); |
2344 | atomic_inc(&vcc->stats->rx_drop); | 2258 | atomic_inc(&vcc->stats->rx_drop); |
2345 | recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, | 2259 | recycle_iovec_rx_bufs(card, |
2346 | NS_SKB(iovb)->iovcnt); | 2260 | (struct iovec *) |
2347 | vc->rx_iov = NULL; | 2261 | iovb->data, |
2348 | recycle_iov_buf(card, iovb); | 2262 | NS_PRV_IOVCNT(iovb)); |
2349 | return; | 2263 | vc->rx_iov = NULL; |
2350 | } | 2264 | recycle_iov_buf(card, iovb); |
2351 | else if (card->hbpool.count < card->hbnr.min) | 2265 | return; |
2352 | { | 2266 | } else if (card->hbpool.count < card->hbnr.min) { |
2353 | struct sk_buff *new_hb; | 2267 | struct sk_buff *new_hb; |
2354 | if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL) | 2268 | if ((new_hb = |
2355 | { | 2269 | dev_alloc_skb(NS_HBUFSIZE)) != |
2356 | skb_queue_tail(&card->hbpool.queue, new_hb); | 2270 | NULL) { |
2357 | card->hbpool.count++; | 2271 | skb_queue_tail(&card->hbpool. |
2358 | } | 2272 | queue, new_hb); |
2359 | } | 2273 | card->hbpool.count++; |
2360 | NS_SKB_CB(hb)->buf_type = BUF_NONE; | 2274 | } |
2361 | } | 2275 | } |
2362 | else | 2276 | NS_PRV_BUFTYPE(hb) = BUF_NONE; |
2363 | if (--card->hbpool.count < card->hbnr.min) | 2277 | } else if (--card->hbpool.count < card->hbnr.min) { |
2364 | { | 2278 | struct sk_buff *new_hb; |
2365 | struct sk_buff *new_hb; | 2279 | if ((new_hb = |
2366 | if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL) | 2280 | dev_alloc_skb(NS_HBUFSIZE)) != NULL) { |
2367 | { | 2281 | NS_PRV_BUFTYPE(new_hb) = BUF_NONE; |
2368 | NS_SKB_CB(new_hb)->buf_type = BUF_NONE; | 2282 | skb_queue_tail(&card->hbpool.queue, |
2369 | skb_queue_tail(&card->hbpool.queue, new_hb); | 2283 | new_hb); |
2370 | card->hbpool.count++; | 2284 | card->hbpool.count++; |
2371 | } | 2285 | } |
2372 | if (card->hbpool.count < card->hbnr.min) | 2286 | if (card->hbpool.count < card->hbnr.min) { |
2373 | { | 2287 | if ((new_hb = |
2374 | if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL) | 2288 | dev_alloc_skb(NS_HBUFSIZE)) != |
2375 | { | 2289 | NULL) { |
2376 | NS_SKB_CB(new_hb)->buf_type = BUF_NONE; | 2290 | NS_PRV_BUFTYPE(new_hb) = |
2377 | skb_queue_tail(&card->hbpool.queue, new_hb); | 2291 | BUF_NONE; |
2378 | card->hbpool.count++; | 2292 | skb_queue_tail(&card->hbpool. |
2379 | } | 2293 | queue, new_hb); |
2380 | } | 2294 | card->hbpool.count++; |
2381 | } | 2295 | } |
2382 | 2296 | } | |
2383 | iov = (struct iovec *) iovb->data; | 2297 | } |
2384 | 2298 | ||
2385 | if (!atm_charge(vcc, hb->truesize)) | 2299 | iov = (struct iovec *)iovb->data; |
2386 | { | 2300 | |
2387 | recycle_iovec_rx_bufs(card, iov, NS_SKB(iovb)->iovcnt); | 2301 | if (!atm_charge(vcc, hb->truesize)) { |
2388 | if (card->hbpool.count < card->hbnr.max) | 2302 | recycle_iovec_rx_bufs(card, iov, |
2389 | { | 2303 | NS_PRV_IOVCNT(iovb)); |
2390 | skb_queue_tail(&card->hbpool.queue, hb); | 2304 | if (card->hbpool.count < card->hbnr.max) { |
2391 | card->hbpool.count++; | 2305 | skb_queue_tail(&card->hbpool.queue, hb); |
2392 | } | 2306 | card->hbpool.count++; |
2393 | else | 2307 | } else |
2394 | dev_kfree_skb_any(hb); | 2308 | dev_kfree_skb_any(hb); |
2395 | atomic_inc(&vcc->stats->rx_drop); | 2309 | atomic_inc(&vcc->stats->rx_drop); |
2396 | } | 2310 | } else { |
2397 | else | 2311 | /* Copy the small buffer to the huge buffer */ |
2398 | { | 2312 | sb = (struct sk_buff *)iov->iov_base; |
2399 | /* Copy the small buffer to the huge buffer */ | 2313 | skb_copy_from_linear_data(sb, hb->data, |
2400 | sb = (struct sk_buff *) iov->iov_base; | 2314 | iov->iov_len); |
2401 | skb_copy_from_linear_data(sb, hb->data, iov->iov_len); | 2315 | skb_put(hb, iov->iov_len); |
2402 | skb_put(hb, iov->iov_len); | 2316 | remaining = len - iov->iov_len; |
2403 | remaining = len - iov->iov_len; | 2317 | iov++; |
2404 | iov++; | 2318 | /* Free the small buffer */ |
2405 | /* Free the small buffer */ | 2319 | push_rxbufs(card, sb); |
2406 | push_rxbufs(card, sb); | 2320 | |
2407 | 2321 | /* Copy all large buffers to the huge buffer and free them */ | |
2408 | /* Copy all large buffers to the huge buffer and free them */ | 2322 | for (j = 1; j < NS_PRV_IOVCNT(iovb); j++) { |
2409 | for (j = 1; j < NS_SKB(iovb)->iovcnt; j++) | 2323 | lb = (struct sk_buff *)iov->iov_base; |
2410 | { | 2324 | tocopy = |
2411 | lb = (struct sk_buff *) iov->iov_base; | 2325 | min_t(int, remaining, iov->iov_len); |
2412 | tocopy = min_t(int, remaining, iov->iov_len); | 2326 | skb_copy_from_linear_data(lb, |
2413 | skb_copy_from_linear_data(lb, skb_tail_pointer(hb), tocopy); | 2327 | skb_tail_pointer |
2414 | skb_put(hb, tocopy); | 2328 | (hb), tocopy); |
2415 | iov++; | 2329 | skb_put(hb, tocopy); |
2416 | remaining -= tocopy; | 2330 | iov++; |
2417 | push_rxbufs(card, lb); | 2331 | remaining -= tocopy; |
2418 | } | 2332 | push_rxbufs(card, lb); |
2333 | } | ||
2419 | #ifdef EXTRA_DEBUG | 2334 | #ifdef EXTRA_DEBUG |
2420 | if (remaining != 0 || hb->len != len) | 2335 | if (remaining != 0 || hb->len != len) |
2421 | printk("nicstar%d: Huge buffer len mismatch.\n", card->index); | 2336 | printk |
2337 | ("nicstar%d: Huge buffer len mismatch.\n", | ||
2338 | card->index); | ||
2422 | #endif /* EXTRA_DEBUG */ | 2339 | #endif /* EXTRA_DEBUG */ |
2423 | ATM_SKB(hb)->vcc = vcc; | 2340 | ATM_SKB(hb)->vcc = vcc; |
2424 | #ifdef NS_USE_DESTRUCTORS | 2341 | #ifdef NS_USE_DESTRUCTORS |
2425 | hb->destructor = ns_hb_destructor; | 2342 | hb->destructor = ns_hb_destructor; |
2426 | #endif /* NS_USE_DESTRUCTORS */ | 2343 | #endif /* NS_USE_DESTRUCTORS */ |
2427 | __net_timestamp(hb); | 2344 | __net_timestamp(hb); |
2428 | vcc->push(vcc, hb); | 2345 | vcc->push(vcc, hb); |
2429 | atomic_inc(&vcc->stats->rx); | 2346 | atomic_inc(&vcc->stats->rx); |
2430 | } | 2347 | } |
2431 | } | 2348 | } |
2432 | 2349 | ||
2433 | vc->rx_iov = NULL; | 2350 | vc->rx_iov = NULL; |
2434 | recycle_iov_buf(card, iovb); | 2351 | recycle_iov_buf(card, iovb); |
2435 | } | 2352 | } |
2436 | 2353 | ||
2437 | } | 2354 | } |
2438 | 2355 | ||
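The end-of-PDU branch of dequeue_rx() above recovers the SDU length from the AAL5 trailer: the 16-bit length field sits six bytes before the end of the last cell, is read big-endian regardless of host byte order, a zero value is treated as 0x10000, and the result is sanity-checked against the bytes actually reassembled (the PDU plus its 8-byte trailer must fit, with at most 47 bytes of cell padding). A self-contained sketch of that check, using hypothetical names rather than the driver's structures:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/*
 * Hypothetical helper mirroring the trailer parsing above.  "payload"
 * holds the reassembled 48-byte cell payloads and "total" is 48 times
 * the cell count.  Returns the PDU length, or -1 on a size mismatch
 * (the driver additionally folds the CRC error flag into this check).
 */
static int aal5_pdu_len(const uint8_t *payload, size_t total)
{
        const uint8_t *l = payload + total - 6;     /* length field of the trailer */
        int len = l[0] << 8 | l[1];                 /* big-endian on the wire */

        if (len == 0)
                len = 0x10000;                      /* zero encodes 65536 */
        /* PDU + 8-byte trailer must fit, with at most 47 bytes of padding. */
        if (len + 8 > (int)total || len + 8 + 47 < (int)total)
                return -1;
        return len;
}

int main(void)
{
        uint8_t cells[96];                          /* two reassembled cells */

        memset(cells, 0, sizeof(cells));
        cells[90] = 0x00;                           /* length = 64, big-endian */
        cells[91] = 0x40;
        printf("PDU length: %d\n", aal5_pdu_len(cells, sizeof(cells)));
        return 0;
}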
2439 | |||
2440 | |||
2441 | #ifdef NS_USE_DESTRUCTORS | 2356 | #ifdef NS_USE_DESTRUCTORS |
2442 | 2357 | ||
2443 | static void ns_sb_destructor(struct sk_buff *sb) | 2358 | static void ns_sb_destructor(struct sk_buff *sb) |
2444 | { | 2359 | { |
2445 | ns_dev *card; | 2360 | ns_dev *card; |
2446 | u32 stat; | 2361 | u32 stat; |
2447 | 2362 | ||
2448 | card = (ns_dev *) ATM_SKB(sb)->vcc->dev->dev_data; | 2363 | card = (ns_dev *) ATM_SKB(sb)->vcc->dev->dev_data; |
2449 | stat = readl(card->membase + STAT); | 2364 | stat = readl(card->membase + STAT); |
2450 | card->sbfqc = ns_stat_sfbqc_get(stat); | 2365 | card->sbfqc = ns_stat_sfbqc_get(stat); |
2451 | card->lbfqc = ns_stat_lfbqc_get(stat); | 2366 | card->lbfqc = ns_stat_lfbqc_get(stat); |
2452 | 2367 | ||
2453 | do | 2368 | do { |
2454 | { | 2369 | sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); |
2455 | sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); | 2370 | if (sb == NULL) |
2456 | if (sb == NULL) | 2371 | break; |
2457 | break; | 2372 | NS_PRV_BUFTYPE(sb) = BUF_SM; |
2458 | NS_SKB_CB(sb)->buf_type = BUF_SM; | 2373 | skb_queue_tail(&card->sbpool.queue, sb); |
2459 | skb_queue_tail(&card->sbpool.queue, sb); | 2374 | skb_reserve(sb, NS_AAL0_HEADER); |
2460 | skb_reserve(sb, NS_AAL0_HEADER); | 2375 | push_rxbufs(card, sb); |
2461 | push_rxbufs(card, sb); | 2376 | } while (card->sbfqc < card->sbnr.min); |
2462 | } while (card->sbfqc < card->sbnr.min); | ||
2463 | } | 2377 | } |
2464 | 2378 | ||
2465 | |||
2466 | |||
2467 | static void ns_lb_destructor(struct sk_buff *lb) | 2379 | static void ns_lb_destructor(struct sk_buff *lb) |
2468 | { | 2380 | { |
2469 | ns_dev *card; | 2381 | ns_dev *card; |
2470 | u32 stat; | 2382 | u32 stat; |
2471 | 2383 | ||
2472 | card = (ns_dev *) ATM_SKB(lb)->vcc->dev->dev_data; | 2384 | card = (ns_dev *) ATM_SKB(lb)->vcc->dev->dev_data; |
2473 | stat = readl(card->membase + STAT); | 2385 | stat = readl(card->membase + STAT); |
2474 | card->sbfqc = ns_stat_sfbqc_get(stat); | 2386 | card->sbfqc = ns_stat_sfbqc_get(stat); |
2475 | card->lbfqc = ns_stat_lfbqc_get(stat); | 2387 | card->lbfqc = ns_stat_lfbqc_get(stat); |
2476 | 2388 | ||
2477 | do | 2389 | do { |
2478 | { | 2390 | lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); |
2479 | lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); | 2391 | if (lb == NULL) |
2480 | if (lb == NULL) | 2392 | break; |
2481 | break; | 2393 | NS_PRV_BUFTYPE(lb) = BUF_LG; |
2482 | NS_SKB_CB(lb)->buf_type = BUF_LG; | 2394 | skb_queue_tail(&card->lbpool.queue, lb); |
2483 | skb_queue_tail(&card->lbpool.queue, lb); | 2395 | skb_reserve(lb, NS_SMBUFSIZE); |
2484 | skb_reserve(lb, NS_SMBUFSIZE); | 2396 | push_rxbufs(card, lb); |
2485 | push_rxbufs(card, lb); | 2397 | } while (card->lbfqc < card->lbnr.min); |
2486 | } while (card->lbfqc < card->lbnr.min); | ||
2487 | } | 2398 | } |
2488 | 2399 | ||
2489 | |||
2490 | |||
2491 | static void ns_hb_destructor(struct sk_buff *hb) | 2400 | static void ns_hb_destructor(struct sk_buff *hb) |
2492 | { | 2401 | { |
2493 | ns_dev *card; | 2402 | ns_dev *card; |
2494 | 2403 | ||
2495 | card = (ns_dev *) ATM_SKB(hb)->vcc->dev->dev_data; | 2404 | card = (ns_dev *) ATM_SKB(hb)->vcc->dev->dev_data; |
2496 | 2405 | ||
2497 | while (card->hbpool.count < card->hbnr.init) | 2406 | while (card->hbpool.count < card->hbnr.init) { |
2498 | { | 2407 | hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); |
2499 | hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); | 2408 | if (hb == NULL) |
2500 | if (hb == NULL) | 2409 | break; |
2501 | break; | 2410 | NS_PRV_BUFTYPE(hb) = BUF_NONE; |
2502 | NS_SKB_CB(hb)->buf_type = BUF_NONE; | 2411 | skb_queue_tail(&card->hbpool.queue, hb); |
2503 | skb_queue_tail(&card->hbpool.queue, hb); | 2412 | card->hbpool.count++; |
2504 | card->hbpool.count++; | 2413 | } |
2505 | } | ||
2506 | } | 2414 | } |
2507 | 2415 | ||
2508 | #endif /* NS_USE_DESTRUCTORS */ | 2416 | #endif /* NS_USE_DESTRUCTORS */ |
2509 | 2417 | ||
2510 | 2418 | static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb) | |
2511 | static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb) | ||
2512 | { | 2419 | { |
2513 | struct ns_skb_cb *cb = NS_SKB_CB(skb); | 2420 | if (unlikely(NS_PRV_BUFTYPE(skb) == BUF_NONE)) { |
2514 | 2421 | printk("nicstar%d: What kind of rx buffer is this?\n", | |
2515 | if (unlikely(cb->buf_type == BUF_NONE)) { | 2422 | card->index); |
2516 | printk("nicstar%d: What kind of rx buffer is this?\n", card->index); | ||
2517 | dev_kfree_skb_any(skb); | 2423 | dev_kfree_skb_any(skb); |
2518 | } else | 2424 | } else |
2519 | push_rxbufs(card, skb); | 2425 | push_rxbufs(card, skb); |
2520 | } | 2426 | } |
2521 | 2427 | ||
2522 | 2428 | static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count) | |
2523 | static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count) | ||
2524 | { | 2429 | { |
2525 | while (count-- > 0) | 2430 | while (count-- > 0) |
2526 | recycle_rx_buf(card, (struct sk_buff *) (iov++)->iov_base); | 2431 | recycle_rx_buf(card, (struct sk_buff *)(iov++)->iov_base); |
2527 | } | 2432 | } |
2528 | 2433 | ||
2529 | 2434 | static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb) | |
2530 | static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb) | ||
2531 | { | 2435 | { |
2532 | if (card->iovpool.count < card->iovnr.max) | 2436 | if (card->iovpool.count < card->iovnr.max) { |
2533 | { | 2437 | skb_queue_tail(&card->iovpool.queue, iovb); |
2534 | skb_queue_tail(&card->iovpool.queue, iovb); | 2438 | card->iovpool.count++; |
2535 | card->iovpool.count++; | 2439 | } else |
2536 | } | 2440 | dev_kfree_skb_any(iovb); |
2537 | else | ||
2538 | dev_kfree_skb_any(iovb); | ||
2539 | } | 2441 | } |
2540 | 2442 | ||
2541 | 2443 | static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb) | |
2542 | |||
2543 | static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb) | ||
2544 | { | 2444 | { |
2545 | skb_unlink(sb, &card->sbpool.queue); | 2445 | skb_unlink(sb, &card->sbpool.queue); |
2546 | #ifdef NS_USE_DESTRUCTORS | 2446 | #ifdef NS_USE_DESTRUCTORS |
2547 | if (card->sbfqc < card->sbnr.min) | 2447 | if (card->sbfqc < card->sbnr.min) |
2548 | #else | 2448 | #else |
2549 | if (card->sbfqc < card->sbnr.init) | 2449 | if (card->sbfqc < card->sbnr.init) { |
2550 | { | 2450 | struct sk_buff *new_sb; |
2551 | struct sk_buff *new_sb; | 2451 | if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) { |
2552 | if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) | 2452 | NS_PRV_BUFTYPE(new_sb) = BUF_SM; |
2553 | { | 2453 | skb_queue_tail(&card->sbpool.queue, new_sb); |
2554 | NS_SKB_CB(new_sb)->buf_type = BUF_SM; | 2454 | skb_reserve(new_sb, NS_AAL0_HEADER); |
2555 | skb_queue_tail(&card->sbpool.queue, new_sb); | 2455 | push_rxbufs(card, new_sb); |
2556 | skb_reserve(new_sb, NS_AAL0_HEADER); | 2456 | } |
2557 | push_rxbufs(card, new_sb); | 2457 | } |
2558 | } | 2458 | if (card->sbfqc < card->sbnr.init) |
2559 | } | ||
2560 | if (card->sbfqc < card->sbnr.init) | ||
2561 | #endif /* NS_USE_DESTRUCTORS */ | 2459 | #endif /* NS_USE_DESTRUCTORS */ |
2562 | { | 2460 | { |
2563 | struct sk_buff *new_sb; | 2461 | struct sk_buff *new_sb; |
2564 | if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) | 2462 | if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) { |
2565 | { | 2463 | NS_PRV_BUFTYPE(new_sb) = BUF_SM; |
2566 | NS_SKB_CB(new_sb)->buf_type = BUF_SM; | 2464 | skb_queue_tail(&card->sbpool.queue, new_sb); |
2567 | skb_queue_tail(&card->sbpool.queue, new_sb); | 2465 | skb_reserve(new_sb, NS_AAL0_HEADER); |
2568 | skb_reserve(new_sb, NS_AAL0_HEADER); | 2466 | push_rxbufs(card, new_sb); |
2569 | push_rxbufs(card, new_sb); | 2467 | } |
2570 | } | 2468 | } |
2571 | } | ||
2572 | } | 2469 | } |
2573 | 2470 | ||
2574 | 2471 | static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb) | |
2575 | |||
2576 | static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb) | ||
2577 | { | 2472 | { |
2578 | skb_unlink(lb, &card->lbpool.queue); | 2473 | skb_unlink(lb, &card->lbpool.queue); |
2579 | #ifdef NS_USE_DESTRUCTORS | 2474 | #ifdef NS_USE_DESTRUCTORS |
2580 | if (card->lbfqc < card->lbnr.min) | 2475 | if (card->lbfqc < card->lbnr.min) |
2581 | #else | 2476 | #else |
2582 | if (card->lbfqc < card->lbnr.init) | 2477 | if (card->lbfqc < card->lbnr.init) { |
2583 | { | 2478 | struct sk_buff *new_lb; |
2584 | struct sk_buff *new_lb; | 2479 | if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) { |
2585 | if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) | 2480 | NS_PRV_BUFTYPE(new_lb) = BUF_LG; |
2586 | { | 2481 | skb_queue_tail(&card->lbpool.queue, new_lb); |
2587 | NS_SKB_CB(new_lb)->buf_type = BUF_LG; | 2482 | skb_reserve(new_lb, NS_SMBUFSIZE); |
2588 | skb_queue_tail(&card->lbpool.queue, new_lb); | 2483 | push_rxbufs(card, new_lb); |
2589 | skb_reserve(new_lb, NS_SMBUFSIZE); | 2484 | } |
2590 | push_rxbufs(card, new_lb); | 2485 | } |
2591 | } | 2486 | if (card->lbfqc < card->lbnr.init) |
2592 | } | ||
2593 | if (card->lbfqc < card->lbnr.init) | ||
2594 | #endif /* NS_USE_DESTRUCTORS */ | 2487 | #endif /* NS_USE_DESTRUCTORS */ |
2595 | { | 2488 | { |
2596 | struct sk_buff *new_lb; | 2489 | struct sk_buff *new_lb; |
2597 | if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) | 2490 | if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) { |
2598 | { | 2491 | NS_PRV_BUFTYPE(new_lb) = BUF_LG; |
2599 | NS_SKB_CB(new_lb)->buf_type = BUF_LG; | 2492 | skb_queue_tail(&card->lbpool.queue, new_lb); |
2600 | skb_queue_tail(&card->lbpool.queue, new_lb); | 2493 | skb_reserve(new_lb, NS_SMBUFSIZE); |
2601 | skb_reserve(new_lb, NS_SMBUFSIZE); | 2494 | push_rxbufs(card, new_lb); |
2602 | push_rxbufs(card, new_lb); | 2495 | } |
2603 | } | 2496 | } |
2604 | } | ||
2605 | } | 2497 | } |
2606 | 2498 | ||
2607 | 2499 | static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page) | |
2608 | |||
2609 | static int ns_proc_read(struct atm_dev *dev, loff_t *pos, char *page) | ||
2610 | { | 2500 | { |
2611 | u32 stat; | 2501 | u32 stat; |
2612 | ns_dev *card; | 2502 | ns_dev *card; |
2613 | int left; | 2503 | int left; |
2614 | 2504 | ||
2615 | left = (int) *pos; | 2505 | left = (int)*pos; |
2616 | card = (ns_dev *) dev->dev_data; | 2506 | card = (ns_dev *) dev->dev_data; |
2617 | stat = readl(card->membase + STAT); | 2507 | stat = readl(card->membase + STAT); |
2618 | if (!left--) | 2508 | if (!left--) |
2619 | return sprintf(page, "Pool count min init max \n"); | 2509 | return sprintf(page, "Pool count min init max \n"); |
2620 | if (!left--) | 2510 | if (!left--) |
2621 | return sprintf(page, "Small %5d %5d %5d %5d \n", | 2511 | return sprintf(page, "Small %5d %5d %5d %5d \n", |
2622 | ns_stat_sfbqc_get(stat), card->sbnr.min, card->sbnr.init, | 2512 | ns_stat_sfbqc_get(stat), card->sbnr.min, |
2623 | card->sbnr.max); | 2513 | card->sbnr.init, card->sbnr.max); |
2624 | if (!left--) | 2514 | if (!left--) |
2625 | return sprintf(page, "Large %5d %5d %5d %5d \n", | 2515 | return sprintf(page, "Large %5d %5d %5d %5d \n", |
2626 | ns_stat_lfbqc_get(stat), card->lbnr.min, card->lbnr.init, | 2516 | ns_stat_lfbqc_get(stat), card->lbnr.min, |
2627 | card->lbnr.max); | 2517 | card->lbnr.init, card->lbnr.max); |
2628 | if (!left--) | 2518 | if (!left--) |
2629 | return sprintf(page, "Huge %5d %5d %5d %5d \n", card->hbpool.count, | 2519 | return sprintf(page, "Huge %5d %5d %5d %5d \n", |
2630 | card->hbnr.min, card->hbnr.init, card->hbnr.max); | 2520 | card->hbpool.count, card->hbnr.min, |
2631 | if (!left--) | 2521 | card->hbnr.init, card->hbnr.max); |
2632 | return sprintf(page, "Iovec %5d %5d %5d %5d \n", card->iovpool.count, | 2522 | if (!left--) |
2633 | card->iovnr.min, card->iovnr.init, card->iovnr.max); | 2523 | return sprintf(page, "Iovec %5d %5d %5d %5d \n", |
2634 | if (!left--) | 2524 | card->iovpool.count, card->iovnr.min, |
2635 | { | 2525 | card->iovnr.init, card->iovnr.max); |
2636 | int retval; | 2526 | if (!left--) { |
2637 | retval = sprintf(page, "Interrupt counter: %u \n", card->intcnt); | 2527 | int retval; |
2638 | card->intcnt = 0; | 2528 | retval = |
2639 | return retval; | 2529 | sprintf(page, "Interrupt counter: %u \n", card->intcnt); |
2640 | } | 2530 | card->intcnt = 0; |
2531 | return retval; | ||
2532 | } | ||
2641 | #if 0 | 2533 | #if 0 |
2642 | /* Dump 25.6 Mbps PHY registers */ | 2534 | /* Dump 25.6 Mbps PHY registers */ |
2643 | /* Now there's a 25.6 Mbps PHY driver this code isn't needed. I left it | 2535 | /* Now there's a 25.6 Mbps PHY driver this code isn't needed. I left it |
2644 | here just in case it's needed for debugging. */ | 2536 | here just in case it's needed for debugging. */ |
2645 | if (card->max_pcr == ATM_25_PCR && !left--) | 2537 | if (card->max_pcr == ATM_25_PCR && !left--) { |
2646 | { | 2538 | u32 phy_regs[4]; |
2647 | u32 phy_regs[4]; | 2539 | u32 i; |
2648 | u32 i; | 2540 | |
2649 | 2541 | for (i = 0; i < 4; i++) { | |
2650 | for (i = 0; i < 4; i++) | 2542 | while (CMD_BUSY(card)) ; |
2651 | { | 2543 | writel(NS_CMD_READ_UTILITY | 0x00000200 | i, |
2652 | while (CMD_BUSY(card)); | 2544 | card->membase + CMD); |
2653 | writel(NS_CMD_READ_UTILITY | 0x00000200 | i, card->membase + CMD); | 2545 | while (CMD_BUSY(card)) ; |
2654 | while (CMD_BUSY(card)); | 2546 | phy_regs[i] = readl(card->membase + DR0) & 0x000000FF; |
2655 | phy_regs[i] = readl(card->membase + DR0) & 0x000000FF; | 2547 | } |
2656 | } | 2548 | |
2657 | 2549 | return sprintf(page, "PHY regs: 0x%02X 0x%02X 0x%02X 0x%02X \n", | |
2658 | return sprintf(page, "PHY regs: 0x%02X 0x%02X 0x%02X 0x%02X \n", | 2550 | phy_regs[0], phy_regs[1], phy_regs[2], |
2659 | phy_regs[0], phy_regs[1], phy_regs[2], phy_regs[3]); | 2551 | phy_regs[3]); |
2660 | } | 2552 | } |
2661 | #endif /* 0 - Dump 25.6 Mbps PHY registers */ | 2553 | #endif /* 0 - Dump 25.6 Mbps PHY registers */ |
2662 | #if 0 | 2554 | #if 0 |
2663 | /* Dump TST */ | 2555 | /* Dump TST */ |
2664 | if (left-- < NS_TST_NUM_ENTRIES) | 2556 | if (left-- < NS_TST_NUM_ENTRIES) { |
2665 | { | 2557 | if (card->tste2vc[left + 1] == NULL) |
2666 | if (card->tste2vc[left + 1] == NULL) | 2558 | return sprintf(page, "%5d - VBR/UBR \n", left + 1); |
2667 | return sprintf(page, "%5d - VBR/UBR \n", left + 1); | 2559 | else |
2668 | else | 2560 | return sprintf(page, "%5d - %d %d \n", left + 1, |
2669 | return sprintf(page, "%5d - %d %d \n", left + 1, | 2561 | card->tste2vc[left + 1]->tx_vcc->vpi, |
2670 | card->tste2vc[left + 1]->tx_vcc->vpi, | 2562 | card->tste2vc[left + 1]->tx_vcc->vci); |
2671 | card->tste2vc[left + 1]->tx_vcc->vci); | 2563 | } |
2672 | } | ||
2673 | #endif /* 0 */ | 2564 | #endif /* 0 */ |
2674 | return 0; | 2565 | return 0; |
2675 | } | 2566 | } |
2676 | 2567 | ||
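ns_proc_read() above follows the one-line-per-call convention of the ATM device proc_read hook: each call renders only the line selected by *pos into page and returns its length, with 0 meaning end of output (the PHY register and TST dumps stay under "#if 0" for debugging). The pattern, stripped to a stand-alone sketch with made-up line content:

#include <stdio.h>

/* One line per call, selected by pos; return 0 when nothing is left.
 * Purely illustrative of the convention used by ns_proc_read(). */
static int demo_proc_read(long pos, char *page)
{
        int left = (int)pos;

        if (!left--)
                return sprintf(page, "Pool  count min  init max\n");
        if (!left--)
                return sprintf(page, "Small %5d %5d %5d %5d\n", 10, 3, 10, 20);
        if (!left--)
                return sprintf(page, "Large %5d %5d %5d %5d\n", 8, 3, 8, 16);
        return 0;
}

int main(void)
{
        char page[128];
        long pos = 0;
        int n;

        while ((n = demo_proc_read(pos++, page)) > 0)
                fputs(page, stdout);
        return 0;
}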
2677 | 2568 | static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg) | |
2678 | |||
2679 | static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg) | ||
2680 | { | 2569 | { |
2681 | ns_dev *card; | 2570 | ns_dev *card; |
2682 | pool_levels pl; | 2571 | pool_levels pl; |
2683 | long btype; | 2572 | long btype; |
2684 | unsigned long flags; | 2573 | unsigned long flags; |
2685 | 2574 | ||
2686 | card = dev->dev_data; | 2575 | card = dev->dev_data; |
2687 | switch (cmd) | 2576 | switch (cmd) { |
2688 | { | 2577 | case NS_GETPSTAT: |
2689 | case NS_GETPSTAT: | 2578 | if (get_user |
2690 | if (get_user(pl.buftype, &((pool_levels __user *) arg)->buftype)) | 2579 | (pl.buftype, &((pool_levels __user *) arg)->buftype)) |
2691 | return -EFAULT; | 2580 | return -EFAULT; |
2692 | switch (pl.buftype) | 2581 | switch (pl.buftype) { |
2693 | { | 2582 | case NS_BUFTYPE_SMALL: |
2694 | case NS_BUFTYPE_SMALL: | 2583 | pl.count = |
2695 | pl.count = ns_stat_sfbqc_get(readl(card->membase + STAT)); | 2584 | ns_stat_sfbqc_get(readl(card->membase + STAT)); |
2696 | pl.level.min = card->sbnr.min; | 2585 | pl.level.min = card->sbnr.min; |
2697 | pl.level.init = card->sbnr.init; | 2586 | pl.level.init = card->sbnr.init; |
2698 | pl.level.max = card->sbnr.max; | 2587 | pl.level.max = card->sbnr.max; |
2699 | break; | 2588 | break; |
2700 | 2589 | ||
2701 | case NS_BUFTYPE_LARGE: | 2590 | case NS_BUFTYPE_LARGE: |
2702 | pl.count = ns_stat_lfbqc_get(readl(card->membase + STAT)); | 2591 | pl.count = |
2703 | pl.level.min = card->lbnr.min; | 2592 | ns_stat_lfbqc_get(readl(card->membase + STAT)); |
2704 | pl.level.init = card->lbnr.init; | 2593 | pl.level.min = card->lbnr.min; |
2705 | pl.level.max = card->lbnr.max; | 2594 | pl.level.init = card->lbnr.init; |
2706 | break; | 2595 | pl.level.max = card->lbnr.max; |
2707 | 2596 | break; | |
2708 | case NS_BUFTYPE_HUGE: | 2597 | |
2709 | pl.count = card->hbpool.count; | 2598 | case NS_BUFTYPE_HUGE: |
2710 | pl.level.min = card->hbnr.min; | 2599 | pl.count = card->hbpool.count; |
2711 | pl.level.init = card->hbnr.init; | 2600 | pl.level.min = card->hbnr.min; |
2712 | pl.level.max = card->hbnr.max; | 2601 | pl.level.init = card->hbnr.init; |
2713 | break; | 2602 | pl.level.max = card->hbnr.max; |
2714 | 2603 | break; | |
2715 | case NS_BUFTYPE_IOVEC: | 2604 | |
2716 | pl.count = card->iovpool.count; | 2605 | case NS_BUFTYPE_IOVEC: |
2717 | pl.level.min = card->iovnr.min; | 2606 | pl.count = card->iovpool.count; |
2718 | pl.level.init = card->iovnr.init; | 2607 | pl.level.min = card->iovnr.min; |
2719 | pl.level.max = card->iovnr.max; | 2608 | pl.level.init = card->iovnr.init; |
2720 | break; | 2609 | pl.level.max = card->iovnr.max; |
2721 | 2610 | break; | |
2722 | default: | 2611 | |
2723 | return -ENOIOCTLCMD; | 2612 | default: |
2724 | 2613 | return -ENOIOCTLCMD; | |
2725 | } | 2614 | |
2726 | if (!copy_to_user((pool_levels __user *) arg, &pl, sizeof(pl))) | 2615 | } |
2727 | return (sizeof(pl)); | 2616 | if (!copy_to_user((pool_levels __user *) arg, &pl, sizeof(pl))) |
2728 | else | 2617 | return (sizeof(pl)); |
2729 | return -EFAULT; | 2618 | else |
2730 | 2619 | return -EFAULT; | |
2731 | case NS_SETBUFLEV: | 2620 | |
2732 | if (!capable(CAP_NET_ADMIN)) | 2621 | case NS_SETBUFLEV: |
2733 | return -EPERM; | 2622 | if (!capable(CAP_NET_ADMIN)) |
2734 | if (copy_from_user(&pl, (pool_levels __user *) arg, sizeof(pl))) | 2623 | return -EPERM; |
2735 | return -EFAULT; | 2624 | if (copy_from_user(&pl, (pool_levels __user *) arg, sizeof(pl))) |
2736 | if (pl.level.min >= pl.level.init || pl.level.init >= pl.level.max) | 2625 | return -EFAULT; |
2737 | return -EINVAL; | 2626 | if (pl.level.min >= pl.level.init |
2738 | if (pl.level.min == 0) | 2627 | || pl.level.init >= pl.level.max) |
2739 | return -EINVAL; | 2628 | return -EINVAL; |
2740 | switch (pl.buftype) | 2629 | if (pl.level.min == 0) |
2741 | { | 2630 | return -EINVAL; |
2742 | case NS_BUFTYPE_SMALL: | 2631 | switch (pl.buftype) { |
2743 | if (pl.level.max > TOP_SB) | 2632 | case NS_BUFTYPE_SMALL: |
2744 | return -EINVAL; | 2633 | if (pl.level.max > TOP_SB) |
2745 | card->sbnr.min = pl.level.min; | 2634 | return -EINVAL; |
2746 | card->sbnr.init = pl.level.init; | 2635 | card->sbnr.min = pl.level.min; |
2747 | card->sbnr.max = pl.level.max; | 2636 | card->sbnr.init = pl.level.init; |
2748 | break; | 2637 | card->sbnr.max = pl.level.max; |
2749 | 2638 | break; | |
2750 | case NS_BUFTYPE_LARGE: | 2639 | |
2751 | if (pl.level.max > TOP_LB) | 2640 | case NS_BUFTYPE_LARGE: |
2752 | return -EINVAL; | 2641 | if (pl.level.max > TOP_LB) |
2753 | card->lbnr.min = pl.level.min; | 2642 | return -EINVAL; |
2754 | card->lbnr.init = pl.level.init; | 2643 | card->lbnr.min = pl.level.min; |
2755 | card->lbnr.max = pl.level.max; | 2644 | card->lbnr.init = pl.level.init; |
2756 | break; | 2645 | card->lbnr.max = pl.level.max; |
2757 | 2646 | break; | |
2758 | case NS_BUFTYPE_HUGE: | 2647 | |
2759 | if (pl.level.max > TOP_HB) | 2648 | case NS_BUFTYPE_HUGE: |
2760 | return -EINVAL; | 2649 | if (pl.level.max > TOP_HB) |
2761 | card->hbnr.min = pl.level.min; | 2650 | return -EINVAL; |
2762 | card->hbnr.init = pl.level.init; | 2651 | card->hbnr.min = pl.level.min; |
2763 | card->hbnr.max = pl.level.max; | 2652 | card->hbnr.init = pl.level.init; |
2764 | break; | 2653 | card->hbnr.max = pl.level.max; |
2765 | 2654 | break; | |
2766 | case NS_BUFTYPE_IOVEC: | 2655 | |
2767 | if (pl.level.max > TOP_IOVB) | 2656 | case NS_BUFTYPE_IOVEC: |
2768 | return -EINVAL; | 2657 | if (pl.level.max > TOP_IOVB) |
2769 | card->iovnr.min = pl.level.min; | 2658 | return -EINVAL; |
2770 | card->iovnr.init = pl.level.init; | 2659 | card->iovnr.min = pl.level.min; |
2771 | card->iovnr.max = pl.level.max; | 2660 | card->iovnr.init = pl.level.init; |
2772 | break; | 2661 | card->iovnr.max = pl.level.max; |
2773 | 2662 | break; | |
2774 | default: | 2663 | |
2775 | return -EINVAL; | 2664 | default: |
2776 | 2665 | return -EINVAL; | |
2777 | } | 2666 | |
2778 | return 0; | 2667 | } |
2779 | 2668 | return 0; | |
2780 | case NS_ADJBUFLEV: | 2669 | |
2781 | if (!capable(CAP_NET_ADMIN)) | 2670 | case NS_ADJBUFLEV: |
2782 | return -EPERM; | 2671 | if (!capable(CAP_NET_ADMIN)) |
2783 | btype = (long) arg; /* a long is the same size as a pointer or bigger */ | 2672 | return -EPERM; |
2784 | switch (btype) | 2673 | btype = (long)arg; /* a long is the same size as a pointer or bigger */ |
2785 | { | 2674 | switch (btype) { |
2786 | case NS_BUFTYPE_SMALL: | 2675 | case NS_BUFTYPE_SMALL: |
2787 | while (card->sbfqc < card->sbnr.init) | 2676 | while (card->sbfqc < card->sbnr.init) { |
2788 | { | 2677 | struct sk_buff *sb; |
2789 | struct sk_buff *sb; | 2678 | |
2790 | 2679 | sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); | |
2791 | sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); | 2680 | if (sb == NULL) |
2792 | if (sb == NULL) | 2681 | return -ENOMEM; |
2793 | return -ENOMEM; | 2682 | NS_PRV_BUFTYPE(sb) = BUF_SM; |
2794 | NS_SKB_CB(sb)->buf_type = BUF_SM; | 2683 | skb_queue_tail(&card->sbpool.queue, sb); |
2795 | skb_queue_tail(&card->sbpool.queue, sb); | 2684 | skb_reserve(sb, NS_AAL0_HEADER); |
2796 | skb_reserve(sb, NS_AAL0_HEADER); | 2685 | push_rxbufs(card, sb); |
2797 | push_rxbufs(card, sb); | 2686 | } |
2798 | } | 2687 | break; |
2799 | break; | 2688 | |
2800 | 2689 | case NS_BUFTYPE_LARGE: | |
2801 | case NS_BUFTYPE_LARGE: | 2690 | while (card->lbfqc < card->lbnr.init) { |
2802 | while (card->lbfqc < card->lbnr.init) | 2691 | struct sk_buff *lb; |
2803 | { | 2692 | |
2804 | struct sk_buff *lb; | 2693 | lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); |
2805 | 2694 | if (lb == NULL) | |
2806 | lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); | 2695 | return -ENOMEM; |
2807 | if (lb == NULL) | 2696 | NS_PRV_BUFTYPE(lb) = BUF_LG; |
2808 | return -ENOMEM; | 2697 | skb_queue_tail(&card->lbpool.queue, lb); |
2809 | NS_SKB_CB(lb)->buf_type = BUF_LG; | 2698 | skb_reserve(lb, NS_SMBUFSIZE); |
2810 | skb_queue_tail(&card->lbpool.queue, lb); | 2699 | push_rxbufs(card, lb); |
2811 | skb_reserve(lb, NS_SMBUFSIZE); | 2700 | } |
2812 | push_rxbufs(card, lb); | 2701 | break; |
2813 | } | 2702 | |
2814 | break; | 2703 | case NS_BUFTYPE_HUGE: |
2815 | 2704 | while (card->hbpool.count > card->hbnr.init) { | |
2816 | case NS_BUFTYPE_HUGE: | 2705 | struct sk_buff *hb; |
2817 | while (card->hbpool.count > card->hbnr.init) | 2706 | |
2818 | { | 2707 | spin_lock_irqsave(&card->int_lock, flags); |
2819 | struct sk_buff *hb; | 2708 | hb = skb_dequeue(&card->hbpool.queue); |
2820 | 2709 | card->hbpool.count--; | |
2821 | spin_lock_irqsave(&card->int_lock, flags); | 2710 | spin_unlock_irqrestore(&card->int_lock, flags); |
2822 | hb = skb_dequeue(&card->hbpool.queue); | 2711 | if (hb == NULL) |
2823 | card->hbpool.count--; | 2712 | printk |
2824 | spin_unlock_irqrestore(&card->int_lock, flags); | 2713 | ("nicstar%d: huge buffer count inconsistent.\n", |
2825 | if (hb == NULL) | 2714 | card->index); |
2826 | printk("nicstar%d: huge buffer count inconsistent.\n", | 2715 | else |
2827 | card->index); | 2716 | dev_kfree_skb_any(hb); |
2828 | else | 2717 | |
2829 | dev_kfree_skb_any(hb); | 2718 | } |
2830 | 2719 | while (card->hbpool.count < card->hbnr.init) { | |
2831 | } | 2720 | struct sk_buff *hb; |
2832 | while (card->hbpool.count < card->hbnr.init) | 2721 | |
2833 | { | 2722 | hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); |
2834 | struct sk_buff *hb; | 2723 | if (hb == NULL) |
2835 | 2724 | return -ENOMEM; | |
2836 | hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); | 2725 | NS_PRV_BUFTYPE(hb) = BUF_NONE; |
2837 | if (hb == NULL) | 2726 | spin_lock_irqsave(&card->int_lock, flags); |
2838 | return -ENOMEM; | 2727 | skb_queue_tail(&card->hbpool.queue, hb); |
2839 | NS_SKB_CB(hb)->buf_type = BUF_NONE; | 2728 | card->hbpool.count++; |
2840 | spin_lock_irqsave(&card->int_lock, flags); | 2729 | spin_unlock_irqrestore(&card->int_lock, flags); |
2841 | skb_queue_tail(&card->hbpool.queue, hb); | 2730 | } |
2842 | card->hbpool.count++; | 2731 | break; |
2843 | spin_unlock_irqrestore(&card->int_lock, flags); | 2732 | |
2844 | } | 2733 | case NS_BUFTYPE_IOVEC: |
2845 | break; | 2734 | while (card->iovpool.count > card->iovnr.init) { |
2846 | 2735 | struct sk_buff *iovb; | |
2847 | case NS_BUFTYPE_IOVEC: | 2736 | |
2848 | while (card->iovpool.count > card->iovnr.init) | 2737 | spin_lock_irqsave(&card->int_lock, flags); |
2849 | { | 2738 | iovb = skb_dequeue(&card->iovpool.queue); |
2850 | struct sk_buff *iovb; | 2739 | card->iovpool.count--; |
2851 | 2740 | spin_unlock_irqrestore(&card->int_lock, flags); | |
2852 | spin_lock_irqsave(&card->int_lock, flags); | 2741 | if (iovb == NULL) |
2853 | iovb = skb_dequeue(&card->iovpool.queue); | 2742 | printk |
2854 | card->iovpool.count--; | 2743 | ("nicstar%d: iovec buffer count inconsistent.\n", |
2855 | spin_unlock_irqrestore(&card->int_lock, flags); | 2744 | card->index); |
2856 | if (iovb == NULL) | 2745 | else |
2857 | printk("nicstar%d: iovec buffer count inconsistent.\n", | 2746 | dev_kfree_skb_any(iovb); |
2858 | card->index); | 2747 | |
2859 | else | 2748 | } |
2860 | dev_kfree_skb_any(iovb); | 2749 | while (card->iovpool.count < card->iovnr.init) { |
2861 | 2750 | struct sk_buff *iovb; | |
2862 | } | 2751 | |
2863 | while (card->iovpool.count < card->iovnr.init) | 2752 | iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL); |
2864 | { | 2753 | if (iovb == NULL) |
2865 | struct sk_buff *iovb; | 2754 | return -ENOMEM; |
2866 | 2755 | NS_PRV_BUFTYPE(iovb) = BUF_NONE; | |
2867 | iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL); | 2756 | spin_lock_irqsave(&card->int_lock, flags); |
2868 | if (iovb == NULL) | 2757 | skb_queue_tail(&card->iovpool.queue, iovb); |
2869 | return -ENOMEM; | 2758 | card->iovpool.count++; |
2870 | NS_SKB_CB(iovb)->buf_type = BUF_NONE; | 2759 | spin_unlock_irqrestore(&card->int_lock, flags); |
2871 | spin_lock_irqsave(&card->int_lock, flags); | 2760 | } |
2872 | skb_queue_tail(&card->iovpool.queue, iovb); | 2761 | break; |
2873 | card->iovpool.count++; | 2762 | |
2874 | spin_unlock_irqrestore(&card->int_lock, flags); | 2763 | default: |
2875 | } | 2764 | return -EINVAL; |
2876 | break; | 2765 | |
2877 | 2766 | } | |
2878 | default: | 2767 | return 0; |
2879 | return -EINVAL; | 2768 | |
2880 | 2769 | default: | |
2881 | } | 2770 | if (dev->phy && dev->phy->ioctl) { |
2882 | return 0; | 2771 | return dev->phy->ioctl(dev, cmd, arg); |
2883 | 2772 | } else { | |
2884 | default: | 2773 | printk("nicstar%d: %s == NULL \n", card->index, |
2885 | if (dev->phy && dev->phy->ioctl) { | 2774 | dev->phy ? "dev->phy->ioctl" : "dev->phy"); |
2886 | return dev->phy->ioctl(dev, cmd, arg); | 2775 | return -ENOIOCTLCMD; |
2887 | } | 2776 | } |
2888 | else { | 2777 | } |
2889 | printk("nicstar%d: %s == NULL \n", card->index, | ||
2890 | dev->phy ? "dev->phy->ioctl" : "dev->phy"); | ||
2891 | return -ENOIOCTLCMD; | ||
2892 | } | ||
2893 | } | ||
2894 | } | 2778 | } |
2895 | 2779 | ||
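Each buffer pool in the hunks above carries three watermarks. NS_SETBUFLEV only accepts 0 < min < init < max, and NS_ADJBUFLEV then tops a pool back up to (or trims it down to) its init level, which is the same idea dequeue_sm_buf()/dequeue_lg_buf() apply one buffer at a time on the receive path. A simplified sketch of that replenishment scheme, with hypothetical callbacks standing in for dev_alloc_skb()/push_rxbufs():

#include <stddef.h>

/* Illustrative watermark pool: names and callbacks are placeholders,
 * not the driver's API. */
struct pool_levels_demo {
        int count;                      /* buffers currently available */
        int min, init, max;
};

static int levels_valid(const struct pool_levels_demo *p)
{
        /* Same constraint NS_SETBUFLEV enforces above. */
        return p->min > 0 && p->min < p->init && p->init < p->max;
}

/* Bring the pool back up to its init level, stopping early if the
 * allocator fails (the driver returns -ENOMEM in that case). */
static int pool_refill(struct pool_levels_demo *p,
                       void *(*alloc_buf)(void),
                       void (*give_to_hw)(void *buf))
{
        if (!levels_valid(p))
                return -1;
        while (p->count < p->init) {
                void *buf = alloc_buf();

                if (buf == NULL)
                        return -1;
                give_to_hw(buf);
                p->count++;
        }
        return 0;
}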
2896 | 2780 | #ifdef EXTRA_DEBUG | |
2897 | static void which_list(ns_dev *card, struct sk_buff *skb) | 2781 | static void which_list(ns_dev * card, struct sk_buff *skb) |
2898 | { | 2782 | { |
2899 | printk("skb buf_type: 0x%08x\n", NS_SKB_CB(skb)->buf_type); | 2783 | printk("skb buf_type: 0x%08x\n", NS_PRV_BUFTYPE(skb)); |
2900 | } | 2784 | } |
2901 | 2785 | #endif /* EXTRA_DEBUG */ | |
2902 | 2786 | ||
2903 | static void ns_poll(unsigned long arg) | 2787 | static void ns_poll(unsigned long arg) |
2904 | { | 2788 | { |
2905 | int i; | 2789 | int i; |
2906 | ns_dev *card; | 2790 | ns_dev *card; |
2907 | unsigned long flags; | 2791 | unsigned long flags; |
2908 | u32 stat_r, stat_w; | 2792 | u32 stat_r, stat_w; |
2909 | 2793 | ||
2910 | PRINTK("nicstar: Entering ns_poll().\n"); | 2794 | PRINTK("nicstar: Entering ns_poll().\n"); |
2911 | for (i = 0; i < num_cards; i++) | 2795 | for (i = 0; i < num_cards; i++) { |
2912 | { | 2796 | card = cards[i]; |
2913 | card = cards[i]; | 2797 | if (spin_is_locked(&card->int_lock)) { |
2914 | if (spin_is_locked(&card->int_lock)) { | 2798 | /* Probably it isn't worth spinning */ |
2915 | /* Probably it isn't worth spinning */ | 2799 | continue; |
2916 | continue; | 2800 | } |
2917 | } | 2801 | spin_lock_irqsave(&card->int_lock, flags); |
2918 | spin_lock_irqsave(&card->int_lock, flags); | 2802 | |
2919 | 2803 | stat_w = 0; | |
2920 | stat_w = 0; | 2804 | stat_r = readl(card->membase + STAT); |
2921 | stat_r = readl(card->membase + STAT); | 2805 | if (stat_r & NS_STAT_TSIF) |
2922 | if (stat_r & NS_STAT_TSIF) | 2806 | stat_w |= NS_STAT_TSIF; |
2923 | stat_w |= NS_STAT_TSIF; | 2807 | if (stat_r & NS_STAT_EOPDU) |
2924 | if (stat_r & NS_STAT_EOPDU) | 2808 | stat_w |= NS_STAT_EOPDU; |
2925 | stat_w |= NS_STAT_EOPDU; | 2809 | |
2926 | 2810 | process_tsq(card); | |
2927 | process_tsq(card); | 2811 | process_rsq(card); |
2928 | process_rsq(card); | 2812 | |
2929 | 2813 | writel(stat_w, card->membase + STAT); | |
2930 | writel(stat_w, card->membase + STAT); | 2814 | spin_unlock_irqrestore(&card->int_lock, flags); |
2931 | spin_unlock_irqrestore(&card->int_lock, flags); | 2815 | } |
2932 | } | 2816 | mod_timer(&ns_timer, jiffies + NS_POLL_PERIOD); |
2933 | mod_timer(&ns_timer, jiffies + NS_POLL_PERIOD); | 2817 | PRINTK("nicstar: Leaving ns_poll().\n"); |
2934 | PRINTK("nicstar: Leaving ns_poll().\n"); | ||
2935 | } | 2818 | } |
2936 | 2819 | ||
2937 | |||
2938 | |||
2939 | static int ns_parse_mac(char *mac, unsigned char *esi) | 2820 | static int ns_parse_mac(char *mac, unsigned char *esi) |
2940 | { | 2821 | { |
2941 | int i, j; | 2822 | int i, j; |
2942 | short byte1, byte0; | 2823 | short byte1, byte0; |
2943 | 2824 | ||
2944 | if (mac == NULL || esi == NULL) | 2825 | if (mac == NULL || esi == NULL) |
2945 | return -1; | 2826 | return -1; |
2946 | j = 0; | 2827 | j = 0; |
2947 | for (i = 0; i < 6; i++) | 2828 | for (i = 0; i < 6; i++) { |
2948 | { | 2829 | if ((byte1 = ns_h2i(mac[j++])) < 0) |
2949 | if ((byte1 = ns_h2i(mac[j++])) < 0) | 2830 | return -1; |
2950 | return -1; | 2831 | if ((byte0 = ns_h2i(mac[j++])) < 0) |
2951 | if ((byte0 = ns_h2i(mac[j++])) < 0) | 2832 | return -1; |
2952 | return -1; | 2833 | esi[i] = (unsigned char)(byte1 * 16 + byte0); |
2953 | esi[i] = (unsigned char) (byte1 * 16 + byte0); | 2834 | if (i < 5) { |
2954 | if (i < 5) | 2835 | if (mac[j++] != ':') |
2955 | { | 2836 | return -1; |
2956 | if (mac[j++] != ':') | 2837 | } |
2957 | return -1; | 2838 | } |
2958 | } | 2839 | return 0; |
2959 | } | ||
2960 | return 0; | ||
2961 | } | 2840 | } |
2962 | 2841 | ||
2963 | |||
2964 | |||
2965 | static short ns_h2i(char c) | 2842 | static short ns_h2i(char c) |
2966 | { | 2843 | { |
2967 | if (c >= '0' && c <= '9') | 2844 | if (c >= '0' && c <= '9') |
2968 | return (short) (c - '0'); | 2845 | return (short)(c - '0'); |
2969 | if (c >= 'A' && c <= 'F') | 2846 | if (c >= 'A' && c <= 'F') |
2970 | return (short) (c - 'A' + 10); | 2847 | return (short)(c - 'A' + 10); |
2971 | if (c >= 'a' && c <= 'f') | 2848 | if (c >= 'a' && c <= 'f') |
2972 | return (short) (c - 'a' + 10); | 2849 | return (short)(c - 'a' + 10); |
2973 | return -1; | 2850 | return -1; |
2974 | } | 2851 | } |
2975 | 2852 | ||
2976 | |||
2977 | |||
2978 | static void ns_phy_put(struct atm_dev *dev, unsigned char value, | 2853 | static void ns_phy_put(struct atm_dev *dev, unsigned char value, |
2979 | unsigned long addr) | 2854 | unsigned long addr) |
2980 | { | 2855 | { |
2981 | ns_dev *card; | 2856 | ns_dev *card; |
2982 | unsigned long flags; | 2857 | unsigned long flags; |
2983 | 2858 | ||
2984 | card = dev->dev_data; | 2859 | card = dev->dev_data; |
2985 | spin_lock_irqsave(&card->res_lock, flags); | 2860 | spin_lock_irqsave(&card->res_lock, flags); |
2986 | while(CMD_BUSY(card)); | 2861 | while (CMD_BUSY(card)) ; |
2987 | writel((unsigned long) value, card->membase + DR0); | 2862 | writel((u32) value, card->membase + DR0); |
2988 | writel(NS_CMD_WRITE_UTILITY | 0x00000200 | (addr & 0x000000FF), | 2863 | writel(NS_CMD_WRITE_UTILITY | 0x00000200 | (addr & 0x000000FF), |
2989 | card->membase + CMD); | 2864 | card->membase + CMD); |
2990 | spin_unlock_irqrestore(&card->res_lock, flags); | 2865 | spin_unlock_irqrestore(&card->res_lock, flags); |
2991 | } | 2866 | } |
2992 | 2867 | ||
2993 | |||
2994 | |||
2995 | static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr) | 2868 | static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr) |
2996 | { | 2869 | { |
2997 | ns_dev *card; | 2870 | ns_dev *card; |
2998 | unsigned long flags; | 2871 | unsigned long flags; |
2999 | unsigned long data; | 2872 | u32 data; |
3000 | 2873 | ||
3001 | card = dev->dev_data; | 2874 | card = dev->dev_data; |
3002 | spin_lock_irqsave(&card->res_lock, flags); | 2875 | spin_lock_irqsave(&card->res_lock, flags); |
3003 | while(CMD_BUSY(card)); | 2876 | while (CMD_BUSY(card)) ; |
3004 | writel(NS_CMD_READ_UTILITY | 0x00000200 | (addr & 0x000000FF), | 2877 | writel(NS_CMD_READ_UTILITY | 0x00000200 | (addr & 0x000000FF), |
3005 | card->membase + CMD); | 2878 | card->membase + CMD); |
3006 | while(CMD_BUSY(card)); | 2879 | while (CMD_BUSY(card)) ; |
3007 | data = readl(card->membase + DR0) & 0x000000FF; | 2880 | data = readl(card->membase + DR0) & 0x000000FF; |
3008 | spin_unlock_irqrestore(&card->res_lock, flags); | 2881 | spin_unlock_irqrestore(&card->res_lock, flags); |
3009 | return (unsigned char) data; | 2882 | return (unsigned char)data; |
3010 | } | 2883 | } |
3011 | 2884 | ||
3012 | |||
3013 | |||
3014 | module_init(nicstar_init); | 2885 | module_init(nicstar_init); |
3015 | module_exit(nicstar_cleanup); | 2886 | module_exit(nicstar_cleanup); |
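The ns_parse_mac()/ns_h2i() pair above turns a colon-separated ESI/MAC string such as "00:60:ED:12:34:56" into six raw bytes, two hex digits at a time, rejecting anything malformed. The same logic, reduced to a self-contained userspace sketch (the parse_esi()/hex_digit() names and the sample address are illustrative, not part of the driver):

#include <stdio.h>

static int hex_digit(char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'A' && c <= 'F')
		return c - 'A' + 10;
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	return -1;			/* not a hex digit */
}

/* Parse "xx:xx:xx:xx:xx:xx" into esi[6]; returns 0 on success, -1 on error. */
static int parse_esi(const char *mac, unsigned char *esi)
{
	int i, hi, lo;

	if (mac == NULL || esi == NULL)
		return -1;
	for (i = 0; i < 6; i++) {
		if ((hi = hex_digit(*mac++)) < 0)
			return -1;
		if ((lo = hex_digit(*mac++)) < 0)
			return -1;
		esi[i] = (unsigned char)(hi * 16 + lo);
		if (i < 5 && *mac++ != ':')	/* separators between bytes only */
			return -1;
	}
	return 0;
}

int main(void)
{
	unsigned char esi[6];

	if (parse_esi("00:60:ED:12:34:56", esi) == 0)
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       esi[0], esi[1], esi[2], esi[3], esi[4], esi[5]);
	return 0;
}

Returning -1 on any malformed character mirrors ns_h2i(), which folds the digit test and the conversion into one helper.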
diff --git a/drivers/atm/nicstar.h b/drivers/atm/nicstar.h index 6010e3daa6a2..9bc27ea5088e 100644 --- a/drivers/atm/nicstar.h +++ b/drivers/atm/nicstar.h | |||
@@ -1,5 +1,4 @@ | |||
1 | /****************************************************************************** | 1 | /* |
2 | * | ||
3 | * nicstar.h | 2 | * nicstar.h |
4 | * | 3 | * |
5 | * Header file for the nicstar device driver. | 4 | * Header file for the nicstar device driver. |
@@ -8,29 +7,26 @@ | |||
8 | * PowerPC support by Jay Talbott (jay_talbott@mcg.mot.com) April 1999 | 7 | * PowerPC support by Jay Talbott (jay_talbott@mcg.mot.com) April 1999 |
9 | * | 8 | * |
10 | * (C) INESC 1998 | 9 | * (C) INESC 1998 |
11 | * | 10 | */ |
12 | ******************************************************************************/ | ||
13 | |||
14 | 11 | ||
15 | #ifndef _LINUX_NICSTAR_H_ | 12 | #ifndef _LINUX_NICSTAR_H_ |
16 | #define _LINUX_NICSTAR_H_ | 13 | #define _LINUX_NICSTAR_H_ |
17 | 14 | ||
18 | 15 | /* Includes */ | |
19 | /* Includes *******************************************************************/ | ||
20 | 16 | ||
21 | #include <linux/types.h> | 17 | #include <linux/types.h> |
22 | #include <linux/pci.h> | 18 | #include <linux/pci.h> |
19 | #include <linux/idr.h> | ||
23 | #include <linux/uio.h> | 20 | #include <linux/uio.h> |
24 | #include <linux/skbuff.h> | 21 | #include <linux/skbuff.h> |
25 | #include <linux/atmdev.h> | 22 | #include <linux/atmdev.h> |
26 | #include <linux/atm_nicstar.h> | 23 | #include <linux/atm_nicstar.h> |
27 | 24 | ||
28 | 25 | /* Options */ | |
29 | /* Options ********************************************************************/ | ||
30 | 26 | ||
31 | #define NS_MAX_CARDS 4 /* Maximum number of NICStAR based cards | 27 | #define NS_MAX_CARDS 4 /* Maximum number of NICStAR based cards |
32 | controlled by the device driver. Must | 28 | controlled by the device driver. Must |
33 | be <= 5 */ | 29 | be <= 5 */ |
34 | 30 | ||
35 | #undef RCQ_SUPPORT /* Do not define this for now */ | 31 | #undef RCQ_SUPPORT /* Do not define this for now */ |
36 | 32 | ||
@@ -43,7 +39,7 @@ | |||
43 | #define NS_VPIBITS 2 /* 0, 1, 2, or 8 */ | 39 | #define NS_VPIBITS 2 /* 0, 1, 2, or 8 */ |
44 | 40 | ||
45 | #define NS_MAX_RCTSIZE 4096 /* Number of entries. 4096 or 16384. | 41 | #define NS_MAX_RCTSIZE 4096 /* Number of entries. 4096 or 16384. |
46 | Define 4096 only if (all) your card(s) | 42 | Define 4096 only if (all) your card(s) |
47 | have 32K x 32bit SRAM, in which case | 43 | have 32K x 32bit SRAM, in which case |
48 | setting this to 16384 will just waste a | 44 | setting this to 16384 will just waste a |
49 | lot of memory. | 45 | lot of memory. |
@@ -51,33 +47,32 @@ | |||
51 | 128K x 32bit SRAM will limit the maximum | 47 | 128K x 32bit SRAM will limit the maximum |
52 | VCI. */ | 48 | VCI. */ |
53 | 49 | ||
54 | /*#define NS_PCI_LATENCY 64*/ /* Must be a multiple of 32 */ | 50 | /*#define NS_PCI_LATENCY 64*//* Must be a multiple of 32 */ |
55 | 51 | ||
56 | /* Number of buffers initially allocated */ | 52 | /* Number of buffers initially allocated */ |
57 | #define NUM_SB 32 /* Must be even */ | 53 | #define NUM_SB 32 /* Must be even */ |
58 | #define NUM_LB 24 /* Must be even */ | 54 | #define NUM_LB 24 /* Must be even */ |
59 | #define NUM_HB 8 /* Pre-allocated huge buffers */ | 55 | #define NUM_HB 8 /* Pre-allocated huge buffers */ |
60 | #define NUM_IOVB 48 /* Iovec buffers */ | 56 | #define NUM_IOVB 48 /* Iovec buffers */ |
61 | 57 | ||
62 | /* Lower level for count of buffers */ | 58 | /* Lower level for count of buffers */ |
63 | #define MIN_SB 8 /* Must be even */ | 59 | #define MIN_SB 8 /* Must be even */ |
64 | #define MIN_LB 8 /* Must be even */ | 60 | #define MIN_LB 8 /* Must be even */ |
65 | #define MIN_HB 6 | 61 | #define MIN_HB 6 |
66 | #define MIN_IOVB 8 | 62 | #define MIN_IOVB 8 |
67 | 63 | ||
68 | /* Upper level for count of buffers */ | 64 | /* Upper level for count of buffers */ |
69 | #define MAX_SB 64 /* Must be even, <= 508 */ | 65 | #define MAX_SB 64 /* Must be even, <= 508 */ |
70 | #define MAX_LB 48 /* Must be even, <= 508 */ | 66 | #define MAX_LB 48 /* Must be even, <= 508 */ |
71 | #define MAX_HB 10 | 67 | #define MAX_HB 10 |
72 | #define MAX_IOVB 80 | 68 | #define MAX_IOVB 80 |
73 | 69 | ||
74 | /* These are the absolute maximum allowed for the ioctl() */ | 70 | /* These are the absolute maximum allowed for the ioctl() */ |
75 | #define TOP_SB 256 /* Must be even, <= 508 */ | 71 | #define TOP_SB 256 /* Must be even, <= 508 */ |
76 | #define TOP_LB 128 /* Must be even, <= 508 */ | 72 | #define TOP_LB 128 /* Must be even, <= 508 */ |
77 | #define TOP_HB 64 | 73 | #define TOP_HB 64 |
78 | #define TOP_IOVB 256 | 74 | #define TOP_IOVB 256 |
79 | 75 | ||
80 | |||
81 | #define MAX_TBD_PER_VC 1 /* Number of TBDs before a TSR */ | 76 | #define MAX_TBD_PER_VC 1 /* Number of TBDs before a TSR */ |
82 | #define MAX_TBD_PER_SCQ 10 /* Only meaningful for variable rate SCQs */ | 77 | #define MAX_TBD_PER_SCQ 10 /* Only meaningful for variable rate SCQs */ |
83 | 78 | ||
@@ -89,15 +84,12 @@ | |||
89 | 84 | ||
90 | #define PCR_TOLERANCE (1.0001) | 85 | #define PCR_TOLERANCE (1.0001) |
91 | 86 | ||
92 | 87 | /* ESI stuff */ | |
93 | |||
94 | /* ESI stuff ******************************************************************/ | ||
95 | 88 | ||
96 | #define NICSTAR_EPROM_MAC_ADDR_OFFSET 0x6C | 89 | #define NICSTAR_EPROM_MAC_ADDR_OFFSET 0x6C |
97 | #define NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT 0xF6 | 90 | #define NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT 0xF6 |
98 | 91 | ||
99 | 92 | /* #defines */ | |
100 | /* #defines *******************************************************************/ | ||
101 | 93 | ||
102 | #define NS_IOREMAP_SIZE 4096 | 94 | #define NS_IOREMAP_SIZE 4096 |
103 | 95 | ||
@@ -123,22 +115,19 @@ | |||
123 | #define NS_SMSKBSIZE (NS_SMBUFSIZE + NS_AAL0_HEADER) | 115 | #define NS_SMSKBSIZE (NS_SMBUFSIZE + NS_AAL0_HEADER) |
124 | #define NS_LGSKBSIZE (NS_SMBUFSIZE + NS_LGBUFSIZE) | 116 | #define NS_LGSKBSIZE (NS_SMBUFSIZE + NS_LGBUFSIZE) |
125 | 117 | ||
118 | /* NICStAR structures located in host memory */ | ||
126 | 119 | ||
127 | /* NICStAR structures located in host memory **********************************/ | 120 | /* |
128 | 121 | * RSQ - Receive Status Queue | |
129 | |||
130 | |||
131 | /* RSQ - Receive Status Queue | ||
132 | * | 122 | * |
133 | * Written by the NICStAR, read by the device driver. | 123 | * Written by the NICStAR, read by the device driver. |
134 | */ | 124 | */ |
135 | 125 | ||
136 | typedef struct ns_rsqe | 126 | typedef struct ns_rsqe { |
137 | { | 127 | u32 word_1; |
138 | u32 word_1; | 128 | u32 buffer_handle; |
139 | u32 buffer_handle; | 129 | u32 final_aal5_crc32; |
140 | u32 final_aal5_crc32; | 130 | u32 word_4; |
141 | u32 word_4; | ||
142 | } ns_rsqe; | 131 | } ns_rsqe; |
143 | 132 | ||
144 | #define ns_rsqe_vpi(ns_rsqep) \ | 133 | #define ns_rsqe_vpi(ns_rsqep) \ |
@@ -175,30 +164,27 @@ typedef struct ns_rsqe | |||
175 | #define ns_rsqe_cellcount(ns_rsqep) \ | 164 | #define ns_rsqe_cellcount(ns_rsqep) \ |
176 | (le32_to_cpu((ns_rsqep)->word_4) & 0x000001FF) | 165 | (le32_to_cpu((ns_rsqep)->word_4) & 0x000001FF) |
177 | #define ns_rsqe_init(ns_rsqep) \ | 166 | #define ns_rsqe_init(ns_rsqep) \ |
178 | ((ns_rsqep)->word_4 = cpu_to_le32(0x00000000)) | 167 | ((ns_rsqep)->word_4 = cpu_to_le32(0x00000000)) |
179 | 168 | ||
180 | #define NS_RSQ_NUM_ENTRIES (NS_RSQSIZE / 16) | 169 | #define NS_RSQ_NUM_ENTRIES (NS_RSQSIZE / 16) |
181 | #define NS_RSQ_ALIGNMENT NS_RSQSIZE | 170 | #define NS_RSQ_ALIGNMENT NS_RSQSIZE |
182 | 171 | ||
183 | 172 | /* | |
184 | 173 | * RCQ - Raw Cell Queue | |
185 | /* RCQ - Raw Cell Queue | ||
186 | * | 174 | * |
187 | * Written by the NICStAR, read by the device driver. | 175 | * Written by the NICStAR, read by the device driver. |
188 | */ | 176 | */ |
189 | 177 | ||
190 | typedef struct cell_payload | 178 | typedef struct cell_payload { |
191 | { | 179 | u32 word[12]; |
192 | u32 word[12]; | ||
193 | } cell_payload; | 180 | } cell_payload; |
194 | 181 | ||
195 | typedef struct ns_rcqe | 182 | typedef struct ns_rcqe { |
196 | { | 183 | u32 word_1; |
197 | u32 word_1; | 184 | u32 word_2; |
198 | u32 word_2; | 185 | u32 word_3; |
199 | u32 word_3; | 186 | u32 word_4; |
200 | u32 word_4; | 187 | cell_payload payload; |
201 | cell_payload payload; | ||
202 | } ns_rcqe; | 188 | } ns_rcqe; |
203 | 189 | ||
204 | #define NS_RCQE_SIZE 64 /* bytes */ | 190 | #define NS_RCQE_SIZE 64 /* bytes */ |
@@ -210,28 +196,25 @@ typedef struct ns_rcqe | |||
210 | #define ns_rcqe_nextbufhandle(ns_rcqep) \ | 196 | #define ns_rcqe_nextbufhandle(ns_rcqep) \ |
211 | (le32_to_cpu((ns_rcqep)->word_2)) | 197 | (le32_to_cpu((ns_rcqep)->word_2)) |
212 | 198 | ||
213 | 199 | /* | |
214 | 200 | * SCQ - Segmentation Channel Queue | |
215 | /* SCQ - Segmentation Channel Queue | ||
216 | * | 201 | * |
217 | * Written by the device driver, read by the NICStAR. | 202 | * Written by the device driver, read by the NICStAR. |
218 | */ | 203 | */ |
219 | 204 | ||
220 | typedef struct ns_scqe | 205 | typedef struct ns_scqe { |
221 | { | 206 | u32 word_1; |
222 | u32 word_1; | 207 | u32 word_2; |
223 | u32 word_2; | 208 | u32 word_3; |
224 | u32 word_3; | 209 | u32 word_4; |
225 | u32 word_4; | ||
226 | } ns_scqe; | 210 | } ns_scqe; |
227 | 211 | ||
228 | /* NOTE: SCQ entries can be either a TBD (Transmit Buffer Descriptors) | 212 | /* NOTE: SCQ entries can be either a TBD (Transmit Buffer Descriptors) |
229 | or TSR (Transmit Status Requests) */ | 213 | or TSR (Transmit Status Requests) */ |
230 | 214 | ||
231 | #define NS_SCQE_TYPE_TBD 0x00000000 | 215 | #define NS_SCQE_TYPE_TBD 0x00000000 |
232 | #define NS_SCQE_TYPE_TSR 0x80000000 | 216 | #define NS_SCQE_TYPE_TSR 0x80000000 |
233 | 217 | ||
234 | |||
235 | #define NS_TBD_EOPDU 0x40000000 | 218 | #define NS_TBD_EOPDU 0x40000000 |
236 | #define NS_TBD_AAL0 0x00000000 | 219 | #define NS_TBD_AAL0 0x00000000 |
237 | #define NS_TBD_AAL34 0x04000000 | 220 | #define NS_TBD_AAL34 0x04000000 |
@@ -253,10 +236,9 @@ typedef struct ns_scqe | |||
253 | #define ns_tbd_mkword_4(gfc, vpi, vci, pt, clp) \ | 236 | #define ns_tbd_mkword_4(gfc, vpi, vci, pt, clp) \ |
254 | (cpu_to_le32((gfc) << 28 | (vpi) << 20 | (vci) << 4 | (pt) << 1 | (clp))) | 237 | (cpu_to_le32((gfc) << 28 | (vpi) << 20 | (vci) << 4 | (pt) << 1 | (clp))) |
255 | 238 | ||
256 | |||
257 | #define NS_TSR_INTENABLE 0x20000000 | 239 | #define NS_TSR_INTENABLE 0x20000000 |
258 | 240 | ||
259 | #define NS_TSR_SCDISVBR 0xFFFF /* Use as scdi for VBR SCD */ | 241 | #define NS_TSR_SCDISVBR 0xFFFF /* Use as scdi for VBR SCD */ |
260 | 242 | ||
261 | #define ns_tsr_mkword_1(flags) \ | 243 | #define ns_tsr_mkword_1(flags) \ |
262 | (cpu_to_le32(NS_SCQE_TYPE_TSR | (flags))) | 244 | (cpu_to_le32(NS_SCQE_TYPE_TSR | (flags))) |
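ns_tbd_mkword_4() in the hunk above packs the outgoing cell's header fields into the fourth word of a transmit buffer descriptor: GFC in bits 31-28, VPI in 27-20, VCI in 19-4, payload type in 3-1 and CLP in bit 0, then converts the result to little-endian for the card. A host-order sketch of the same packing (the example VPI/VCI values are arbitrary, and cpu_to_le32() is omitted since this runs outside the kernel):

#include <stdio.h>
#include <stdint.h>

/* Pack GFC/VPI/VCI/PT/CLP into one 32-bit TBD word, host byte order. */
static uint32_t tbd_word_4(unsigned gfc, unsigned vpi, unsigned vci,
			   unsigned pt, unsigned clp)
{
	return gfc << 28 | vpi << 20 | vci << 4 | pt << 1 | clp;
}

int main(void)
{
	/* Hypothetical connection: VPI 0, VCI 42, no GFC, PT 0, CLP 0. */
	printf("TBD word 4: 0x%08x\n", tbd_word_4(0, 0, 42, 0, 0));
	return 0;
}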
@@ -273,22 +255,20 @@ typedef struct ns_scqe | |||
273 | 255 | ||
274 | #define NS_SCQE_SIZE 16 | 256 | #define NS_SCQE_SIZE 16 |
275 | 257 | ||
276 | 258 | /* | |
277 | 259 | * TSQ - Transmit Status Queue | |
278 | /* TSQ - Transmit Status Queue | ||
279 | * | 260 | * |
280 | * Written by the NICStAR, read by the device driver. | 261 | * Written by the NICStAR, read by the device driver. |
281 | */ | 262 | */ |
282 | 263 | ||
283 | typedef struct ns_tsi | 264 | typedef struct ns_tsi { |
284 | { | 265 | u32 word_1; |
285 | u32 word_1; | 266 | u32 word_2; |
286 | u32 word_2; | ||
287 | } ns_tsi; | 267 | } ns_tsi; |
288 | 268 | ||
289 | /* NOTE: The first word can be a status word copied from the TSR which | 269 | /* NOTE: The first word can be a status word copied from the TSR which |
290 | originated the TSI, or a timer overflow indicator. In this last | 270 | originated the TSI, or a timer overflow indicator. In this last |
291 | case, the value of the first word is all zeroes. */ | 271 | case, the value of the first word is all zeroes. */ |
292 | 272 | ||
293 | #define NS_TSI_EMPTY 0x80000000 | 273 | #define NS_TSI_EMPTY 0x80000000 |
294 | #define NS_TSI_TIMESTAMP_MASK 0x00FFFFFF | 274 | #define NS_TSI_TIMESTAMP_MASK 0x00FFFFFF |
@@ -301,12 +281,10 @@ typedef struct ns_tsi | |||
301 | #define ns_tsi_init(ns_tsip) \ | 281 | #define ns_tsi_init(ns_tsip) \ |
302 | ((ns_tsip)->word_2 = cpu_to_le32(NS_TSI_EMPTY)) | 282 | ((ns_tsip)->word_2 = cpu_to_le32(NS_TSI_EMPTY)) |
303 | 283 | ||
304 | |||
305 | #define NS_TSQSIZE 8192 | 284 | #define NS_TSQSIZE 8192 |
306 | #define NS_TSQ_NUM_ENTRIES 1024 | 285 | #define NS_TSQ_NUM_ENTRIES 1024 |
307 | #define NS_TSQ_ALIGNMENT 8192 | 286 | #define NS_TSQ_ALIGNMENT 8192 |
308 | 287 | ||
309 | |||
310 | #define NS_TSI_SCDISVBR NS_TSR_SCDISVBR | 288 | #define NS_TSI_SCDISVBR NS_TSR_SCDISVBR |
311 | 289 | ||
312 | #define ns_tsi_tmrof(ns_tsip) \ | 290 | #define ns_tsi_tmrof(ns_tsip) \ |
@@ -316,26 +294,22 @@ typedef struct ns_tsi | |||
316 | #define ns_tsi_getscqpos(ns_tsip) \ | 294 | #define ns_tsi_getscqpos(ns_tsip) \ |
317 | (le32_to_cpu((ns_tsip)->word_1) & 0x00007FFF) | 295 | (le32_to_cpu((ns_tsip)->word_1) & 0x00007FFF) |
318 | 296 | ||
297 | /* NICStAR structures located in local SRAM */ | ||
319 | 298 | ||
320 | 299 | /* | |
321 | /* NICStAR structures located in local SRAM ***********************************/ | 300 | * RCT - Receive Connection Table |
322 | |||
323 | |||
324 | |||
325 | /* RCT - Receive Connection Table | ||
326 | * | 301 | * |
327 | * Written by both the NICStAR and the device driver. | 302 | * Written by both the NICStAR and the device driver. |
328 | */ | 303 | */ |
329 | 304 | ||
330 | typedef struct ns_rcte | 305 | typedef struct ns_rcte { |
331 | { | 306 | u32 word_1; |
332 | u32 word_1; | 307 | u32 buffer_handle; |
333 | u32 buffer_handle; | 308 | u32 dma_address; |
334 | u32 dma_address; | 309 | u32 aal5_crc32; |
335 | u32 aal5_crc32; | ||
336 | } ns_rcte; | 310 | } ns_rcte; |
337 | 311 | ||
338 | #define NS_RCTE_BSFB 0x00200000 /* Rev. D only */ | 312 | #define NS_RCTE_BSFB 0x00200000 /* Rev. D only */ |
339 | #define NS_RCTE_NZGFC 0x00100000 | 313 | #define NS_RCTE_NZGFC 0x00100000 |
340 | #define NS_RCTE_CONNECTOPEN 0x00080000 | 314 | #define NS_RCTE_CONNECTOPEN 0x00080000 |
341 | #define NS_RCTE_AALMASK 0x00070000 | 315 | #define NS_RCTE_AALMASK 0x00070000 |
@@ -358,25 +332,21 @@ typedef struct ns_rcte | |||
358 | #define NS_RCT_ENTRY_SIZE 4 /* Number of dwords */ | 332 | #define NS_RCT_ENTRY_SIZE 4 /* Number of dwords */ |
359 | 333 | ||
360 | /* NOTE: We could make macros to construct the first word of the RCTE, | 334 | /* NOTE: We could make macros to construct the first word of the RCTE, |
361 | but that doesn't seem to make much sense... */ | 335 | but that doesn't seem to make much sense... */ |
362 | 336 | ||
363 | 337 | /* | |
364 | 338 | * FBD - Free Buffer Descriptor | |
365 | /* FBD - Free Buffer Descriptor | ||
366 | * | 339 | * |
367 | * Written by the device driver via the command register. | 340 | * Written by the device driver via the command register. |
368 | */ | 341 | */ |
369 | 342 | ||
370 | typedef struct ns_fbd | 343 | typedef struct ns_fbd { |
371 | { | 344 | u32 buffer_handle; |
372 | u32 buffer_handle; | 345 | u32 dma_address; |
373 | u32 dma_address; | ||
374 | } ns_fbd; | 346 | } ns_fbd; |
375 | 347 | ||
376 | 348 | /* | |
377 | 349 | * TST - Transmit Schedule Table | |
378 | |||
379 | /* TST - Transmit Schedule Table | ||
380 | * | 350 | * |
381 | * Written by the device driver. | 351 | * Written by the device driver. |
382 | */ | 352 | */ |
@@ -385,40 +355,38 @@ typedef u32 ns_tste; | |||
385 | 355 | ||
386 | #define NS_TST_OPCODE_MASK 0x60000000 | 356 | #define NS_TST_OPCODE_MASK 0x60000000 |
387 | 357 | ||
388 | #define NS_TST_OPCODE_NULL 0x00000000 /* Insert null cell */ | 358 | #define NS_TST_OPCODE_NULL 0x00000000 /* Insert null cell */ |
389 | #define NS_TST_OPCODE_FIXED 0x20000000 /* Cell from a fixed rate channel */ | 359 | #define NS_TST_OPCODE_FIXED 0x20000000 /* Cell from a fixed rate channel */ |
390 | #define NS_TST_OPCODE_VARIABLE 0x40000000 | 360 | #define NS_TST_OPCODE_VARIABLE 0x40000000 |
391 | #define NS_TST_OPCODE_END 0x60000000 /* Jump */ | 361 | #define NS_TST_OPCODE_END 0x60000000 /* Jump */ |
392 | 362 | ||
393 | #define ns_tste_make(opcode, sramad) (opcode | sramad) | 363 | #define ns_tste_make(opcode, sramad) (opcode | sramad) |
394 | 364 | ||
395 | /* NOTE: | 365 | /* NOTE: |
396 | 366 | ||
397 | - When the opcode is FIXED, sramad specifies the SRAM address of the | 367 | - When the opcode is FIXED, sramad specifies the SRAM address of the |
398 | SCD for that fixed rate channel. | 368 | SCD for that fixed rate channel. |
399 | - When the opcode is END, sramad specifies the SRAM address of the | 369 | - When the opcode is END, sramad specifies the SRAM address of the |
400 | location of the next TST entry to read. | 370 | location of the next TST entry to read. |
401 | */ | 371 | */ |
402 | 372 | ||
403 | 373 | /* | |
404 | 374 | * SCD - Segmentation Channel Descriptor | |
405 | /* SCD - Segmentation Channel Descriptor | ||
406 | * | 375 | * |
407 | * Written by both the device driver and the NICStAR | 376 | * Written by both the device driver and the NICStAR |
408 | */ | 377 | */ |
409 | 378 | ||
410 | typedef struct ns_scd | 379 | typedef struct ns_scd { |
411 | { | 380 | u32 word_1; |
412 | u32 word_1; | 381 | u32 word_2; |
413 | u32 word_2; | 382 | u32 partial_aal5_crc; |
414 | u32 partial_aal5_crc; | 383 | u32 reserved; |
415 | u32 reserved; | 384 | ns_scqe cache_a; |
416 | ns_scqe cache_a; | 385 | ns_scqe cache_b; |
417 | ns_scqe cache_b; | ||
418 | } ns_scd; | 386 | } ns_scd; |
419 | 387 | ||
420 | #define NS_SCD_BASE_MASK_VAR 0xFFFFE000 /* Variable rate */ | 388 | #define NS_SCD_BASE_MASK_VAR 0xFFFFE000 /* Variable rate */ |
421 | #define NS_SCD_BASE_MASK_FIX 0xFFFFFC00 /* Fixed rate */ | 389 | #define NS_SCD_BASE_MASK_FIX 0xFFFFFC00 /* Fixed rate */ |
422 | #define NS_SCD_TAIL_MASK_VAR 0x00001FF0 | 390 | #define NS_SCD_TAIL_MASK_VAR 0x00001FF0 |
423 | #define NS_SCD_TAIL_MASK_FIX 0x000003F0 | 391 | #define NS_SCD_TAIL_MASK_FIX 0x000003F0 |
424 | #define NS_SCD_HEAD_MASK_VAR 0x00001FF0 | 392 | #define NS_SCD_HEAD_MASK_VAR 0x00001FF0 |
@@ -426,13 +394,9 @@ typedef struct ns_scd | |||
426 | #define NS_SCD_XMITFOREVER 0x02000000 | 394 | #define NS_SCD_XMITFOREVER 0x02000000 |
427 | 395 | ||
428 | /* NOTE: There are other fields in word 2 of the SCD, but as they should | 396 | /* NOTE: There are other fields in word 2 of the SCD, but as they should |
429 | not be needed in the device driver they are not defined here. */ | 397 | not be needed in the device driver they are not defined here. */ |
430 | |||
431 | |||
432 | |||
433 | |||
434 | /* NICStAR local SRAM memory map **********************************************/ | ||
435 | 398 | ||
399 | /* NICStAR local SRAM memory map */ | ||
436 | 400 | ||
437 | #define NS_RCT 0x00000 | 401 | #define NS_RCT 0x00000 |
438 | #define NS_RCT_32_END 0x03FFF | 402 | #define NS_RCT_32_END 0x03FFF |
@@ -455,100 +419,93 @@ typedef struct ns_scd | |||
455 | #define NS_LGFBQ 0x1FC00 | 419 | #define NS_LGFBQ 0x1FC00 |
456 | #define NS_LGFBQ_END 0x1FFFF | 420 | #define NS_LGFBQ_END 0x1FFFF |
457 | 421 | ||
458 | 422 | /* NISCtAR operation registers */ | |
459 | |||
460 | /* NISCtAR operation registers ************************************************/ | ||
461 | |||
462 | 423 | ||
463 | /* See Section 3.4 of `IDT77211 NICStAR User Manual' from www.idt.com */ | 424 | /* See Section 3.4 of `IDT77211 NICStAR User Manual' from www.idt.com */ |
464 | 425 | ||
465 | enum ns_regs | 426 | enum ns_regs { |
466 | { | 427 | DR0 = 0x00, /* Data Register 0 R/W */ |
467 | DR0 = 0x00, /* Data Register 0 R/W*/ | 428 | DR1 = 0x04, /* Data Register 1 W */ |
468 | DR1 = 0x04, /* Data Register 1 W */ | 429 | DR2 = 0x08, /* Data Register 2 W */ |
469 | DR2 = 0x08, /* Data Register 2 W */ | 430 | DR3 = 0x0C, /* Data Register 3 W */ |
470 | DR3 = 0x0C, /* Data Register 3 W */ | 431 | CMD = 0x10, /* Command W */ |
471 | CMD = 0x10, /* Command W */ | 432 | CFG = 0x14, /* Configuration R/W */ |
472 | CFG = 0x14, /* Configuration R/W */ | 433 | STAT = 0x18, /* Status R/W */ |
473 | STAT = 0x18, /* Status R/W */ | 434 | RSQB = 0x1C, /* Receive Status Queue Base W */ |
474 | RSQB = 0x1C, /* Receive Status Queue Base W */ | 435 | RSQT = 0x20, /* Receive Status Queue Tail R */ |
475 | RSQT = 0x20, /* Receive Status Queue Tail R */ | 436 | RSQH = 0x24, /* Receive Status Queue Head W */ |
476 | RSQH = 0x24, /* Receive Status Queue Head W */ | 437 | CDC = 0x28, /* Cell Drop Counter R/clear */ |
477 | CDC = 0x28, /* Cell Drop Counter R/clear */ | 438 | VPEC = 0x2C, /* VPI/VCI Lookup Error Count R/clear */ |
478 | VPEC = 0x2C, /* VPI/VCI Lookup Error Count R/clear */ | 439 | ICC = 0x30, /* Invalid Cell Count R/clear */ |
479 | ICC = 0x30, /* Invalid Cell Count R/clear */ | 440 | RAWCT = 0x34, /* Raw Cell Tail R */ |
480 | RAWCT = 0x34, /* Raw Cell Tail R */ | 441 | TMR = 0x38, /* Timer R */ |
481 | TMR = 0x38, /* Timer R */ | 442 | TSTB = 0x3C, /* Transmit Schedule Table Base R/W */ |
482 | TSTB = 0x3C, /* Transmit Schedule Table Base R/W */ | 443 | TSQB = 0x40, /* Transmit Status Queue Base W */ |
483 | TSQB = 0x40, /* Transmit Status Queue Base W */ | 444 | TSQT = 0x44, /* Transmit Status Queue Tail R */ |
484 | TSQT = 0x44, /* Transmit Status Queue Tail R */ | 445 | TSQH = 0x48, /* Transmit Status Queue Head W */ |
485 | TSQH = 0x48, /* Transmit Status Queue Head W */ | 446 | GP = 0x4C, /* General Purpose R/W */ |
486 | GP = 0x4C, /* General Purpose R/W */ | 447 | VPM = 0x50 /* VPI/VCI Mask W */ |
487 | VPM = 0x50 /* VPI/VCI Mask W */ | ||
488 | }; | 448 | }; |
489 | 449 | ||
490 | 450 | /* NICStAR commands issued to the CMD register */ | |
491 | /* NICStAR commands issued to the CMD register ********************************/ | ||
492 | |||
493 | 451 | ||
494 | /* Top 4 bits are command opcode, lower 28 are parameters. */ | 452 | /* Top 4 bits are command opcode, lower 28 are parameters. */ |
495 | 453 | ||
496 | #define NS_CMD_NO_OPERATION 0x00000000 | 454 | #define NS_CMD_NO_OPERATION 0x00000000 |
497 | /* params always 0 */ | 455 | /* params always 0 */ |
498 | 456 | ||
499 | #define NS_CMD_OPENCLOSE_CONNECTION 0x20000000 | 457 | #define NS_CMD_OPENCLOSE_CONNECTION 0x20000000 |
500 | /* b19{1=open,0=close} b18-2{SRAM addr} */ | 458 | /* b19{1=open,0=close} b18-2{SRAM addr} */ |
501 | 459 | ||
502 | #define NS_CMD_WRITE_SRAM 0x40000000 | 460 | #define NS_CMD_WRITE_SRAM 0x40000000 |
503 | /* b18-2{SRAM addr} b1-0{burst size} */ | 461 | /* b18-2{SRAM addr} b1-0{burst size} */ |
504 | 462 | ||
505 | #define NS_CMD_READ_SRAM 0x50000000 | 463 | #define NS_CMD_READ_SRAM 0x50000000 |
506 | /* b18-2{SRAM addr} */ | 464 | /* b18-2{SRAM addr} */ |
507 | 465 | ||
508 | #define NS_CMD_WRITE_FREEBUFQ 0x60000000 | 466 | #define NS_CMD_WRITE_FREEBUFQ 0x60000000 |
509 | /* b0{large buf indicator} */ | 467 | /* b0{large buf indicator} */ |
510 | 468 | ||
511 | #define NS_CMD_READ_UTILITY 0x80000000 | 469 | #define NS_CMD_READ_UTILITY 0x80000000 |
512 | /* b8{1=select UTL_CS1} b9{1=select UTL_CS0} b7-0{bus addr} */ | 470 | /* b8{1=select UTL_CS1} b9{1=select UTL_CS0} b7-0{bus addr} */ |
513 | 471 | ||
514 | #define NS_CMD_WRITE_UTILITY 0x90000000 | 472 | #define NS_CMD_WRITE_UTILITY 0x90000000 |
515 | /* b8{1=select UTL_CS1} b9{1=select UTL_CS0} b7-0{bus addr} */ | 473 | /* b8{1=select UTL_CS1} b9{1=select UTL_CS0} b7-0{bus addr} */ |
516 | 474 | ||
517 | #define NS_CMD_OPEN_CONNECTION (NS_CMD_OPENCLOSE_CONNECTION | 0x00080000) | 475 | #define NS_CMD_OPEN_CONNECTION (NS_CMD_OPENCLOSE_CONNECTION | 0x00080000) |
518 | #define NS_CMD_CLOSE_CONNECTION NS_CMD_OPENCLOSE_CONNECTION | 476 | #define NS_CMD_CLOSE_CONNECTION NS_CMD_OPENCLOSE_CONNECTION |
519 | 477 | ||
520 | 478 | /* NICStAR configuration bits */ | |
521 | /* NICStAR configuration bits *************************************************/ | 479 | |
522 | 480 | #define NS_CFG_SWRST 0x80000000 /* Software Reset */ | |
523 | #define NS_CFG_SWRST 0x80000000 /* Software Reset */ | 481 | #define NS_CFG_RXPATH 0x20000000 /* Receive Path Enable */ |
524 | #define NS_CFG_RXPATH 0x20000000 /* Receive Path Enable */ | 482 | #define NS_CFG_SMBUFSIZE_MASK 0x18000000 /* Small Receive Buffer Size */ |
525 | #define NS_CFG_SMBUFSIZE_MASK 0x18000000 /* Small Receive Buffer Size */ | 483 | #define NS_CFG_LGBUFSIZE_MASK 0x06000000 /* Large Receive Buffer Size */ |
526 | #define NS_CFG_LGBUFSIZE_MASK 0x06000000 /* Large Receive Buffer Size */ | 484 | #define NS_CFG_EFBIE 0x01000000 /* Empty Free Buffer Queue |
527 | #define NS_CFG_EFBIE 0x01000000 /* Empty Free Buffer Queue | 485 | Interrupt Enable */ |
528 | Interrupt Enable */ | 486 | #define NS_CFG_RSQSIZE_MASK 0x00C00000 /* Receive Status Queue Size */ |
529 | #define NS_CFG_RSQSIZE_MASK 0x00C00000 /* Receive Status Queue Size */ | 487 | #define NS_CFG_ICACCEPT 0x00200000 /* Invalid Cell Accept */ |
530 | #define NS_CFG_ICACCEPT 0x00200000 /* Invalid Cell Accept */ | 488 | #define NS_CFG_IGNOREGFC 0x00100000 /* Ignore General Flow Control */ |
531 | #define NS_CFG_IGNOREGFC 0x00100000 /* Ignore General Flow Control */ | 489 | #define NS_CFG_VPIBITS_MASK 0x000C0000 /* VPI/VCI Bits Size Select */ |
532 | #define NS_CFG_VPIBITS_MASK 0x000C0000 /* VPI/VCI Bits Size Select */ | 490 | #define NS_CFG_RCTSIZE_MASK 0x00030000 /* Receive Connection Table Size */ |
533 | #define NS_CFG_RCTSIZE_MASK 0x00030000 /* Receive Connection Table Size */ | 491 | #define NS_CFG_VCERRACCEPT 0x00008000 /* VPI/VCI Error Cell Accept */ |
534 | #define NS_CFG_VCERRACCEPT 0x00008000 /* VPI/VCI Error Cell Accept */ | 492 | #define NS_CFG_RXINT_MASK 0x00007000 /* End of Receive PDU Interrupt |
535 | #define NS_CFG_RXINT_MASK 0x00007000 /* End of Receive PDU Interrupt | 493 | Handling */ |
536 | Handling */ | 494 | #define NS_CFG_RAWIE 0x00000800 /* Raw Cell Qu' Interrupt Enable */ |
537 | #define NS_CFG_RAWIE 0x00000800 /* Raw Cell Qu' Interrupt Enable */ | 495 | #define NS_CFG_RSQAFIE 0x00000400 /* Receive Queue Almost Full |
538 | #define NS_CFG_RSQAFIE 0x00000400 /* Receive Queue Almost Full | 496 | Interrupt Enable */ |
539 | Interrupt Enable */ | 497 | #define NS_CFG_RXRM 0x00000200 /* Receive RM Cells */ |
540 | #define NS_CFG_RXRM 0x00000200 /* Receive RM Cells */ | 498 | #define NS_CFG_TMRROIE 0x00000080 /* Timer Roll Over Interrupt |
541 | #define NS_CFG_TMRROIE 0x00000080 /* Timer Roll Over Interrupt | 499 | Enable */ |
542 | Enable */ | 500 | #define NS_CFG_TXEN 0x00000020 /* Transmit Operation Enable */ |
543 | #define NS_CFG_TXEN 0x00000020 /* Transmit Operation Enable */ | 501 | #define NS_CFG_TXIE 0x00000010 /* Transmit Status Interrupt |
544 | #define NS_CFG_TXIE 0x00000010 /* Transmit Status Interrupt | 502 | Enable */ |
545 | Enable */ | 503 | #define NS_CFG_TXURIE 0x00000008 /* Transmit Under-run Interrupt |
546 | #define NS_CFG_TXURIE 0x00000008 /* Transmit Under-run Interrupt | 504 | Enable */ |
547 | Enable */ | 505 | #define NS_CFG_UMODE 0x00000004 /* Utopia Mode (cell/byte) Select */ |
548 | #define NS_CFG_UMODE 0x00000004 /* Utopia Mode (cell/byte) Select */ | 506 | #define NS_CFG_TSQFIE 0x00000002 /* Transmit Status Queue Full |
549 | #define NS_CFG_TSQFIE 0x00000002 /* Transmit Status Queue Full | 507 | Interrupt Enable */ |
550 | Interrupt Enable */ | 508 | #define NS_CFG_PHYIE 0x00000001 /* PHY Interrupt Enable */ |
551 | #define NS_CFG_PHYIE 0x00000001 /* PHY Interrupt Enable */ | ||
552 | 509 | ||
553 | #define NS_CFG_SMBUFSIZE_48 0x00000000 | 510 | #define NS_CFG_SMBUFSIZE_48 0x00000000 |
554 | #define NS_CFG_SMBUFSIZE_96 0x08000000 | 511 | #define NS_CFG_SMBUFSIZE_96 0x08000000 |
@@ -579,33 +536,29 @@ enum ns_regs | |||
579 | #define NS_CFG_RXINT_624US 0x00003000 | 536 | #define NS_CFG_RXINT_624US 0x00003000 |
580 | #define NS_CFG_RXINT_899US 0x00004000 | 537 | #define NS_CFG_RXINT_899US 0x00004000 |
581 | 538 | ||
582 | 539 | /* NICStAR STATus bits */ | |
583 | /* NICStAR STATus bits ********************************************************/ | 540 | |
584 | 541 | #define NS_STAT_SFBQC_MASK 0xFF000000 /* hi 8 bits Small Buffer Queue Count */ | |
585 | #define NS_STAT_SFBQC_MASK 0xFF000000 /* hi 8 bits Small Buffer Queue Count */ | 542 | #define NS_STAT_LFBQC_MASK 0x00FF0000 /* hi 8 bits Large Buffer Queue Count */ |
586 | #define NS_STAT_LFBQC_MASK 0x00FF0000 /* hi 8 bits Large Buffer Queue Count */ | 543 | #define NS_STAT_TSIF 0x00008000 /* Transmit Status Queue Indicator */ |
587 | #define NS_STAT_TSIF 0x00008000 /* Transmit Status Queue Indicator */ | 544 | #define NS_STAT_TXICP 0x00004000 /* Transmit Incomplete PDU */ |
588 | #define NS_STAT_TXICP 0x00004000 /* Transmit Incomplete PDU */ | 545 | #define NS_STAT_TSQF 0x00001000 /* Transmit Status Queue Full */ |
589 | #define NS_STAT_TSQF 0x00001000 /* Transmit Status Queue Full */ | 546 | #define NS_STAT_TMROF 0x00000800 /* Timer Overflow */ |
590 | #define NS_STAT_TMROF 0x00000800 /* Timer Overflow */ | 547 | #define NS_STAT_PHYI 0x00000400 /* PHY Device Interrupt */ |
591 | #define NS_STAT_PHYI 0x00000400 /* PHY Device Interrupt */ | 548 | #define NS_STAT_CMDBZ 0x00000200 /* Command Busy */ |
592 | #define NS_STAT_CMDBZ 0x00000200 /* Command Busy */ | 549 | #define NS_STAT_SFBQF 0x00000100 /* Small Buffer Queue Full */ |
593 | #define NS_STAT_SFBQF 0x00000100 /* Small Buffer Queue Full */ | 550 | #define NS_STAT_LFBQF 0x00000080 /* Large Buffer Queue Full */ |
594 | #define NS_STAT_LFBQF 0x00000080 /* Large Buffer Queue Full */ | 551 | #define NS_STAT_RSQF 0x00000040 /* Receive Status Queue Full */ |
595 | #define NS_STAT_RSQF 0x00000040 /* Receive Status Queue Full */ | 552 | #define NS_STAT_EOPDU 0x00000020 /* End of PDU */ |
596 | #define NS_STAT_EOPDU 0x00000020 /* End of PDU */ | 553 | #define NS_STAT_RAWCF 0x00000010 /* Raw Cell Flag */ |
597 | #define NS_STAT_RAWCF 0x00000010 /* Raw Cell Flag */ | 554 | #define NS_STAT_SFBQE 0x00000008 /* Small Buffer Queue Empty */ |
598 | #define NS_STAT_SFBQE 0x00000008 /* Small Buffer Queue Empty */ | 555 | #define NS_STAT_LFBQE 0x00000004 /* Large Buffer Queue Empty */ |
599 | #define NS_STAT_LFBQE 0x00000004 /* Large Buffer Queue Empty */ | 556 | #define NS_STAT_RSQAF 0x00000002 /* Receive Status Queue Almost Full */ |
600 | #define NS_STAT_RSQAF 0x00000002 /* Receive Status Queue Almost Full */ | ||
601 | 557 | ||
602 | #define ns_stat_sfbqc_get(stat) (((stat) & NS_STAT_SFBQC_MASK) >> 23) | 558 | #define ns_stat_sfbqc_get(stat) (((stat) & NS_STAT_SFBQC_MASK) >> 23) |
603 | #define ns_stat_lfbqc_get(stat) (((stat) & NS_STAT_LFBQC_MASK) >> 15) | 559 | #define ns_stat_lfbqc_get(stat) (((stat) & NS_STAT_LFBQC_MASK) >> 15) |
604 | 560 | ||
605 | 561 | /* #defines which depend on other #defines */ | |
606 | |||
607 | /* #defines which depend on other #defines ************************************/ | ||
608 | |||
609 | 562 | ||
610 | #define NS_TST0 NS_TST_FRSCD | 563 | #define NS_TST0 NS_TST_FRSCD |
611 | #define NS_TST1 (NS_TST_FRSCD + NS_TST_NUM_ENTRIES + 1) | 564 | #define NS_TST1 (NS_TST_FRSCD + NS_TST_NUM_ENTRIES + 1) |
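ns_stat_sfbqc_get() and ns_stat_lfbqc_get(), a few lines up, deliberately shift by 23 and 15 rather than 24 and 16: the 8-bit fields in STAT appear to count free buffers in pairs (which is also why NUM_SB/NUM_LB must be even), so the extra bit of shift converts pairs back into buffers. A self-contained sketch of the extraction, using a made-up STAT snapshot (the sample value is the only assumption):

#include <stdio.h>
#include <stdint.h>

#define NS_STAT_SFBQC_MASK 0xFF000000	/* hi 8 bits: small buffer queue count */
#define NS_STAT_LFBQC_MASK 0x00FF0000	/* next 8 bits: large buffer queue count */

/* Shifting by 23/15 instead of 24/16 doubles the raw field value. */
#define ns_stat_sfbqc_get(stat) (((stat) & NS_STAT_SFBQC_MASK) >> 23)
#define ns_stat_lfbqc_get(stat) (((stat) & NS_STAT_LFBQC_MASK) >> 15)

int main(void)
{
	uint32_t stat = 0x10200020;	/* hypothetical STAT snapshot */

	printf("small free buffers: %u\n", (unsigned)ns_stat_sfbqc_get(stat));
	printf("large free buffers: %u\n", (unsigned)ns_stat_lfbqc_get(stat));
	return 0;
}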
@@ -672,8 +625,7 @@ enum ns_regs | |||
672 | #define NS_CFG_TSQFIE_OPT 0x00000000 | 625 | #define NS_CFG_TSQFIE_OPT 0x00000000 |
673 | #endif /* ENABLE_TSQFIE */ | 626 | #endif /* ENABLE_TSQFIE */ |
674 | 627 | ||
675 | 628 | /* PCI stuff */ | |
676 | /* PCI stuff ******************************************************************/ | ||
677 | 629 | ||
678 | #ifndef PCI_VENDOR_ID_IDT | 630 | #ifndef PCI_VENDOR_ID_IDT |
679 | #define PCI_VENDOR_ID_IDT 0x111D | 631 | #define PCI_VENDOR_ID_IDT 0x111D |
@@ -683,138 +635,124 @@ enum ns_regs | |||
683 | #define PCI_DEVICE_ID_IDT_IDT77201 0x0001 | 635 | #define PCI_DEVICE_ID_IDT_IDT77201 0x0001 |
684 | #endif /* PCI_DEVICE_ID_IDT_IDT77201 */ | 636 | #endif /* PCI_DEVICE_ID_IDT_IDT77201 */ |
685 | 637 | ||
638 | /* Device driver structures */ | ||
686 | 639 | ||
687 | 640 | struct ns_skb_prv { | |
688 | /* Device driver structures ***************************************************/ | 641 | u32 buf_type; /* BUF_SM/BUF_LG/BUF_NONE */ |
689 | 642 | u32 dma; | |
690 | 643 | int iovcnt; | |
691 | struct ns_skb_cb { | ||
692 | u32 buf_type; /* BUF_SM/BUF_LG/BUF_NONE */ | ||
693 | }; | 644 | }; |
694 | 645 | ||
695 | #define NS_SKB_CB(skb) ((struct ns_skb_cb *)((skb)->cb)) | 646 | #define NS_PRV_BUFTYPE(skb) \ |
696 | 647 | (((struct ns_skb_prv *)(ATM_SKB(skb)+1))->buf_type) | |
697 | typedef struct tsq_info | 648 | #define NS_PRV_DMA(skb) \ |
698 | { | 649 | (((struct ns_skb_prv *)(ATM_SKB(skb)+1))->dma) |
699 | void *org; | 650 | #define NS_PRV_IOVCNT(skb) \ |
700 | ns_tsi *base; | 651 | (((struct ns_skb_prv *)(ATM_SKB(skb)+1))->iovcnt) |
701 | ns_tsi *next; | 652 | |
702 | ns_tsi *last; | 653 | typedef struct tsq_info { |
654 | void *org; | ||
655 | dma_addr_t dma; | ||
656 | ns_tsi *base; | ||
657 | ns_tsi *next; | ||
658 | ns_tsi *last; | ||
703 | } tsq_info; | 659 | } tsq_info; |
704 | 660 | ||
705 | 661 | typedef struct scq_info { | |
706 | typedef struct scq_info | 662 | void *org; |
707 | { | 663 | dma_addr_t dma; |
708 | void *org; | 664 | ns_scqe *base; |
709 | ns_scqe *base; | 665 | ns_scqe *last; |
710 | ns_scqe *last; | 666 | ns_scqe *next; |
711 | ns_scqe *next; | 667 | volatile ns_scqe *tail; /* Not related to the nicstar register */ |
712 | volatile ns_scqe *tail; /* Not related to the nicstar register */ | 668 | unsigned num_entries; |
713 | unsigned num_entries; | 669 | struct sk_buff **skb; /* Pointer to an array of pointers |
714 | struct sk_buff **skb; /* Pointer to an array of pointers | 670 | to the sk_buffs used for tx */ |
715 | to the sk_buffs used for tx */ | 671 | u32 scd; /* SRAM address of the corresponding |
716 | u32 scd; /* SRAM address of the corresponding | 672 | SCD */ |
717 | SCD */ | 673 | int tbd_count; /* Only meaningful on variable rate */ |
718 | int tbd_count; /* Only meaningful on variable rate */ | 674 | wait_queue_head_t scqfull_waitq; |
719 | wait_queue_head_t scqfull_waitq; | 675 | volatile char full; /* SCQ full indicator */ |
720 | volatile char full; /* SCQ full indicator */ | 676 | spinlock_t lock; /* SCQ spinlock */ |
721 | spinlock_t lock; /* SCQ spinlock */ | ||
722 | } scq_info; | 677 | } scq_info; |
723 | 678 | ||
724 | 679 | typedef struct rsq_info { | |
725 | 680 | void *org; | |
726 | typedef struct rsq_info | 681 | dma_addr_t dma; |
727 | { | 682 | ns_rsqe *base; |
728 | void *org; | 683 | ns_rsqe *next; |
729 | ns_rsqe *base; | 684 | ns_rsqe *last; |
730 | ns_rsqe *next; | ||
731 | ns_rsqe *last; | ||
732 | } rsq_info; | 685 | } rsq_info; |
733 | 686 | ||
734 | 687 | typedef struct skb_pool { | |
735 | typedef struct skb_pool | 688 | volatile int count; /* number of buffers in the queue */ |
736 | { | 689 | struct sk_buff_head queue; |
737 | volatile int count; /* number of buffers in the queue */ | ||
738 | struct sk_buff_head queue; | ||
739 | } skb_pool; | 690 | } skb_pool; |
740 | 691 | ||
741 | /* NOTE: for small and large buffer pools, the count is not used, as the | 692 | /* NOTE: for small and large buffer pools, the count is not used, as the |
742 | actual value used for buffer management is the one read from the | 693 | actual value used for buffer management is the one read from the |
743 | card. */ | 694 | card. */ |
744 | 695 | ||
745 | 696 | typedef struct vc_map { | |
746 | typedef struct vc_map | 697 | volatile unsigned int tx:1; /* TX vc? */ |
747 | { | 698 | volatile unsigned int rx:1; /* RX vc? */ |
748 | volatile unsigned int tx:1; /* TX vc? */ | 699 | struct atm_vcc *tx_vcc, *rx_vcc; |
749 | volatile unsigned int rx:1; /* RX vc? */ | 700 | struct sk_buff *rx_iov; /* RX iovector skb */ |
750 | struct atm_vcc *tx_vcc, *rx_vcc; | 701 | scq_info *scq; /* To keep track of the SCQ */ |
751 | struct sk_buff *rx_iov; /* RX iovector skb */ | 702 | u32 cbr_scd; /* SRAM address of the corresponding |
752 | scq_info *scq; /* To keep track of the SCQ */ | 703 | SCD. 0x00000000 for UBR/VBR/ABR */ |
753 | u32 cbr_scd; /* SRAM address of the corresponding | 704 | int tbd_count; |
754 | SCD. 0x00000000 for UBR/VBR/ABR */ | ||
755 | int tbd_count; | ||
756 | } vc_map; | 705 | } vc_map; |
757 | 706 | ||
758 | 707 | typedef struct ns_dev { | |
759 | struct ns_skb_data | 708 | int index; /* Card ID to the device driver */ |
760 | { | 709 | int sram_size; /* In k x 32bit words. 32 or 128 */ |
761 | struct atm_vcc *vcc; | 710 | void __iomem *membase; /* Card's memory base address */ |
762 | int iovcnt; | 711 | unsigned long max_pcr; |
763 | }; | 712 | int rct_size; /* Number of entries */ |
764 | 713 | int vpibits; | |
765 | #define NS_SKB(skb) (((struct ns_skb_data *) (skb)->cb)) | 714 | int vcibits; |
766 | 715 | struct pci_dev *pcidev; | |
767 | 716 | struct idr idr; | |
768 | typedef struct ns_dev | 717 | struct atm_dev *atmdev; |
769 | { | 718 | tsq_info tsq; |
770 | int index; /* Card ID to the device driver */ | 719 | rsq_info rsq; |
771 | int sram_size; /* In k x 32bit words. 32 or 128 */ | 720 | scq_info *scq0, *scq1, *scq2; /* VBR SCQs */ |
772 | void __iomem *membase; /* Card's memory base address */ | 721 | skb_pool sbpool; /* Small buffers */ |
773 | unsigned long max_pcr; | 722 | skb_pool lbpool; /* Large buffers */ |
774 | int rct_size; /* Number of entries */ | 723 | skb_pool hbpool; /* Pre-allocated huge buffers */ |
775 | int vpibits; | 724 | skb_pool iovpool; /* iovector buffers */ |
776 | int vcibits; | 725 | volatile int efbie; /* Empty free buf. queue int. enabled */ |
777 | struct pci_dev *pcidev; | 726 | volatile u32 tst_addr; /* SRAM address of the TST in use */ |
778 | struct atm_dev *atmdev; | 727 | volatile int tst_free_entries; |
779 | tsq_info tsq; | 728 | vc_map vcmap[NS_MAX_RCTSIZE]; |
780 | rsq_info rsq; | 729 | vc_map *tste2vc[NS_TST_NUM_ENTRIES]; |
781 | scq_info *scq0, *scq1, *scq2; /* VBR SCQs */ | 730 | vc_map *scd2vc[NS_FRSCD_NUM]; |
782 | skb_pool sbpool; /* Small buffers */ | 731 | buf_nr sbnr; |
783 | skb_pool lbpool; /* Large buffers */ | 732 | buf_nr lbnr; |
784 | skb_pool hbpool; /* Pre-allocated huge buffers */ | 733 | buf_nr hbnr; |
785 | skb_pool iovpool; /* iovector buffers */ | 734 | buf_nr iovnr; |
786 | volatile int efbie; /* Empty free buf. queue int. enabled */ | 735 | int sbfqc; |
787 | volatile u32 tst_addr; /* SRAM address of the TST in use */ | 736 | int lbfqc; |
788 | volatile int tst_free_entries; | 737 | struct sk_buff *sm_handle; |
789 | vc_map vcmap[NS_MAX_RCTSIZE]; | 738 | u32 sm_addr; |
790 | vc_map *tste2vc[NS_TST_NUM_ENTRIES]; | 739 | struct sk_buff *lg_handle; |
791 | vc_map *scd2vc[NS_FRSCD_NUM]; | 740 | u32 lg_addr; |
792 | buf_nr sbnr; | 741 | struct sk_buff *rcbuf; /* Current raw cell buffer */ |
793 | buf_nr lbnr; | 742 | struct ns_rcqe *rawcell; |
794 | buf_nr hbnr; | 743 | u32 rawch; /* Raw cell queue head */ |
795 | buf_nr iovnr; | 744 | unsigned intcnt; /* Interrupt counter */ |
796 | int sbfqc; | 745 | spinlock_t int_lock; /* Interrupt lock */ |
797 | int lbfqc; | 746 | spinlock_t res_lock; /* Card resource lock */ |
798 | u32 sm_handle; | ||
799 | u32 sm_addr; | ||
800 | u32 lg_handle; | ||
801 | u32 lg_addr; | ||
802 | struct sk_buff *rcbuf; /* Current raw cell buffer */ | ||
803 | u32 rawch; /* Raw cell queue head */ | ||
804 | unsigned intcnt; /* Interrupt counter */ | ||
805 | spinlock_t int_lock; /* Interrupt lock */ | ||
806 | spinlock_t res_lock; /* Card resource lock */ | ||
807 | } ns_dev; | 747 | } ns_dev; |
808 | 748 | ||
809 | |||
810 | /* NOTE: Each tste2vc entry relates a given TST entry to the corresponding | 749 | /* NOTE: Each tste2vc entry relates a given TST entry to the corresponding |
811 | CBR vc. If the entry is not allocated, it must be NULL. | 750 | CBR vc. If the entry is not allocated, it must be NULL. |
812 | 751 | ||
813 | There are two TSTs so the driver can modify them on the fly | 752 | There are two TSTs so the driver can modify them on the fly |
814 | without stopping the transmission. | 753 | without stopping the transmission. |
815 | |||
816 | scd2vc allows us to find out unused fixed rate SCDs, because | ||
817 | they must have a NULL pointer here. */ | ||
818 | 754 | ||
755 | scd2vc allows us to find out unused fixed rate SCDs, because | ||
756 | they must have a NULL pointer here. */ | ||
819 | 757 | ||
820 | #endif /* _LINUX_NICSTAR_H_ */ | 758 | #endif /* _LINUX_NICSTAR_H_ */ |
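The new NS_PRV_* accessors above keep the driver's per-skb state (buffer type, DMA handle, iovec count) in the bytes of skb->cb that immediately follow struct atm_skb_data, i.e. right behind what ATM_SKB() returns, instead of overlaying the start of cb as the old NS_SKB_CB()/NS_SKB() macros did. A userspace sketch of the pointer arithmetic involved, with simplified stand-in structures (the field names and sizes here are illustrative, not the kernel's exact layout):

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for the kernel structures involved (sizes are illustrative). */
struct atm_skb_data {		/* what ATM_SKB(skb) points at inside skb->cb */
	void *vcc;
	unsigned long flags;
};

struct ns_skb_prv {		/* driver-private data stored right behind it */
	uint32_t buf_type;	/* BUF_SM/BUF_LG/BUF_NONE */
	uint32_t dma;
	int iovcnt;
};

struct fake_skb {
	void *head;		/* stand-in for the fields that precede cb[] */
	char cb[48];		/* the real sk_buff control block is 48 bytes */
};

#define ATM_SKB(skb)        ((struct atm_skb_data *)(skb)->cb)
#define NS_PRV_BUFTYPE(skb) (((struct ns_skb_prv *)(ATM_SKB(skb) + 1))->buf_type)

int main(void)
{
	struct fake_skb skb = { 0 };

	NS_PRV_BUFTYPE(&skb) = 0x00000000;	/* e.g. BUF_NONE */
	printf("buf_type lives %zu bytes into cb[]\n",
	       sizeof(struct atm_skb_data));
	printf("buf_type = 0x%08x\n", NS_PRV_BUFTYPE(&skb));
	return 0;
}

The only requirement of this scheme is that sizeof(struct atm_skb_data) plus sizeof(struct ns_skb_prv) still fits inside the 48-byte control block.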
diff --git a/drivers/atm/nicstarmac.c b/drivers/atm/nicstarmac.c index 842e26c45557..f594526f8c6d 100644 --- a/drivers/atm/nicstarmac.c +++ b/drivers/atm/nicstarmac.c | |||
@@ -13,15 +13,15 @@ typedef void __iomem *virt_addr_t; | |||
13 | 13 | ||
14 | #define CYCLE_DELAY 5 | 14 | #define CYCLE_DELAY 5 |
15 | 15 | ||
16 | /* This was the original definition | 16 | /* |
17 | This was the original definition | ||
17 | #define osp_MicroDelay(microsec) \ | 18 | #define osp_MicroDelay(microsec) \ |
18 | do { int _i = 4*microsec; while (--_i > 0) { __SLOW_DOWN_IO; }} while (0) | 19 | do { int _i = 4*microsec; while (--_i > 0) { __SLOW_DOWN_IO; }} while (0) |
19 | */ | 20 | */ |
20 | #define osp_MicroDelay(microsec) {unsigned long useconds = (microsec); \ | 21 | #define osp_MicroDelay(microsec) {unsigned long useconds = (microsec); \ |
21 | udelay((useconds));} | 22 | udelay((useconds));} |
22 | 23 | /* | |
23 | 24 | * The following tables represent the timing diagrams found in | |
24 | /* The following tables represent the timing diagrams found in | ||
25 | * the Data Sheet for the Xicor X25020 EEProm. The #defines below | 25 | * the Data Sheet for the Xicor X25020 EEProm. The #defines below |
26 | * represent the bits in the NICStAR's General Purpose register | 26 | * represent the bits in the NICStAR's General Purpose register |
27 | * that must be toggled for the corresponding actions on the EEProm | 27 | * that must be toggled for the corresponding actions on the EEProm |
@@ -31,86 +31,80 @@ typedef void __iomem *virt_addr_t; | |||
31 | /* Write Data To EEProm from SI line on rising edge of CLK */ | 31 | /* Write Data To EEProm from SI line on rising edge of CLK */ |
32 | /* Read Data From EEProm on falling edge of CLK */ | 32 | /* Read Data From EEProm on falling edge of CLK */ |
33 | 33 | ||
34 | #define CS_HIGH 0x0002 /* Chip select high */ | 34 | #define CS_HIGH 0x0002 /* Chip select high */ |
35 | #define CS_LOW 0x0000 /* Chip select low (active low)*/ | 35 | #define CS_LOW 0x0000 /* Chip select low (active low) */ |
36 | #define CLK_HIGH 0x0004 /* Clock high */ | 36 | #define CLK_HIGH 0x0004 /* Clock high */ |
37 | #define CLK_LOW 0x0000 /* Clock low */ | 37 | #define CLK_LOW 0x0000 /* Clock low */ |
38 | #define SI_HIGH 0x0001 /* Serial input data high */ | 38 | #define SI_HIGH 0x0001 /* Serial input data high */ |
39 | #define SI_LOW 0x0000 /* Serial input data low */ | 39 | #define SI_LOW 0x0000 /* Serial input data low */ |
40 | 40 | ||
41 | /* Read Status Register = 0000 0101b */ | 41 | /* Read Status Register = 0000 0101b */ |
42 | #if 0 | 42 | #if 0 |
43 | static u_int32_t rdsrtab[] = | 43 | static u_int32_t rdsrtab[] = { |
44 | { | 44 | CS_HIGH | CLK_HIGH, |
45 | CS_HIGH | CLK_HIGH, | 45 | CS_LOW | CLK_LOW, |
46 | CS_LOW | CLK_LOW, | 46 | CLK_HIGH, /* 0 */ |
47 | CLK_HIGH, /* 0 */ | 47 | CLK_LOW, |
48 | CLK_LOW, | 48 | CLK_HIGH, /* 0 */ |
49 | CLK_HIGH, /* 0 */ | 49 | CLK_LOW, |
50 | CLK_LOW, | 50 | CLK_HIGH, /* 0 */ |
51 | CLK_HIGH, /* 0 */ | 51 | CLK_LOW, |
52 | CLK_LOW, | 52 | CLK_HIGH, /* 0 */ |
53 | CLK_HIGH, /* 0 */ | 53 | CLK_LOW, |
54 | CLK_LOW, | 54 | CLK_HIGH, /* 0 */ |
55 | CLK_HIGH, /* 0 */ | 55 | CLK_LOW | SI_HIGH, |
56 | CLK_LOW | SI_HIGH, | 56 | CLK_HIGH | SI_HIGH, /* 1 */ |
57 | CLK_HIGH | SI_HIGH, /* 1 */ | 57 | CLK_LOW | SI_LOW, |
58 | CLK_LOW | SI_LOW, | 58 | CLK_HIGH, /* 0 */ |
59 | CLK_HIGH, /* 0 */ | 59 | CLK_LOW | SI_HIGH, |
60 | CLK_LOW | SI_HIGH, | 60 | CLK_HIGH | SI_HIGH /* 1 */ |
61 | CLK_HIGH | SI_HIGH /* 1 */ | ||
62 | }; | 61 | }; |
63 | #endif /* 0 */ | 62 | #endif /* 0 */ |
64 | |||
65 | 63 | ||
66 | /* Read from EEPROM = 0000 0011b */ | 64 | /* Read from EEPROM = 0000 0011b */ |
67 | static u_int32_t readtab[] = | 65 | static u_int32_t readtab[] = { |
68 | { | 66 | /* |
69 | /* | 67 | CS_HIGH | CLK_HIGH, |
70 | CS_HIGH | CLK_HIGH, | 68 | */ |
71 | */ | 69 | CS_LOW | CLK_LOW, |
72 | CS_LOW | CLK_LOW, | 70 | CLK_HIGH, /* 0 */ |
73 | CLK_HIGH, /* 0 */ | 71 | CLK_LOW, |
74 | CLK_LOW, | 72 | CLK_HIGH, /* 0 */ |
75 | CLK_HIGH, /* 0 */ | 73 | CLK_LOW, |
76 | CLK_LOW, | 74 | CLK_HIGH, /* 0 */ |
77 | CLK_HIGH, /* 0 */ | 75 | CLK_LOW, |
78 | CLK_LOW, | 76 | CLK_HIGH, /* 0 */ |
79 | CLK_HIGH, /* 0 */ | 77 | CLK_LOW, |
80 | CLK_LOW, | 78 | CLK_HIGH, /* 0 */ |
81 | CLK_HIGH, /* 0 */ | 79 | CLK_LOW, |
82 | CLK_LOW, | 80 | CLK_HIGH, /* 0 */ |
83 | CLK_HIGH, /* 0 */ | 81 | CLK_LOW | SI_HIGH, |
84 | CLK_LOW | SI_HIGH, | 82 | CLK_HIGH | SI_HIGH, /* 1 */ |
85 | CLK_HIGH | SI_HIGH, /* 1 */ | 83 | CLK_LOW | SI_HIGH, |
86 | CLK_LOW | SI_HIGH, | 84 | CLK_HIGH | SI_HIGH /* 1 */ |
87 | CLK_HIGH | SI_HIGH /* 1 */ | ||
88 | }; | 85 | }; |
89 | 86 | ||
90 | |||
91 | /* Clock to read from/write to the eeprom */ | 87 | /* Clock to read from/write to the eeprom */ |
92 | static u_int32_t clocktab[] = | 88 | static u_int32_t clocktab[] = { |
93 | { | 89 | CLK_LOW, |
94 | CLK_LOW, | 90 | CLK_HIGH, |
95 | CLK_HIGH, | 91 | CLK_LOW, |
96 | CLK_LOW, | 92 | CLK_HIGH, |
97 | CLK_HIGH, | 93 | CLK_LOW, |
98 | CLK_LOW, | 94 | CLK_HIGH, |
99 | CLK_HIGH, | 95 | CLK_LOW, |
100 | CLK_LOW, | 96 | CLK_HIGH, |
101 | CLK_HIGH, | 97 | CLK_LOW, |
102 | CLK_LOW, | 98 | CLK_HIGH, |
103 | CLK_HIGH, | 99 | CLK_LOW, |
104 | CLK_LOW, | 100 | CLK_HIGH, |
105 | CLK_HIGH, | 101 | CLK_LOW, |
106 | CLK_LOW, | 102 | CLK_HIGH, |
107 | CLK_HIGH, | 103 | CLK_LOW, |
108 | CLK_LOW, | 104 | CLK_HIGH, |
109 | CLK_HIGH, | 105 | CLK_LOW |
110 | CLK_LOW | ||
111 | }; | 106 | }; |
112 | 107 | ||
113 | |||
114 | #define NICSTAR_REG_WRITE(bs, reg, val) \ | 108 | #define NICSTAR_REG_WRITE(bs, reg, val) \ |
115 | while ( readl(bs + STAT) & 0x0200 ) ; \ | 109 | while ( readl(bs + STAT) & 0x0200 ) ; \ |
116 | writel((val),(base)+(reg)) | 110 | writel((val),(base)+(reg)) |
@@ -124,153 +118,131 @@ static u_int32_t clocktab[] = | |||
124 | * register. | 118 | * register. |
125 | */ | 119 | */ |
126 | #if 0 | 120 | #if 0 |
127 | u_int32_t | 121 | u_int32_t nicstar_read_eprom_status(virt_addr_t base) |
128 | nicstar_read_eprom_status( virt_addr_t base ) | ||
129 | { | 122 | { |
130 | u_int32_t val; | 123 | u_int32_t val; |
131 | u_int32_t rbyte; | 124 | u_int32_t rbyte; |
132 | int32_t i, j; | 125 | int32_t i, j; |
133 | 126 | ||
134 | /* Send read instruction */ | 127 | /* Send read instruction */ |
135 | val = NICSTAR_REG_READ( base, NICSTAR_REG_GENERAL_PURPOSE ) & 0xFFFFFFF0; | 128 | val = NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE) & 0xFFFFFFF0; |
136 | 129 | ||
137 | for (i=0; i<ARRAY_SIZE(rdsrtab); i++) | 130 | for (i = 0; i < ARRAY_SIZE(rdsrtab); i++) { |
138 | { | 131 | NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, |
139 | NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE, | 132 | (val | rdsrtab[i])); |
140 | (val | rdsrtab[i]) ); | 133 | osp_MicroDelay(CYCLE_DELAY); |
141 | osp_MicroDelay( CYCLE_DELAY ); | 134 | } |
142 | } | 135 | |
143 | 136 | /* Done sending instruction - now pull data off of bit 16, MSB first */ | |
144 | /* Done sending instruction - now pull data off of bit 16, MSB first */ | 137 | /* Data clocked out of eeprom on falling edge of clock */ |
145 | /* Data clocked out of eeprom on falling edge of clock */ | 138 | |
146 | 139 | rbyte = 0; | |
147 | rbyte = 0; | 140 | for (i = 7, j = 0; i >= 0; i--) { |
148 | for (i=7, j=0; i>=0; i--) | 141 | NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, |
149 | { | 142 | (val | clocktab[j++])); |
150 | NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE, | 143 | rbyte |= (((NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE) |
151 | (val | clocktab[j++]) ); | 144 | & 0x00010000) >> 16) << i); |
152 | rbyte |= (((NICSTAR_REG_READ( base, NICSTAR_REG_GENERAL_PURPOSE) | 145 | NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, |
153 | & 0x00010000) >> 16) << i); | 146 | (val | clocktab[j++])); |
154 | NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE, | 147 | osp_MicroDelay(CYCLE_DELAY); |
155 | (val | clocktab[j++]) ); | 148 | } |
156 | osp_MicroDelay( CYCLE_DELAY ); | 149 | NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, 2); |
157 | } | 150 | osp_MicroDelay(CYCLE_DELAY); |
158 | NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE, 2 ); | 151 | return rbyte; |
159 | osp_MicroDelay( CYCLE_DELAY ); | ||
160 | return rbyte; | ||
161 | } | 152 | } |
162 | #endif /* 0 */ | 153 | #endif /* 0 */ |
163 | |||
164 | 154 | ||
165 | /* | 155 | /* |
166 | * This routine will clock the Read_data function into the X2520 | 156 | * This routine will clock the Read_data function into the X2520 |
167 | * eeprom, followed by the address to read from, through the NicSTaR's General | 157 | * eeprom, followed by the address to read from, through the NicSTaR's General |
168 | * Purpose register. | 158 | * Purpose register. |
169 | */ | 159 | */ |
170 | 160 | ||
171 | static u_int8_t | 161 | static u_int8_t read_eprom_byte(virt_addr_t base, u_int8_t offset) |
172 | read_eprom_byte(virt_addr_t base, u_int8_t offset) | ||
173 | { | 162 | { |
174 | u_int32_t val = 0; | 163 | u_int32_t val = 0; |
175 | int i,j=0; | 164 | int i, j = 0; |
176 | u_int8_t tempread = 0; | 165 | u_int8_t tempread = 0; |
177 | 166 | ||
178 | val = NICSTAR_REG_READ( base, NICSTAR_REG_GENERAL_PURPOSE ) & 0xFFFFFFF0; | 167 | val = NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE) & 0xFFFFFFF0; |
179 | 168 | ||
180 | /* Send READ instruction */ | 169 | /* Send READ instruction */ |
181 | for (i=0; i<ARRAY_SIZE(readtab); i++) | 170 | for (i = 0; i < ARRAY_SIZE(readtab); i++) { |
182 | { | 171 | NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, |
183 | NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE, | 172 | (val | readtab[i])); |
184 | (val | readtab[i]) ); | 173 | osp_MicroDelay(CYCLE_DELAY); |
185 | osp_MicroDelay( CYCLE_DELAY ); | 174 | } |
186 | } | 175 | |
187 | 176 | /* Next, we need to send the byte address to read from */ | |
188 | /* Next, we need to send the byte address to read from */ | 177 | for (i = 7; i >= 0; i--) { |
189 | for (i=7; i>=0; i--) | 178 | NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, |
190 | { | 179 | (val | clocktab[j++] | ((offset >> i) & 1))); |
191 | NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE, | 180 | osp_MicroDelay(CYCLE_DELAY); |
192 | (val | clocktab[j++] | ((offset >> i) & 1) ) ); | 181 | NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, |
193 | osp_MicroDelay(CYCLE_DELAY); | 182 | (val | clocktab[j++] | ((offset >> i) & 1))); |
194 | NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE, | 183 | osp_MicroDelay(CYCLE_DELAY); |
195 | (val | clocktab[j++] | ((offset >> i) & 1) ) ); | 184 | } |
196 | osp_MicroDelay( CYCLE_DELAY ); | 185 | |
197 | } | 186 | j = 0; |
198 | 187 | ||
199 | j = 0; | 188 | /* Now, we can read data from the eeprom by clocking it in */ |
200 | 189 | for (i = 7; i >= 0; i--) { | |
201 | /* Now, we can read data from the eeprom by clocking it in */ | 190 | NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, |
202 | for (i=7; i>=0; i--) | 191 | (val | clocktab[j++])); |
203 | { | 192 | osp_MicroDelay(CYCLE_DELAY); |
204 | NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE, | 193 | tempread |= |
205 | (val | clocktab[j++]) ); | 194 | (((NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE) |
206 | osp_MicroDelay( CYCLE_DELAY ); | 195 | & 0x00010000) >> 16) << i); |
207 | tempread |= (((NICSTAR_REG_READ( base, NICSTAR_REG_GENERAL_PURPOSE ) | 196 | NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, |
208 | & 0x00010000) >> 16) << i); | 197 | (val | clocktab[j++])); |
209 | NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE, | 198 | osp_MicroDelay(CYCLE_DELAY); |
210 | (val | clocktab[j++]) ); | 199 | } |
211 | osp_MicroDelay( CYCLE_DELAY ); | 200 | |
212 | } | 201 | NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, 2); |
213 | 202 | osp_MicroDelay(CYCLE_DELAY); | |
214 | NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE, 2 ); | 203 | return tempread; |
215 | osp_MicroDelay( CYCLE_DELAY ); | ||
216 | return tempread; | ||
217 | } | 204 | } |
218 | 205 | ||
219 | 206 | static void nicstar_init_eprom(virt_addr_t base) | |
220 | static void | ||
221 | nicstar_init_eprom( virt_addr_t base ) | ||
222 | { | 207 | { |
223 | u_int32_t val; | 208 | u_int32_t val; |
224 | 209 | ||
225 | /* | 210 | /* |
226 | * turn chip select off | 211 | * turn chip select off |
227 | */ | 212 | */ |
228 | val = NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE) & 0xFFFFFFF0; | 213 | val = NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE) & 0xFFFFFFF0; |
229 | 214 | ||
230 | NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, | 215 | NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, |
231 | (val | CS_HIGH | CLK_HIGH)); | 216 | (val | CS_HIGH | CLK_HIGH)); |
232 | osp_MicroDelay( CYCLE_DELAY ); | 217 | osp_MicroDelay(CYCLE_DELAY); |
233 | 218 | ||
234 | NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, | 219 | NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, |
235 | (val | CS_HIGH | CLK_LOW)); | 220 | (val | CS_HIGH | CLK_LOW)); |
236 | osp_MicroDelay( CYCLE_DELAY ); | 221 | osp_MicroDelay(CYCLE_DELAY); |
237 | 222 | ||
238 | NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, | 223 | NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, |
239 | (val | CS_HIGH | CLK_HIGH)); | 224 | (val | CS_HIGH | CLK_HIGH)); |
240 | osp_MicroDelay( CYCLE_DELAY ); | 225 | osp_MicroDelay(CYCLE_DELAY); |
241 | 226 | ||
242 | NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, | 227 | NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, |
243 | (val | CS_HIGH | CLK_LOW)); | 228 | (val | CS_HIGH | CLK_LOW)); |
244 | osp_MicroDelay( CYCLE_DELAY ); | 229 | osp_MicroDelay(CYCLE_DELAY); |
245 | } | 230 | } |
246 | 231 | ||
247 | |||
248 | /* | 232 | /* |
249 | * This routine will be the interface to the ReadPromByte function | 233 | * This routine will be the interface to the ReadPromByte function |
250 | * above. | 234 | * above. |
251 | */ | 235 | */ |
252 | 236 | ||
253 | static void | 237 | static void |
254 | nicstar_read_eprom( | 238 | nicstar_read_eprom(virt_addr_t base, |
255 | virt_addr_t base, | 239 | u_int8_t prom_offset, u_int8_t * buffer, u_int32_t nbytes) |
256 | u_int8_t prom_offset, | ||
257 | u_int8_t *buffer, | ||
258 | u_int32_t nbytes ) | ||
259 | { | 240 | { |
260 | u_int i; | 241 | u_int i; |
261 | |||
262 | for (i=0; i<nbytes; i++) | ||
263 | { | ||
264 | buffer[i] = read_eprom_byte( base, prom_offset ); | ||
265 | ++prom_offset; | ||
266 | osp_MicroDelay( CYCLE_DELAY ); | ||
267 | } | ||
268 | } | ||
269 | |||
270 | 242 | ||
271 | /* | 243 | for (i = 0; i < nbytes; i++) { |
272 | void osp_MicroDelay(int x) { | 244 | buffer[i] = read_eprom_byte(base, prom_offset); |
273 | 245 | ++prom_offset; | |
246 | osp_MicroDelay(CYCLE_DELAY); | ||
247 | } | ||
274 | } | 248 | } |
275 | */ | ||
276 | |||
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c index f37b8f68d0aa..8c46baee621b 100644 --- a/drivers/isdn/i4l/isdn_ppp.c +++ b/drivers/isdn/i4l/isdn_ppp.c | |||
@@ -449,14 +449,9 @@ static int get_filter(void __user *arg, struct sock_filter **p) | |||
449 | 449 | ||
450 | /* uprog.len is unsigned short, so no overflow here */ | 450 | /* uprog.len is unsigned short, so no overflow here */ |
451 | len = uprog.len * sizeof(struct sock_filter); | 451 | len = uprog.len * sizeof(struct sock_filter); |
452 | code = kmalloc(len, GFP_KERNEL); | 452 | code = memdup_user(uprog.filter, len); |
453 | if (code == NULL) | 453 | if (IS_ERR(code)) |
454 | return -ENOMEM; | 454 | return PTR_ERR(code); |
455 | |||
456 | if (copy_from_user(code, uprog.filter, len)) { | ||
457 | kfree(code); | ||
458 | return -EFAULT; | ||
459 | } | ||
460 | 455 | ||
461 | err = sk_chk_filter(code, uprog.len); | 456 | err = sk_chk_filter(code, uprog.len); |
462 | if (err) { | 457 | if (err) { |
diff --git a/drivers/isdn/pcbit/drv.c b/drivers/isdn/pcbit/drv.c index 123c1d6c43b4..1507d2e83fbb 100644 --- a/drivers/isdn/pcbit/drv.c +++ b/drivers/isdn/pcbit/drv.c | |||
@@ -411,14 +411,10 @@ static int pcbit_writecmd(const u_char __user *buf, int len, int driver, int cha | |||
411 | return -EINVAL; | 411 | return -EINVAL; |
412 | } | 412 | } |
413 | 413 | ||
414 | cbuf = kmalloc(len, GFP_KERNEL); | 414 | cbuf = memdup_user(buf, len); |
415 | if (!cbuf) | 415 | if (IS_ERR(cbuf)) |
416 | return -ENOMEM; | 416 | return PTR_ERR(cbuf); |
417 | 417 | ||
418 | if (copy_from_user(cbuf, buf, len)) { | ||
419 | kfree(cbuf); | ||
420 | return -EFAULT; | ||
421 | } | ||
422 | memcpy_toio(dev->sh_mem, cbuf, len); | 418 | memcpy_toio(dev->sh_mem, cbuf, len); |
423 | kfree(cbuf); | 419 | kfree(cbuf); |
424 | return len; | 420 | return len; |
diff --git a/drivers/isdn/sc/ioctl.c b/drivers/isdn/sc/ioctl.c index 1081091bbfaf..43c5dc3516e5 100644 --- a/drivers/isdn/sc/ioctl.c +++ b/drivers/isdn/sc/ioctl.c | |||
@@ -215,19 +215,13 @@ int sc_ioctl(int card, scs_ioctl *data) | |||
215 | pr_debug("%s: DCBIOSETSPID: ioctl received\n", | 215 | pr_debug("%s: DCBIOSETSPID: ioctl received\n", |
216 | sc_adapter[card]->devicename); | 216 | sc_adapter[card]->devicename); |
217 | 217 | ||
218 | spid = kmalloc(SCIOC_SPIDSIZE, GFP_KERNEL); | ||
219 | if(!spid) { | ||
220 | kfree(rcvmsg); | ||
221 | return -ENOMEM; | ||
222 | } | ||
223 | |||
224 | /* | 218 | /* |
225 | * Get the spid from user space | 219 | * Get the spid from user space |
226 | */ | 220 | */ |
227 | if (copy_from_user(spid, data->dataptr, SCIOC_SPIDSIZE)) { | 221 | spid = memdup_user(data->dataptr, SCIOC_SPIDSIZE); |
222 | if (IS_ERR(spid)) { | ||
228 | kfree(rcvmsg); | 223 | kfree(rcvmsg); |
229 | kfree(spid); | 224 | return PTR_ERR(spid); |
230 | return -EFAULT; | ||
231 | } | 225 | } |
232 | 226 | ||
233 | pr_debug("%s: SCIOCSETSPID: setting channel %d spid to %s\n", | 227 | pr_debug("%s: SCIOCSETSPID: setting channel %d spid to %s\n", |
@@ -296,18 +290,13 @@ int sc_ioctl(int card, scs_ioctl *data) | |||
296 | pr_debug("%s: SCIOSETDN: ioctl received\n", | 290 | pr_debug("%s: SCIOSETDN: ioctl received\n", |
297 | sc_adapter[card]->devicename); | 291 | sc_adapter[card]->devicename); |
298 | 292 | ||
299 | dn = kmalloc(SCIOC_DNSIZE, GFP_KERNEL); | ||
300 | if (!dn) { | ||
301 | kfree(rcvmsg); | ||
302 | return -ENOMEM; | ||
303 | } | ||
304 | /* | 293 | /* |
305 | * Get the spid from user space | 294 | * Get the spid from user space |
306 | */ | 295 | */ |
307 | if (copy_from_user(dn, data->dataptr, SCIOC_DNSIZE)) { | 296 | dn = memdup_user(data->dataptr, SCIOC_DNSIZE); |
297 | if (IS_ERR(dn)) { | ||
308 | kfree(rcvmsg); | 298 | kfree(rcvmsg); |
309 | kfree(dn); | 299 | return PTR_ERR(dn); |
310 | return -EFAULT; | ||
311 | } | 300 | } |
312 | 301 | ||
313 | pr_debug("%s: SCIOCSETDN: setting channel %d dn to %s\n", | 302 | pr_debug("%s: SCIOCSETDN: setting channel %d dn to %s\n", |
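The three ISDN conversions above (isdn_ppp, pcbit, sc) all follow the same shape: an open-coded kmalloc() plus copy_from_user() pair, with its two separate failure paths, collapses into one memdup_user() call whose result is checked with IS_ERR()/PTR_ERR(). A hedged sketch of the resulting pattern — the handler name and its arguments are invented for illustration, while memdup_user(), IS_ERR() and PTR_ERR() are the real kernel helpers introduced by these hunks:

    #include <linux/err.h>
    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/uaccess.h>

    static int my_cmd(const void __user *ubuf, size_t len)
    {
            void *buf;

            buf = memdup_user(ubuf, len);   /* kmalloc() + copy_from_user() in one step */
            if (IS_ERR(buf))
                    return PTR_ERR(buf);    /* -ENOMEM or -EFAULT, as appropriate */

            /* ... use buf ... */

            kfree(buf);
            return 0;
    }

Besides being shorter, the single error path avoids the easy-to-miss kfree()-on-partial-copy cleanup the old code had to carry.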
diff --git a/drivers/net/3c527.h b/drivers/net/3c527.h index 75e28fef797b..d693b8d15cde 100644 --- a/drivers/net/3c527.h +++ b/drivers/net/3c527.h | |||
@@ -34,7 +34,7 @@ struct mc32_mailbox | |||
34 | { | 34 | { |
35 | u16 mbox; | 35 | u16 mbox; |
36 | u16 data[1]; | 36 | u16 data[1]; |
37 | } __attribute((packed)); | 37 | } __packed; |
38 | 38 | ||
39 | struct skb_header | 39 | struct skb_header |
40 | { | 40 | { |
@@ -43,7 +43,7 @@ struct skb_header | |||
43 | u16 next; /* Do not change! */ | 43 | u16 next; /* Do not change! */ |
44 | u16 length; | 44 | u16 length; |
45 | u32 data; | 45 | u32 data; |
46 | } __attribute((packed)); | 46 | } __packed; |
47 | 47 | ||
48 | struct mc32_stats | 48 | struct mc32_stats |
49 | { | 49 | { |
@@ -68,7 +68,7 @@ struct mc32_stats | |||
68 | u32 dataA[6]; | 68 | u32 dataA[6]; |
69 | u16 dataB[5]; | 69 | u16 dataB[5]; |
70 | u32 dataC[14]; | 70 | u32 dataC[14]; |
71 | } __attribute((packed)); | 71 | } __packed; |
72 | 72 | ||
73 | #define STATUS_MASK 0x0F | 73 | #define STATUS_MASK 0x0F |
74 | #define COMPLETED (1<<7) | 74 | #define COMPLETED (1<<7) |
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c index 9c149750e2bf..e949ba80127d 100644 --- a/drivers/net/8139cp.c +++ b/drivers/net/8139cp.c | |||
@@ -322,7 +322,7 @@ struct cp_dma_stats { | |||
322 | __le32 rx_ok_mcast; | 322 | __le32 rx_ok_mcast; |
323 | __le16 tx_abort; | 323 | __le16 tx_abort; |
324 | __le16 tx_underrun; | 324 | __le16 tx_underrun; |
325 | } __attribute__((packed)); | 325 | } __packed; |
326 | 326 | ||
327 | struct cp_extra_stats { | 327 | struct cp_extra_stats { |
328 | unsigned long rx_frags; | 328 | unsigned long rx_frags; |
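The 3c527.h and 8139cp.c hunks replace the open-coded attribute spellings with the kernel's __packed macro, which expands to __attribute__((packed)); note that the old 3c527.h form, __attribute((packed)), even relied on GCC accepting the non-underscored keyword. A minimal sketch of the idiom (the struct itself is invented):

    #include <linux/compiler.h>
    #include <linux/types.h>

    /* __packed removes padding so the in-memory layout stays byte-exact
     * with the device's descriptor/wire format. */
    struct example_desc {
            __le16  length;
            __le32  addr;
            u8      flags;
    } __packed;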
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c index 4ba72933f0da..cc7d46238801 100644 --- a/drivers/net/8139too.c +++ b/drivers/net/8139too.c | |||
@@ -662,7 +662,7 @@ static const struct ethtool_ops rtl8139_ethtool_ops; | |||
662 | /* read MMIO register */ | 662 | /* read MMIO register */ |
663 | #define RTL_R8(reg) ioread8 (ioaddr + (reg)) | 663 | #define RTL_R8(reg) ioread8 (ioaddr + (reg)) |
664 | #define RTL_R16(reg) ioread16 (ioaddr + (reg)) | 664 | #define RTL_R16(reg) ioread16 (ioaddr + (reg)) |
665 | #define RTL_R32(reg) ((unsigned long) ioread32 (ioaddr + (reg))) | 665 | #define RTL_R32(reg) ioread32 (ioaddr + (reg)) |
666 | 666 | ||
667 | 667 | ||
668 | static const u16 rtl8139_intr_mask = | 668 | static const u16 rtl8139_intr_mask = |
@@ -861,7 +861,7 @@ retry: | |||
861 | 861 | ||
862 | /* if unknown chip, assume array element #0, original RTL-8139 in this case */ | 862 | /* if unknown chip, assume array element #0, original RTL-8139 in this case */ |
863 | dev_dbg(&pdev->dev, "unknown chip version, assuming RTL-8139\n"); | 863 | dev_dbg(&pdev->dev, "unknown chip version, assuming RTL-8139\n"); |
864 | dev_dbg(&pdev->dev, "TxConfig = 0x%lx\n", RTL_R32 (TxConfig)); | 864 | dev_dbg(&pdev->dev, "TxConfig = 0x%x\n", RTL_R32 (TxConfig)); |
865 | tp->chipset = 0; | 865 | tp->chipset = 0; |
866 | 866 | ||
867 | match: | 867 | match: |
@@ -1642,7 +1642,7 @@ static void rtl8139_tx_timeout_task (struct work_struct *work) | |||
1642 | netdev_dbg(dev, "Tx queue start entry %ld dirty entry %ld\n", | 1642 | netdev_dbg(dev, "Tx queue start entry %ld dirty entry %ld\n", |
1643 | tp->cur_tx, tp->dirty_tx); | 1643 | tp->cur_tx, tp->dirty_tx); |
1644 | for (i = 0; i < NUM_TX_DESC; i++) | 1644 | for (i = 0; i < NUM_TX_DESC; i++) |
1645 | netdev_dbg(dev, "Tx descriptor %d is %08lx%s\n", | 1645 | netdev_dbg(dev, "Tx descriptor %d is %08x%s\n", |
1646 | i, RTL_R32(TxStatus0 + (i * 4)), | 1646 | i, RTL_R32(TxStatus0 + (i * 4)), |
1647 | i == tp->dirty_tx % NUM_TX_DESC ? | 1647 | i == tp->dirty_tx % NUM_TX_DESC ? |
1648 | " (queue head)" : ""); | 1648 | " (queue head)" : ""); |
@@ -2486,7 +2486,7 @@ static void __set_rx_mode (struct net_device *dev) | |||
2486 | int rx_mode; | 2486 | int rx_mode; |
2487 | u32 tmp; | 2487 | u32 tmp; |
2488 | 2488 | ||
2489 | netdev_dbg(dev, "rtl8139_set_rx_mode(%04x) done -- Rx config %08lx\n", | 2489 | netdev_dbg(dev, "rtl8139_set_rx_mode(%04x) done -- Rx config %08x\n", |
2490 | dev->flags, RTL_R32(RxConfig)); | 2490 | dev->flags, RTL_R32(RxConfig)); |
2491 | 2491 | ||
2492 | /* Note: do not reorder, GCC is clever about common statements. */ | 2492 | /* Note: do not reorder, GCC is clever about common statements. */ |
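With the (unsigned long) cast dropped from RTL_R32(), the macro now yields the u32-sized value that ioread32() returns, so the debug format strings in the same file lose their 'l' length modifier to match; keeping %lx/%08lx would trip -Wformat once the cast is gone. A small fragment showing the pairing, assuming the usual driver context (ioaddr, dev) and with REG_FOO as a placeholder offset, not a real register name:

            u32 val = ioread32(ioaddr + REG_FOO);   /* REG_FOO is a placeholder */

            /* a u32 pairs with %x / %08x; only long values take %lx */
            netdev_dbg(dev, "REG_FOO = 0x%08x\n", val);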
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 2decc597bda7..fe113d0e9456 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -1659,6 +1659,7 @@ config R6040 | |||
1659 | depends on NET_PCI && PCI | 1659 | depends on NET_PCI && PCI |
1660 | select CRC32 | 1660 | select CRC32 |
1661 | select MII | 1661 | select MII |
1662 | select PHYLIB | ||
1662 | help | 1663 | help |
1663 | This is a driver for the R6040 Fast Ethernet MACs found in the | 1664 | This is a driver for the R6040 Fast Ethernet MACs found in the |
1664 | the RDC R-321x System-on-chips. | 1665 | the RDC R-321x System-on-chips. |
diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c index 355797f70048..42fce91b71fc 100644 --- a/drivers/net/arcnet/capmode.c +++ b/drivers/net/arcnet/capmode.c | |||
@@ -37,69 +37,6 @@ | |||
37 | 37 | ||
38 | #define VERSION "arcnet: cap mode (`c') encapsulation support loaded.\n" | 38 | #define VERSION "arcnet: cap mode (`c') encapsulation support loaded.\n" |
39 | 39 | ||
40 | |||
41 | static void rx(struct net_device *dev, int bufnum, | ||
42 | struct archdr *pkthdr, int length); | ||
43 | static int build_header(struct sk_buff *skb, | ||
44 | struct net_device *dev, | ||
45 | unsigned short type, | ||
46 | uint8_t daddr); | ||
47 | static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length, | ||
48 | int bufnum); | ||
49 | static int ack_tx(struct net_device *dev, int acked); | ||
50 | |||
51 | |||
52 | static struct ArcProto capmode_proto = | ||
53 | { | ||
54 | 'r', | ||
55 | XMTU, | ||
56 | 0, | ||
57 | rx, | ||
58 | build_header, | ||
59 | prepare_tx, | ||
60 | NULL, | ||
61 | ack_tx | ||
62 | }; | ||
63 | |||
64 | |||
65 | static void arcnet_cap_init(void) | ||
66 | { | ||
67 | int count; | ||
68 | |||
69 | for (count = 1; count <= 8; count++) | ||
70 | if (arc_proto_map[count] == arc_proto_default) | ||
71 | arc_proto_map[count] = &capmode_proto; | ||
72 | |||
73 | /* for cap mode, we only set the bcast proto if there's no better one */ | ||
74 | if (arc_bcast_proto == arc_proto_default) | ||
75 | arc_bcast_proto = &capmode_proto; | ||
76 | |||
77 | arc_proto_default = &capmode_proto; | ||
78 | arc_raw_proto = &capmode_proto; | ||
79 | } | ||
80 | |||
81 | |||
82 | #ifdef MODULE | ||
83 | |||
84 | static int __init capmode_module_init(void) | ||
85 | { | ||
86 | printk(VERSION); | ||
87 | arcnet_cap_init(); | ||
88 | return 0; | ||
89 | } | ||
90 | |||
91 | static void __exit capmode_module_exit(void) | ||
92 | { | ||
93 | arcnet_unregister_proto(&capmode_proto); | ||
94 | } | ||
95 | module_init(capmode_module_init); | ||
96 | module_exit(capmode_module_exit); | ||
97 | |||
98 | MODULE_LICENSE("GPL"); | ||
99 | #endif /* MODULE */ | ||
100 | |||
101 | |||
102 | |||
103 | /* packet receiver */ | 40 | /* packet receiver */ |
104 | static void rx(struct net_device *dev, int bufnum, | 41 | static void rx(struct net_device *dev, int bufnum, |
105 | struct archdr *pkthdr, int length) | 42 | struct archdr *pkthdr, int length) |
@@ -231,65 +168,107 @@ static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length, | |||
231 | BUGMSG(D_DURING, "prepare_tx: length=%d ofs=%d\n", | 168 | BUGMSG(D_DURING, "prepare_tx: length=%d ofs=%d\n", |
232 | length,ofs); | 169 | length,ofs); |
233 | 170 | ||
234 | // Copy the arcnet-header + the protocol byte down: | 171 | /* Copy the arcnet-header + the protocol byte down: */ |
235 | lp->hw.copy_to_card(dev, bufnum, 0, hard, ARC_HDR_SIZE); | 172 | lp->hw.copy_to_card(dev, bufnum, 0, hard, ARC_HDR_SIZE); |
236 | lp->hw.copy_to_card(dev, bufnum, ofs, &pkt->soft.cap.proto, | 173 | lp->hw.copy_to_card(dev, bufnum, ofs, &pkt->soft.cap.proto, |
237 | sizeof(pkt->soft.cap.proto)); | 174 | sizeof(pkt->soft.cap.proto)); |
238 | 175 | ||
239 | // Skip the extra integer we have written into it as a cookie | 176 | /* Skip the extra integer we have written into it as a cookie |
240 | // but write the rest of the message: | 177 | but write the rest of the message: */ |
241 | lp->hw.copy_to_card(dev, bufnum, ofs+1, | 178 | lp->hw.copy_to_card(dev, bufnum, ofs+1, |
242 | ((unsigned char*)&pkt->soft.cap.mes),length-1); | 179 | ((unsigned char*)&pkt->soft.cap.mes),length-1); |
243 | 180 | ||
244 | lp->lastload_dest = hard->dest; | 181 | lp->lastload_dest = hard->dest; |
245 | 182 | ||
246 | return 1; /* done */ | 183 | return 1; /* done */ |
247 | } | 184 | } |
248 | 185 | ||
249 | |||
250 | static int ack_tx(struct net_device *dev, int acked) | 186 | static int ack_tx(struct net_device *dev, int acked) |
251 | { | 187 | { |
252 | struct arcnet_local *lp = netdev_priv(dev); | 188 | struct arcnet_local *lp = netdev_priv(dev); |
253 | struct sk_buff *ackskb; | 189 | struct sk_buff *ackskb; |
254 | struct archdr *ackpkt; | 190 | struct archdr *ackpkt; |
255 | int length=sizeof(struct arc_cap); | 191 | int length=sizeof(struct arc_cap); |
256 | 192 | ||
257 | BUGMSG(D_DURING, "capmode: ack_tx: protocol: %x: result: %d\n", | 193 | BUGMSG(D_DURING, "capmode: ack_tx: protocol: %x: result: %d\n", |
258 | lp->outgoing.skb->protocol, acked); | 194 | lp->outgoing.skb->protocol, acked); |
259 | 195 | ||
260 | BUGLVL(D_SKB) arcnet_dump_skb(dev, lp->outgoing.skb, "ack_tx"); | 196 | BUGLVL(D_SKB) arcnet_dump_skb(dev, lp->outgoing.skb, "ack_tx"); |
261 | 197 | ||
262 | /* Now alloc a skb to send back up through the layers: */ | 198 | /* Now alloc a skb to send back up through the layers: */ |
263 | ackskb = alloc_skb(length + ARC_HDR_SIZE , GFP_ATOMIC); | 199 | ackskb = alloc_skb(length + ARC_HDR_SIZE , GFP_ATOMIC); |
264 | if (ackskb == NULL) { | 200 | if (ackskb == NULL) { |
265 | BUGMSG(D_NORMAL, "Memory squeeze, can't acknowledge.\n"); | 201 | BUGMSG(D_NORMAL, "Memory squeeze, can't acknowledge.\n"); |
266 | goto free_outskb; | 202 | goto free_outskb; |
267 | } | 203 | } |
204 | |||
205 | skb_put(ackskb, length + ARC_HDR_SIZE ); | ||
206 | ackskb->dev = dev; | ||
207 | |||
208 | skb_reset_mac_header(ackskb); | ||
209 | ackpkt = (struct archdr *)skb_mac_header(ackskb); | ||
210 | /* skb_pull(ackskb, ARC_HDR_SIZE); */ | ||
268 | 211 | ||
269 | skb_put(ackskb, length + ARC_HDR_SIZE ); | 212 | skb_copy_from_linear_data(lp->outgoing.skb, ackpkt, |
270 | ackskb->dev = dev; | 213 | ARC_HDR_SIZE + sizeof(struct arc_cap)); |
214 | ackpkt->soft.cap.proto = 0; /* using protocol 0 for acknowledge */ | ||
215 | ackpkt->soft.cap.mes.ack=acked; | ||
271 | 216 | ||
272 | skb_reset_mac_header(ackskb); | 217 | BUGMSG(D_PROTO, "Ackknowledge for cap packet %x.\n", |
273 | ackpkt = (struct archdr *)skb_mac_header(ackskb); | 218 | *((int*)&ackpkt->soft.cap.cookie[0])); |
274 | /* skb_pull(ackskb, ARC_HDR_SIZE); */ | ||
275 | 219 | ||
220 | ackskb->protocol = cpu_to_be16(ETH_P_ARCNET); | ||
276 | 221 | ||
277 | skb_copy_from_linear_data(lp->outgoing.skb, ackpkt, | 222 | BUGLVL(D_SKB) arcnet_dump_skb(dev, ackskb, "ack_tx_recv"); |
278 | ARC_HDR_SIZE + sizeof(struct arc_cap)); | 223 | netif_rx(ackskb); |
279 | ackpkt->soft.cap.proto=0; /* using protocol 0 for acknowledge */ | ||
280 | ackpkt->soft.cap.mes.ack=acked; | ||
281 | 224 | ||
282 | BUGMSG(D_PROTO, "Ackknowledge for cap packet %x.\n", | 225 | free_outskb: |
283 | *((int*)&ackpkt->soft.cap.cookie[0])); | 226 | dev_kfree_skb_irq(lp->outgoing.skb); |
227 | lp->outgoing.proto = NULL; /* We are always finished when in this protocol */ | ||
284 | 228 | ||
285 | ackskb->protocol = cpu_to_be16(ETH_P_ARCNET); | 229 | return 0; |
230 | } | ||
286 | 231 | ||
287 | BUGLVL(D_SKB) arcnet_dump_skb(dev, ackskb, "ack_tx_recv"); | 232 | static struct ArcProto capmode_proto = |
288 | netif_rx(ackskb); | 233 | { |
234 | 'r', | ||
235 | XMTU, | ||
236 | 0, | ||
237 | rx, | ||
238 | build_header, | ||
239 | prepare_tx, | ||
240 | NULL, | ||
241 | ack_tx | ||
242 | }; | ||
289 | 243 | ||
290 | free_outskb: | 244 | static void arcnet_cap_init(void) |
291 | dev_kfree_skb_irq(lp->outgoing.skb); | 245 | { |
292 | lp->outgoing.proto = NULL; /* We are always finished when in this protocol */ | 246 | int count; |
293 | 247 | ||
294 | return 0; | 248 | for (count = 1; count <= 8; count++) |
249 | if (arc_proto_map[count] == arc_proto_default) | ||
250 | arc_proto_map[count] = &capmode_proto; | ||
251 | |||
252 | /* for cap mode, we only set the bcast proto if there's no better one */ | ||
253 | if (arc_bcast_proto == arc_proto_default) | ||
254 | arc_bcast_proto = &capmode_proto; | ||
255 | |||
256 | arc_proto_default = &capmode_proto; | ||
257 | arc_raw_proto = &capmode_proto; | ||
295 | } | 258 | } |
259 | |||
260 | static int __init capmode_module_init(void) | ||
261 | { | ||
262 | printk(VERSION); | ||
263 | arcnet_cap_init(); | ||
264 | return 0; | ||
265 | } | ||
266 | |||
267 | static void __exit capmode_module_exit(void) | ||
268 | { | ||
269 | arcnet_unregister_proto(&capmode_proto); | ||
270 | } | ||
271 | module_init(capmode_module_init); | ||
272 | module_exit(capmode_module_exit); | ||
273 | |||
274 | MODULE_LICENSE("GPL"); | ||
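The capmode.c rework above is a pure reorganization: rx(), build_header(), prepare_tx() and ack_tx() move ahead of the capmode_proto definition and the module init/exit code, which lets the block of static forward declarations at the top of the file be deleted (and the #ifdef MODULE guard go away). The general shape, as an invented sketch — defining the handler before the ops structure that references it makes a separate prototype unnecessary:

    /* Illustrative only; the structure and handler names are made up. */
    struct my_ops {
            int (*handle)(int arg);
    };

    static int my_handler(int arg)
    {
            return arg * 2;
    }

    static struct my_ops example_ops = {
            .handle = my_handler,   /* already defined above, no prototype needed */
    };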
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c index 24df0325090c..ee2f8425dbe7 100644 --- a/drivers/net/arm/ixp4xx_eth.c +++ b/drivers/net/arm/ixp4xx_eth.c | |||
@@ -738,6 +738,17 @@ static void eth_set_mcast_list(struct net_device *dev) | |||
738 | struct netdev_hw_addr *ha; | 738 | struct netdev_hw_addr *ha; |
739 | u8 diffs[ETH_ALEN], *addr; | 739 | u8 diffs[ETH_ALEN], *addr; |
740 | int i; | 740 | int i; |
741 | static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; | ||
742 | |||
743 | if (dev->flags & IFF_ALLMULTI) { | ||
744 | for (i = 0; i < ETH_ALEN; i++) { | ||
745 | __raw_writel(allmulti[i], &port->regs->mcast_addr[i]); | ||
746 | __raw_writel(allmulti[i], &port->regs->mcast_mask[i]); | ||
747 | } | ||
748 | __raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN, | ||
749 | &port->regs->rx_control[0]); | ||
750 | return; | ||
751 | } | ||
741 | 752 | ||
742 | if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) { | 753 | if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) { |
743 | __raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN, | 754 | __raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN, |
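The new ixp4xx_eth block handles IFF_ALLMULTI ahead of the existing promiscuous/empty-list check: it writes 01:00:00:00:00:00 into both the multicast address and mask registers and keeps the address filter enabled, so only the multicast bit of the destination address is compared and every multicast frame is accepted. A conceptual model of that address-under-mask match, written as standalone C (the hardware does this comparison itself; filter_match() is only an illustration):

    #include <stdbool.h>
    #include <stdint.h>

    /* A frame passes when the destination matches mcast_addr on the bits
     * selected by mcast_mask.  With addr = mask = 01:00:00:00:00:00 only
     * the multicast bit is compared, so all multicast frames match. */
    static bool filter_match(const uint8_t *dst,
                             const uint8_t *addr, const uint8_t *mask)
    {
            int i;

            for (i = 0; i < 6; i++)
                    if ((dst[i] & mask[i]) != (addr[i] & mask[i]))
                            return false;
            return true;
    }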
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h index 84ae905bf732..52abbbdf8a08 100644 --- a/drivers/net/atl1c/atl1c.h +++ b/drivers/net/atl1c/atl1c.h | |||
@@ -73,7 +73,8 @@ | |||
73 | #define FULL_DUPLEX 2 | 73 | #define FULL_DUPLEX 2 |
74 | 74 | ||
75 | #define AT_RX_BUF_SIZE (ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN) | 75 | #define AT_RX_BUF_SIZE (ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN) |
76 | #define MAX_JUMBO_FRAME_SIZE (9*1024) | 76 | #define MAX_JUMBO_FRAME_SIZE (6*1024) |
77 | #define MAX_TSO_FRAME_SIZE (7*1024) | ||
77 | #define MAX_TX_OFFLOAD_THRESH (9*1024) | 78 | #define MAX_TX_OFFLOAD_THRESH (9*1024) |
78 | 79 | ||
79 | #define AT_MAX_RECEIVE_QUEUE 4 | 80 | #define AT_MAX_RECEIVE_QUEUE 4 |
@@ -87,10 +88,11 @@ | |||
87 | #define AT_MAX_INT_WORK 5 | 88 | #define AT_MAX_INT_WORK 5 |
88 | #define AT_TWSI_EEPROM_TIMEOUT 100 | 89 | #define AT_TWSI_EEPROM_TIMEOUT 100 |
89 | #define AT_HW_MAX_IDLE_DELAY 10 | 90 | #define AT_HW_MAX_IDLE_DELAY 10 |
90 | #define AT_SUSPEND_LINK_TIMEOUT 28 | 91 | #define AT_SUSPEND_LINK_TIMEOUT 100 |
91 | 92 | ||
92 | #define AT_ASPM_L0S_TIMER 6 | 93 | #define AT_ASPM_L0S_TIMER 6 |
93 | #define AT_ASPM_L1_TIMER 12 | 94 | #define AT_ASPM_L1_TIMER 12 |
95 | #define AT_LCKDET_TIMER 12 | ||
94 | 96 | ||
95 | #define ATL1C_PCIE_L0S_L1_DISABLE 0x01 | 97 | #define ATL1C_PCIE_L0S_L1_DISABLE 0x01 |
96 | #define ATL1C_PCIE_PHY_RESET 0x02 | 98 | #define ATL1C_PCIE_PHY_RESET 0x02 |
@@ -316,6 +318,7 @@ enum atl1c_nic_type { | |||
316 | athr_l2c_b, | 318 | athr_l2c_b, |
317 | athr_l2c_b2, | 319 | athr_l2c_b2, |
318 | athr_l1d, | 320 | athr_l1d, |
321 | athr_l1d_2, | ||
319 | }; | 322 | }; |
320 | 323 | ||
321 | enum atl1c_trans_queue { | 324 | enum atl1c_trans_queue { |
@@ -392,6 +395,8 @@ struct atl1c_hw { | |||
392 | u16 subsystem_id; | 395 | u16 subsystem_id; |
393 | u16 subsystem_vendor_id; | 396 | u16 subsystem_vendor_id; |
394 | u8 revision_id; | 397 | u8 revision_id; |
398 | u16 phy_id1; | ||
399 | u16 phy_id2; | ||
395 | 400 | ||
396 | u32 intr_mask; | 401 | u32 intr_mask; |
397 | u8 dmaw_dly_cnt; | 402 | u8 dmaw_dly_cnt; |
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c index f1389d664a21..d8501f060957 100644 --- a/drivers/net/atl1c/atl1c_hw.c +++ b/drivers/net/atl1c/atl1c_hw.c | |||
@@ -37,6 +37,9 @@ int atl1c_check_eeprom_exist(struct atl1c_hw *hw) | |||
37 | if (data & TWSI_DEBUG_DEV_EXIST) | 37 | if (data & TWSI_DEBUG_DEV_EXIST) |
38 | return 1; | 38 | return 1; |
39 | 39 | ||
40 | AT_READ_REG(hw, REG_MASTER_CTRL, &data); | ||
41 | if (data & MASTER_CTRL_OTP_SEL) | ||
42 | return 1; | ||
40 | return 0; | 43 | return 0; |
41 | } | 44 | } |
42 | 45 | ||
@@ -69,6 +72,8 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw) | |||
69 | u32 i; | 72 | u32 i; |
70 | u32 otp_ctrl_data; | 73 | u32 otp_ctrl_data; |
71 | u32 twsi_ctrl_data; | 74 | u32 twsi_ctrl_data; |
75 | u32 ltssm_ctrl_data; | ||
76 | u32 wol_data; | ||
72 | u8 eth_addr[ETH_ALEN]; | 77 | u8 eth_addr[ETH_ALEN]; |
73 | u16 phy_data; | 78 | u16 phy_data; |
74 | bool raise_vol = false; | 79 | bool raise_vol = false; |
@@ -104,6 +109,15 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw) | |||
104 | udelay(20); | 109 | udelay(20); |
105 | raise_vol = true; | 110 | raise_vol = true; |
106 | } | 111 | } |
112 | /* close open bit of ReadOnly*/ | ||
113 | AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &ltssm_ctrl_data); | ||

114 | ltssm_ctrl_data &= ~LTSSM_ID_EN_WRO; | ||
115 | AT_WRITE_REG(hw, REG_LTSSM_ID_CTRL, ltssm_ctrl_data); | ||
116 | |||
117 | /* clear any WOL settings */ | ||
118 | AT_WRITE_REG(hw, REG_WOL_CTRL, 0); | ||
119 | AT_READ_REG(hw, REG_WOL_CTRL, &wol_data); | ||
120 | |||
107 | 121 | ||
108 | AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data); | 122 | AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data); |
109 | twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART; | 123 | twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART; |
@@ -119,17 +133,15 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw) | |||
119 | } | 133 | } |
120 | /* Disable OTP_CLK */ | 134 | /* Disable OTP_CLK */ |
121 | if ((hw->nic_type == athr_l1c || hw->nic_type == athr_l2c)) { | 135 | if ((hw->nic_type == athr_l1c || hw->nic_type == athr_l2c)) { |
122 | if (otp_ctrl_data & OTP_CTRL_CLK_EN) { | 136 | otp_ctrl_data &= ~OTP_CTRL_CLK_EN; |
123 | otp_ctrl_data &= ~OTP_CTRL_CLK_EN; | 137 | AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data); |
124 | AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data); | 138 | msleep(1); |
125 | AT_WRITE_FLUSH(hw); | ||
126 | msleep(1); | ||
127 | } | ||
128 | } | 139 | } |
129 | if (raise_vol) { | 140 | if (raise_vol) { |
130 | if (hw->nic_type == athr_l2c_b || | 141 | if (hw->nic_type == athr_l2c_b || |
131 | hw->nic_type == athr_l2c_b2 || | 142 | hw->nic_type == athr_l2c_b2 || |
132 | hw->nic_type == athr_l1d) { | 143 | hw->nic_type == athr_l1d || |
144 | hw->nic_type == athr_l1d_2) { | ||
133 | atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00); | 145 | atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00); |
134 | if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data)) | 146 | if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data)) |
135 | goto out; | 147 | goto out; |
@@ -456,14 +468,22 @@ int atl1c_phy_reset(struct atl1c_hw *hw) | |||
456 | 468 | ||
457 | if (hw->nic_type == athr_l2c_b || | 469 | if (hw->nic_type == athr_l2c_b || |
458 | hw->nic_type == athr_l2c_b2 || | 470 | hw->nic_type == athr_l2c_b2 || |
459 | hw->nic_type == athr_l1d) { | 471 | hw->nic_type == athr_l1d || |
472 | hw->nic_type == athr_l1d_2) { | ||
460 | atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B); | 473 | atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B); |
461 | atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data); | 474 | atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data); |
462 | atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xFFF7); | 475 | atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xFFF7); |
463 | msleep(20); | 476 | msleep(20); |
464 | } | 477 | } |
465 | 478 | if (hw->nic_type == athr_l1d) { | |
466 | /*Enable PHY LinkChange Interrupt */ | 479 | atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29); |
480 | atl1c_write_phy_reg(hw, MII_DBG_DATA, 0x929D); | ||
481 | } | ||
482 | if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b2 | ||
483 | || hw->nic_type == athr_l2c || hw->nic_type == athr_l2c) { | ||
484 | atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29); | ||
485 | atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB6DD); | ||
486 | } | ||
467 | err = atl1c_write_phy_reg(hw, MII_IER, mii_ier_data); | 487 | err = atl1c_write_phy_reg(hw, MII_IER, mii_ier_data); |
468 | if (err) { | 488 | if (err) { |
469 | if (netif_msg_hw(adapter)) | 489 | if (netif_msg_hw(adapter)) |
@@ -482,12 +502,10 @@ int atl1c_phy_init(struct atl1c_hw *hw) | |||
482 | struct pci_dev *pdev = adapter->pdev; | 502 | struct pci_dev *pdev = adapter->pdev; |
483 | int ret_val; | 503 | int ret_val; |
484 | u16 mii_bmcr_data = BMCR_RESET; | 504 | u16 mii_bmcr_data = BMCR_RESET; |
485 | u16 phy_id1, phy_id2; | ||
486 | 505 | ||
487 | if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &phy_id1) != 0) || | 506 | if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id1) != 0) || |
488 | (atl1c_read_phy_reg(hw, MII_PHYSID2, &phy_id2) != 0)) { | 507 | (atl1c_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id2) != 0)) { |
489 | if (netif_msg_link(adapter)) | 508 | dev_err(&pdev->dev, "Error get phy ID\n"); |
490 | dev_err(&pdev->dev, "Error get phy ID\n"); | ||
491 | return -1; | 509 | return -1; |
492 | } | 510 | } |
493 | switch (hw->media_type) { | 511 | switch (hw->media_type) { |
@@ -572,6 +590,65 @@ int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex) | |||
572 | return 0; | 590 | return 0; |
573 | } | 591 | } |
574 | 592 | ||
593 | int atl1c_phy_power_saving(struct atl1c_hw *hw) | ||
594 | { | ||
595 | struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter; | ||
596 | struct pci_dev *pdev = adapter->pdev; | ||
597 | int ret = 0; | ||
598 | u16 autoneg_advertised = ADVERTISED_10baseT_Half; | ||
599 | u16 save_autoneg_advertised; | ||
600 | u16 phy_data; | ||
601 | u16 mii_lpa_data; | ||
602 | u16 speed = SPEED_0; | ||
603 | u16 duplex = FULL_DUPLEX; | ||
604 | int i; | ||
605 | |||
606 | atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); | ||
607 | atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); | ||
608 | if (phy_data & BMSR_LSTATUS) { | ||
609 | atl1c_read_phy_reg(hw, MII_LPA, &mii_lpa_data); | ||
610 | if (mii_lpa_data & LPA_10FULL) | ||
611 | autoneg_advertised = ADVERTISED_10baseT_Full; | ||
612 | else if (mii_lpa_data & LPA_10HALF) | ||
613 | autoneg_advertised = ADVERTISED_10baseT_Half; | ||
614 | else if (mii_lpa_data & LPA_100HALF) | ||
615 | autoneg_advertised = ADVERTISED_100baseT_Half; | ||
616 | else if (mii_lpa_data & LPA_100FULL) | ||
617 | autoneg_advertised = ADVERTISED_100baseT_Full; | ||
618 | |||
619 | save_autoneg_advertised = hw->autoneg_advertised; | ||
620 | hw->phy_configured = false; | ||
621 | hw->autoneg_advertised = autoneg_advertised; | ||
622 | if (atl1c_restart_autoneg(hw) != 0) { | ||
623 | dev_dbg(&pdev->dev, "phy autoneg failed\n"); | ||
624 | ret = -1; | ||
625 | } | ||
626 | hw->autoneg_advertised = save_autoneg_advertised; | ||
627 | |||
628 | if (mii_lpa_data) { | ||
629 | for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) { | ||
630 | mdelay(100); | ||
631 | atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); | ||
632 | atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); | ||
633 | if (phy_data & BMSR_LSTATUS) { | ||
634 | if (atl1c_get_speed_and_duplex(hw, &speed, | ||
635 | &duplex) != 0) | ||
636 | dev_dbg(&pdev->dev, | ||
637 | "get speed and duplex failed\n"); | ||
638 | break; | ||
639 | } | ||
640 | } | ||
641 | } | ||
642 | } else { | ||
643 | speed = SPEED_10; | ||
644 | duplex = HALF_DUPLEX; | ||
645 | } | ||
646 | adapter->link_speed = speed; | ||
647 | adapter->link_duplex = duplex; | ||
648 | |||
649 | return ret; | ||
650 | } | ||
651 | |||
575 | int atl1c_restart_autoneg(struct atl1c_hw *hw) | 652 | int atl1c_restart_autoneg(struct atl1c_hw *hw) |
576 | { | 653 | { |
577 | int err = 0; | 654 | int err = 0; |
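A note on the new atl1c_phy_power_saving() above: it reads MII_BMSR twice back-to-back before testing BMSR_LSTATUS because the link-status bit is latched-low per the MII register definition — the first read clears any stale latched link-down event, the second reflects the current state. It then re-advertises only the lowest rate the partner reported in MII_LPA and restarts autonegotiation, dropping the link speed while suspended. A sketch of just the double-read idiom; phy_read() here is a placeholder for the driver's atl1c_read_phy_reg() (or mdiobus_read() elsewhere), not a real API:

            u16 bmsr;

            bmsr = phy_read(MII_BMSR);  /* first read clears the latched value */
            bmsr = phy_read(MII_BMSR);  /* second read gives the current state */
            if (bmsr & BMSR_LSTATUS)
                    pr_debug("link is up\n");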
diff --git a/drivers/net/atl1c/atl1c_hw.h b/drivers/net/atl1c/atl1c_hw.h index 1eeb3ed9f0cb..3dd675979aa1 100644 --- a/drivers/net/atl1c/atl1c_hw.h +++ b/drivers/net/atl1c/atl1c_hw.h | |||
@@ -42,7 +42,7 @@ bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value); | |||
42 | int atl1c_phy_init(struct atl1c_hw *hw); | 42 | int atl1c_phy_init(struct atl1c_hw *hw); |
43 | int atl1c_check_eeprom_exist(struct atl1c_hw *hw); | 43 | int atl1c_check_eeprom_exist(struct atl1c_hw *hw); |
44 | int atl1c_restart_autoneg(struct atl1c_hw *hw); | 44 | int atl1c_restart_autoneg(struct atl1c_hw *hw); |
45 | 45 | int atl1c_phy_power_saving(struct atl1c_hw *hw); | |
46 | /* register definition */ | 46 | /* register definition */ |
47 | #define REG_DEVICE_CAP 0x5C | 47 | #define REG_DEVICE_CAP 0x5C |
48 | #define DEVICE_CAP_MAX_PAYLOAD_MASK 0x7 | 48 | #define DEVICE_CAP_MAX_PAYLOAD_MASK 0x7 |
@@ -120,6 +120,12 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw); | |||
120 | #define REG_PCIE_PHYMISC 0x1000 | 120 | #define REG_PCIE_PHYMISC 0x1000 |
121 | #define PCIE_PHYMISC_FORCE_RCV_DET 0x4 | 121 | #define PCIE_PHYMISC_FORCE_RCV_DET 0x4 |
122 | 122 | ||
123 | #define REG_PCIE_PHYMISC2 0x1004 | ||
124 | #define PCIE_PHYMISC2_SERDES_CDR_MASK 0x3 | ||
125 | #define PCIE_PHYMISC2_SERDES_CDR_SHIFT 16 | ||
126 | #define PCIE_PHYMISC2_SERDES_TH_MASK 0x3 | ||
127 | #define PCIE_PHYMISC2_SERDES_TH_SHIFT 18 | ||
128 | |||
123 | #define REG_TWSI_DEBUG 0x1108 | 129 | #define REG_TWSI_DEBUG 0x1108 |
124 | #define TWSI_DEBUG_DEV_EXIST 0x20000000 | 130 | #define TWSI_DEBUG_DEV_EXIST 0x20000000 |
125 | 131 | ||
@@ -150,24 +156,28 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw); | |||
150 | #define PM_CTRL_ASPM_L0S_EN 0x00001000 | 156 | #define PM_CTRL_ASPM_L0S_EN 0x00001000 |
151 | #define PM_CTRL_CLK_SWH_L1 0x00002000 | 157 | #define PM_CTRL_CLK_SWH_L1 0x00002000 |
152 | #define PM_CTRL_CLK_PWM_VER1_1 0x00004000 | 158 | #define PM_CTRL_CLK_PWM_VER1_1 0x00004000 |
153 | #define PM_CTRL_PCIE_RECV 0x00008000 | 159 | #define PM_CTRL_RCVR_WT_TIMER 0x00008000 |
154 | #define PM_CTRL_L1_ENTRY_TIMER_MASK 0xF | 160 | #define PM_CTRL_L1_ENTRY_TIMER_MASK 0xF |
155 | #define PM_CTRL_L1_ENTRY_TIMER_SHIFT 16 | 161 | #define PM_CTRL_L1_ENTRY_TIMER_SHIFT 16 |
156 | #define PM_CTRL_PM_REQ_TIMER_MASK 0xF | 162 | #define PM_CTRL_PM_REQ_TIMER_MASK 0xF |
157 | #define PM_CTRL_PM_REQ_TIMER_SHIFT 20 | 163 | #define PM_CTRL_PM_REQ_TIMER_SHIFT 20 |
158 | #define PM_CTRL_LCKDET_TIMER_MASK 0x3F | 164 | #define PM_CTRL_LCKDET_TIMER_MASK 0xF |
159 | #define PM_CTRL_LCKDET_TIMER_SHIFT 24 | 165 | #define PM_CTRL_LCKDET_TIMER_SHIFT 24 |
160 | #define PM_CTRL_EN_BUFS_RX_L0S 0x10000000 | 166 | #define PM_CTRL_EN_BUFS_RX_L0S 0x10000000 |
161 | #define PM_CTRL_SA_DLY_EN 0x20000000 | 167 | #define PM_CTRL_SA_DLY_EN 0x20000000 |
162 | #define PM_CTRL_MAC_ASPM_CHK 0x40000000 | 168 | #define PM_CTRL_MAC_ASPM_CHK 0x40000000 |
163 | #define PM_CTRL_HOTRST 0x80000000 | 169 | #define PM_CTRL_HOTRST 0x80000000 |
164 | 170 | ||
171 | #define REG_LTSSM_ID_CTRL 0x12FC | ||
172 | #define LTSSM_ID_EN_WRO 0x1000 | ||
165 | /* Selene Master Control Register */ | 173 | /* Selene Master Control Register */ |
166 | #define REG_MASTER_CTRL 0x1400 | 174 | #define REG_MASTER_CTRL 0x1400 |
167 | #define MASTER_CTRL_SOFT_RST 0x1 | 175 | #define MASTER_CTRL_SOFT_RST 0x1 |
168 | #define MASTER_CTRL_TEST_MODE_MASK 0x3 | 176 | #define MASTER_CTRL_TEST_MODE_MASK 0x3 |
169 | #define MASTER_CTRL_TEST_MODE_SHIFT 2 | 177 | #define MASTER_CTRL_TEST_MODE_SHIFT 2 |
170 | #define MASTER_CTRL_BERT_START 0x10 | 178 | #define MASTER_CTRL_BERT_START 0x10 |
179 | #define MASTER_CTRL_OOB_DIS_OFF 0x40 | ||
180 | #define MASTER_CTRL_SA_TIMER_EN 0x80 | ||
171 | #define MASTER_CTRL_MTIMER_EN 0x100 | 181 | #define MASTER_CTRL_MTIMER_EN 0x100 |
172 | #define MASTER_CTRL_MANUAL_INT 0x200 | 182 | #define MASTER_CTRL_MANUAL_INT 0x200 |
173 | #define MASTER_CTRL_TX_ITIMER_EN 0x400 | 183 | #define MASTER_CTRL_TX_ITIMER_EN 0x400 |
@@ -220,6 +230,12 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw); | |||
220 | GPHY_CTRL_PWDOWN_HW |\ | 230 | GPHY_CTRL_PWDOWN_HW |\ |
221 | GPHY_CTRL_PHY_IDDQ) | 231 | GPHY_CTRL_PHY_IDDQ) |
222 | 232 | ||
233 | #define GPHY_CTRL_POWER_SAVING ( \ | ||
234 | GPHY_CTRL_SEL_ANA_RST |\ | ||
235 | GPHY_CTRL_HIB_EN |\ | ||
236 | GPHY_CTRL_HIB_PULSE |\ | ||
237 | GPHY_CTRL_PWDOWN_HW |\ | ||
238 | GPHY_CTRL_PHY_IDDQ) | ||
223 | /* Block IDLE Status Register */ | 239 | /* Block IDLE Status Register */ |
224 | #define REG_IDLE_STATUS 0x1410 | 240 | #define REG_IDLE_STATUS 0x1410 |
225 | #define IDLE_STATUS_MASK 0x00FF | 241 | #define IDLE_STATUS_MASK 0x00FF |
@@ -287,6 +303,14 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw); | |||
287 | #define SERDES_LOCK_DETECT 0x1 /* SerDes lock detected. This signal | 303 | #define SERDES_LOCK_DETECT 0x1 /* SerDes lock detected. This signal |
288 | * comes from Analog SerDes */ | 304 | * comes from Analog SerDes */ |
289 | #define SERDES_LOCK_DETECT_EN 0x2 /* 1: Enable SerDes Lock detect function */ | 305 | #define SERDES_LOCK_DETECT_EN 0x2 /* 1: Enable SerDes Lock detect function */ |
306 | #define SERDES_LOCK_STS_SELFB_PLL_SHIFT 0xE | ||
307 | #define SERDES_LOCK_STS_SELFB_PLL_MASK 0x3 | ||
308 | #define SERDES_OVCLK_18_25 0x0 | ||
309 | #define SERDES_OVCLK_12_18 0x1 | ||
310 | #define SERDES_OVCLK_0_4 0x2 | ||
311 | #define SERDES_OVCLK_4_12 0x3 | ||
312 | #define SERDES_MAC_CLK_SLOWDOWN 0x20000 | ||
313 | #define SERDES_PYH_CLK_SLOWDOWN 0x40000 | ||
290 | 314 | ||
291 | /* MAC Control Register */ | 315 | /* MAC Control Register */ |
292 | #define REG_MAC_CTRL 0x1480 | 316 | #define REG_MAC_CTRL 0x1480 |
@@ -693,6 +717,21 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw); | |||
693 | #define REG_MAC_TX_STATUS_BIN 0x1760 | 717 | #define REG_MAC_TX_STATUS_BIN 0x1760 |
694 | #define REG_MAC_TX_STATUS_END 0x17c0 | 718 | #define REG_MAC_TX_STATUS_END 0x17c0 |
695 | 719 | ||
720 | #define REG_CLK_GATING_CTRL 0x1814 | ||
721 | #define CLK_GATING_DMAW_EN 0x0001 | ||
722 | #define CLK_GATING_DMAR_EN 0x0002 | ||
723 | #define CLK_GATING_TXQ_EN 0x0004 | ||
724 | #define CLK_GATING_RXQ_EN 0x0008 | ||
725 | #define CLK_GATING_TXMAC_EN 0x0010 | ||
726 | #define CLK_GATING_RXMAC_EN 0x0020 | ||
727 | |||
728 | #define CLK_GATING_EN_ALL (CLK_GATING_DMAW_EN |\ | ||
729 | CLK_GATING_DMAR_EN |\ | ||
730 | CLK_GATING_TXQ_EN |\ | ||
731 | CLK_GATING_RXQ_EN |\ | ||
732 | CLK_GATING_TXMAC_EN|\ | ||
733 | CLK_GATING_RXMAC_EN) | ||
734 | |||
696 | /* DEBUG ADDR */ | 735 | /* DEBUG ADDR */ |
697 | #define REG_DEBUG_DATA0 0x1900 | 736 | #define REG_DEBUG_DATA0 0x1900 |
698 | #define REG_DEBUG_DATA1 0x1904 | 737 | #define REG_DEBUG_DATA1 0x1904 |
@@ -734,6 +773,10 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw); | |||
734 | 773 | ||
735 | #define MII_PHYSID1 0x02 | 774 | #define MII_PHYSID1 0x02 |
736 | #define MII_PHYSID2 0x03 | 775 | #define MII_PHYSID2 0x03 |
776 | #define L1D_MPW_PHYID1 0xD01C /* V7 */ | ||
777 | #define L1D_MPW_PHYID2 0xD01D /* V1-V6 */ | ||
778 | #define L1D_MPW_PHYID3 0xD01E /* V8 */ | ||
779 | |||
737 | 780 | ||
738 | /* Autoneg Advertisement Register */ | 781 | /* Autoneg Advertisement Register */ |
739 | #define MII_ADVERTISE 0x04 | 782 | #define MII_ADVERTISE 0x04 |
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c index 1c3c046d5f34..c7b8ef507ebd 100644 --- a/drivers/net/atl1c/atl1c_main.c +++ b/drivers/net/atl1c/atl1c_main.c | |||
@@ -21,7 +21,7 @@ | |||
21 | 21 | ||
22 | #include "atl1c.h" | 22 | #include "atl1c.h" |
23 | 23 | ||
24 | #define ATL1C_DRV_VERSION "1.0.0.2-NAPI" | 24 | #define ATL1C_DRV_VERSION "1.0.1.0-NAPI" |
25 | char atl1c_driver_name[] = "atl1c"; | 25 | char atl1c_driver_name[] = "atl1c"; |
26 | char atl1c_driver_version[] = ATL1C_DRV_VERSION; | 26 | char atl1c_driver_version[] = ATL1C_DRV_VERSION; |
27 | #define PCI_DEVICE_ID_ATTANSIC_L2C 0x1062 | 27 | #define PCI_DEVICE_ID_ATTANSIC_L2C 0x1062 |
@@ -29,7 +29,7 @@ char atl1c_driver_version[] = ATL1C_DRV_VERSION; | |||
29 | #define PCI_DEVICE_ID_ATHEROS_L2C_B 0x2060 /* AR8152 v1.1 Fast 10/100 */ | 29 | #define PCI_DEVICE_ID_ATHEROS_L2C_B 0x2060 /* AR8152 v1.1 Fast 10/100 */ |
30 | #define PCI_DEVICE_ID_ATHEROS_L2C_B2 0x2062 /* AR8152 v2.0 Fast 10/100 */ | 30 | #define PCI_DEVICE_ID_ATHEROS_L2C_B2 0x2062 /* AR8152 v2.0 Fast 10/100 */ |
31 | #define PCI_DEVICE_ID_ATHEROS_L1D 0x1073 /* AR8151 v1.0 Gigabit 1000 */ | 31 | #define PCI_DEVICE_ID_ATHEROS_L1D 0x1073 /* AR8151 v1.0 Gigabit 1000 */ |
32 | 32 | #define PCI_DEVICE_ID_ATHEROS_L1D_2_0 0x1083 /* AR8151 v2.0 Gigabit 1000 */ | |
33 | #define L2CB_V10 0xc0 | 33 | #define L2CB_V10 0xc0 |
34 | #define L2CB_V11 0xc1 | 34 | #define L2CB_V11 0xc1 |
35 | 35 | ||
@@ -97,7 +97,28 @@ static const u16 atl1c_rrd_addr_lo_regs[AT_MAX_RECEIVE_QUEUE] = | |||
97 | 97 | ||
98 | static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | | 98 | static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | |
99 | NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP; | 99 | NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP; |
100 | static void atl1c_pcie_patch(struct atl1c_hw *hw) | ||
101 | { | ||
102 | u32 data; | ||
100 | 103 | ||
104 | AT_READ_REG(hw, REG_PCIE_PHYMISC, &data); | ||
105 | data |= PCIE_PHYMISC_FORCE_RCV_DET; | ||
106 | AT_WRITE_REG(hw, REG_PCIE_PHYMISC, data); | ||
107 | |||
108 | if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) { | ||
109 | AT_READ_REG(hw, REG_PCIE_PHYMISC2, &data); | ||
110 | |||
111 | data &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK << | ||
112 | PCIE_PHYMISC2_SERDES_CDR_SHIFT); | ||
113 | data |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT; | ||
114 | data &= ~(PCIE_PHYMISC2_SERDES_TH_MASK << | ||
115 | PCIE_PHYMISC2_SERDES_TH_SHIFT); | ||
116 | data |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT; | ||
117 | AT_WRITE_REG(hw, REG_PCIE_PHYMISC2, data); | ||
118 | } | ||
119 | } | ||
120 | |||
121 | /* FIXME: no need any more ? */ | ||
101 | /* | 122 | /* |
102 | * atl1c_init_pcie - init PCIE module | 123 | * atl1c_init_pcie - init PCIE module |
103 | */ | 124 | */ |
@@ -127,6 +148,11 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag) | |||
127 | data &= ~PCIE_UC_SERVRITY_FCP; | 148 | data &= ~PCIE_UC_SERVRITY_FCP; |
128 | AT_WRITE_REG(hw, REG_PCIE_UC_SEVERITY, data); | 149 | AT_WRITE_REG(hw, REG_PCIE_UC_SEVERITY, data); |
129 | 150 | ||
151 | AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &data); | ||
152 | data &= ~LTSSM_ID_EN_WRO; | ||
153 | AT_WRITE_REG(hw, REG_LTSSM_ID_CTRL, data); | ||
154 | |||
155 | atl1c_pcie_patch(hw); | ||
130 | if (flag & ATL1C_PCIE_L0S_L1_DISABLE) | 156 | if (flag & ATL1C_PCIE_L0S_L1_DISABLE) |
131 | atl1c_disable_l0s_l1(hw); | 157 | atl1c_disable_l0s_l1(hw); |
132 | if (flag & ATL1C_PCIE_PHY_RESET) | 158 | if (flag & ATL1C_PCIE_PHY_RESET) |
@@ -135,7 +161,7 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag) | |||
135 | AT_WRITE_REG(hw, REG_GPHY_CTRL, | 161 | AT_WRITE_REG(hw, REG_GPHY_CTRL, |
136 | GPHY_CTRL_DEFAULT | GPHY_CTRL_EXT_RESET); | 162 | GPHY_CTRL_DEFAULT | GPHY_CTRL_EXT_RESET); |
137 | 163 | ||
138 | msleep(1); | 164 | msleep(5); |
139 | } | 165 | } |
140 | 166 | ||
141 | /* | 167 | /* |
@@ -159,6 +185,7 @@ static inline void atl1c_irq_disable(struct atl1c_adapter *adapter) | |||
159 | { | 185 | { |
160 | atomic_inc(&adapter->irq_sem); | 186 | atomic_inc(&adapter->irq_sem); |
161 | AT_WRITE_REG(&adapter->hw, REG_IMR, 0); | 187 | AT_WRITE_REG(&adapter->hw, REG_IMR, 0); |
188 | AT_WRITE_REG(&adapter->hw, REG_ISR, ISR_DIS_INT); | ||
162 | AT_WRITE_FLUSH(&adapter->hw); | 189 | AT_WRITE_FLUSH(&adapter->hw); |
163 | synchronize_irq(adapter->pdev->irq); | 190 | synchronize_irq(adapter->pdev->irq); |
164 | } | 191 | } |
@@ -231,15 +258,15 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter) | |||
231 | 258 | ||
232 | if ((phy_data & BMSR_LSTATUS) == 0) { | 259 | if ((phy_data & BMSR_LSTATUS) == 0) { |
233 | /* link down */ | 260 | /* link down */ |
234 | if (netif_carrier_ok(netdev)) { | 261 | hw->hibernate = true; |
235 | hw->hibernate = true; | 262 | if (atl1c_stop_mac(hw) != 0) |
236 | if (atl1c_stop_mac(hw) != 0) | 263 | if (netif_msg_hw(adapter)) |
237 | if (netif_msg_hw(adapter)) | 264 | dev_warn(&pdev->dev, "stop mac failed\n"); |
238 | dev_warn(&pdev->dev, | 265 | atl1c_set_aspm(hw, false); |
239 | "stop mac failed\n"); | ||
240 | atl1c_set_aspm(hw, false); | ||
241 | } | ||
242 | netif_carrier_off(netdev); | 266 | netif_carrier_off(netdev); |
267 | netif_stop_queue(netdev); | ||
268 | atl1c_phy_reset(hw); | ||
269 | atl1c_phy_init(&adapter->hw); | ||
243 | } else { | 270 | } else { |
244 | /* Link Up */ | 271 | /* Link Up */ |
245 | hw->hibernate = false; | 272 | hw->hibernate = false; |
@@ -308,6 +335,7 @@ static void atl1c_common_task(struct work_struct *work) | |||
308 | netdev = adapter->netdev; | 335 | netdev = adapter->netdev; |
309 | 336 | ||
310 | if (adapter->work_event & ATL1C_WORK_EVENT_RESET) { | 337 | if (adapter->work_event & ATL1C_WORK_EVENT_RESET) { |
338 | adapter->work_event &= ~ATL1C_WORK_EVENT_RESET; | ||
311 | netif_device_detach(netdev); | 339 | netif_device_detach(netdev); |
312 | atl1c_down(adapter); | 340 | atl1c_down(adapter); |
313 | atl1c_up(adapter); | 341 | atl1c_up(adapter); |
@@ -315,8 +343,11 @@ static void atl1c_common_task(struct work_struct *work) | |||
315 | return; | 343 | return; |
316 | } | 344 | } |
317 | 345 | ||
318 | if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE) | 346 | if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE) { |
347 | adapter->work_event &= ~ATL1C_WORK_EVENT_LINK_CHANGE; | ||
319 | atl1c_check_link_status(adapter); | 348 | atl1c_check_link_status(adapter); |
349 | } | ||
350 | return; | ||
320 | } | 351 | } |
321 | 352 | ||
322 | 353 | ||
@@ -476,6 +507,13 @@ static int atl1c_change_mtu(struct net_device *netdev, int new_mtu) | |||
476 | netdev->mtu = new_mtu; | 507 | netdev->mtu = new_mtu; |
477 | adapter->hw.max_frame_size = new_mtu; | 508 | adapter->hw.max_frame_size = new_mtu; |
478 | atl1c_set_rxbufsize(adapter, netdev); | 509 | atl1c_set_rxbufsize(adapter, netdev); |
510 | if (new_mtu > MAX_TSO_FRAME_SIZE) { | ||
511 | adapter->netdev->features &= ~NETIF_F_TSO; | ||
512 | adapter->netdev->features &= ~NETIF_F_TSO6; | ||
513 | } else { | ||
514 | adapter->netdev->features |= NETIF_F_TSO; | ||
515 | adapter->netdev->features |= NETIF_F_TSO6; | ||
516 | } | ||
479 | atl1c_down(adapter); | 517 | atl1c_down(adapter); |
480 | atl1c_up(adapter); | 518 | atl1c_up(adapter); |
481 | clear_bit(__AT_RESETTING, &adapter->flags); | 519 | clear_bit(__AT_RESETTING, &adapter->flags); |
@@ -613,6 +651,9 @@ static void atl1c_set_mac_type(struct atl1c_hw *hw) | |||
613 | case PCI_DEVICE_ID_ATHEROS_L1D: | 651 | case PCI_DEVICE_ID_ATHEROS_L1D: |
614 | hw->nic_type = athr_l1d; | 652 | hw->nic_type = athr_l1d; |
615 | break; | 653 | break; |
654 | case PCI_DEVICE_ID_ATHEROS_L1D_2_0: | ||
655 | hw->nic_type = athr_l1d_2; | ||
656 | break; | ||
616 | default: | 657 | default: |
617 | break; | 658 | break; |
618 | } | 659 | } |
@@ -627,9 +668,7 @@ static int atl1c_setup_mac_funcs(struct atl1c_hw *hw) | |||
627 | AT_READ_REG(hw, REG_PHY_STATUS, &phy_status_data); | 668 | AT_READ_REG(hw, REG_PHY_STATUS, &phy_status_data); |
628 | AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data); | 669 | AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data); |
629 | 670 | ||
630 | hw->ctrl_flags = ATL1C_INTR_CLEAR_ON_READ | | 671 | hw->ctrl_flags = ATL1C_INTR_MODRT_ENABLE | |
631 | ATL1C_INTR_MODRT_ENABLE | | ||
632 | ATL1C_RX_IPV6_CHKSUM | | ||
633 | ATL1C_TXQ_MODE_ENHANCE; | 672 | ATL1C_TXQ_MODE_ENHANCE; |
634 | if (link_ctrl_data & LINK_CTRL_L0S_EN) | 673 | if (link_ctrl_data & LINK_CTRL_L0S_EN) |
635 | hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT; | 674 | hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT; |
@@ -637,12 +676,12 @@ static int atl1c_setup_mac_funcs(struct atl1c_hw *hw) | |||
637 | hw->ctrl_flags |= ATL1C_ASPM_L1_SUPPORT; | 676 | hw->ctrl_flags |= ATL1C_ASPM_L1_SUPPORT; |
638 | if (link_ctrl_data & LINK_CTRL_EXT_SYNC) | 677 | if (link_ctrl_data & LINK_CTRL_EXT_SYNC) |
639 | hw->ctrl_flags |= ATL1C_LINK_EXT_SYNC; | 678 | hw->ctrl_flags |= ATL1C_LINK_EXT_SYNC; |
679 | hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON; | ||
640 | 680 | ||
641 | if (hw->nic_type == athr_l1c || | 681 | if (hw->nic_type == athr_l1c || |
642 | hw->nic_type == athr_l1d) { | 682 | hw->nic_type == athr_l1d || |
643 | hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON; | 683 | hw->nic_type == athr_l1d_2) |
644 | hw->link_cap_flags |= ATL1C_LINK_CAP_1000M; | 684 | hw->link_cap_flags |= ATL1C_LINK_CAP_1000M; |
645 | } | ||
646 | return 0; | 685 | return 0; |
647 | } | 686 | } |
648 | /* | 687 | /* |
@@ -657,6 +696,8 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter) | |||
657 | { | 696 | { |
658 | struct atl1c_hw *hw = &adapter->hw; | 697 | struct atl1c_hw *hw = &adapter->hw; |
659 | struct pci_dev *pdev = adapter->pdev; | 698 | struct pci_dev *pdev = adapter->pdev; |
699 | u32 revision; | ||
700 | |||
660 | 701 | ||
661 | adapter->wol = 0; | 702 | adapter->wol = 0; |
662 | adapter->link_speed = SPEED_0; | 703 | adapter->link_speed = SPEED_0; |
@@ -669,7 +710,8 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter) | |||
669 | hw->device_id = pdev->device; | 710 | hw->device_id = pdev->device; |
670 | hw->subsystem_vendor_id = pdev->subsystem_vendor; | 711 | hw->subsystem_vendor_id = pdev->subsystem_vendor; |
671 | hw->subsystem_id = pdev->subsystem_device; | 712 | hw->subsystem_id = pdev->subsystem_device; |
672 | 713 | AT_READ_REG(hw, PCI_CLASS_REVISION, &revision); | |
714 | hw->revision_id = revision & 0xFF; | ||
673 | /* before link up, we assume hibernate is true */ | 715 | /* before link up, we assume hibernate is true */ |
674 | hw->hibernate = true; | 716 | hw->hibernate = true; |
675 | hw->media_type = MEDIA_TYPE_AUTO_SENSOR; | 717 | hw->media_type = MEDIA_TYPE_AUTO_SENSOR; |
@@ -974,6 +1016,7 @@ static void atl1c_configure_des_ring(struct atl1c_adapter *adapter) | |||
974 | struct atl1c_cmb *cmb = (struct atl1c_cmb *) &adapter->cmb; | 1016 | struct atl1c_cmb *cmb = (struct atl1c_cmb *) &adapter->cmb; |
975 | struct atl1c_smb *smb = (struct atl1c_smb *) &adapter->smb; | 1017 | struct atl1c_smb *smb = (struct atl1c_smb *) &adapter->smb; |
976 | int i; | 1018 | int i; |
1019 | u32 data; | ||
977 | 1020 | ||
978 | /* TPD */ | 1021 | /* TPD */ |
979 | AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI, | 1022 | AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI, |
@@ -1017,6 +1060,23 @@ static void atl1c_configure_des_ring(struct atl1c_adapter *adapter) | |||
1017 | (u32)((smb->dma & AT_DMA_HI_ADDR_MASK) >> 32)); | 1060 | (u32)((smb->dma & AT_DMA_HI_ADDR_MASK) >> 32)); |
1018 | AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_LO, | 1061 | AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_LO, |
1019 | (u32)(smb->dma & AT_DMA_LO_ADDR_MASK)); | 1062 | (u32)(smb->dma & AT_DMA_LO_ADDR_MASK)); |
1063 | if (hw->nic_type == athr_l2c_b) { | ||
1064 | AT_WRITE_REG(hw, REG_SRAM_RXF_LEN, 0x02a0L); | ||
1065 | AT_WRITE_REG(hw, REG_SRAM_TXF_LEN, 0x0100L); | ||
1066 | AT_WRITE_REG(hw, REG_SRAM_RXF_ADDR, 0x029f0000L); | ||
1067 | AT_WRITE_REG(hw, REG_SRAM_RFD0_INFO, 0x02bf02a0L); | ||
1068 | AT_WRITE_REG(hw, REG_SRAM_TXF_ADDR, 0x03bf02c0L); | ||
1069 | AT_WRITE_REG(hw, REG_SRAM_TRD_ADDR, 0x03df03c0L); | ||
1070 | AT_WRITE_REG(hw, REG_TXF_WATER_MARK, 0); /* TX watermark, to enter l1 state.*/ | ||
1071 | AT_WRITE_REG(hw, REG_RXD_DMA_CTRL, 0); /* RXD threshold.*/ | ||
1072 | } | ||
1073 | if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d_2) { | ||
1074 | /* Power Saving for L2c_B */ | ||
1075 | AT_READ_REG(hw, REG_SERDES_LOCK, &data); | ||
1076 | data |= SERDES_MAC_CLK_SLOWDOWN; | ||
1077 | data |= SERDES_PYH_CLK_SLOWDOWN; | ||
1078 | AT_WRITE_REG(hw, REG_SERDES_LOCK, data); | ||
1079 | } | ||
1020 | /* Load all of base address above */ | 1080 | /* Load all of base address above */ |
1021 | AT_WRITE_REG(hw, REG_LOAD_PTR, 1); | 1081 | AT_WRITE_REG(hw, REG_LOAD_PTR, 1); |
1022 | } | 1082 | } |
@@ -1029,6 +1089,7 @@ static void atl1c_configure_tx(struct atl1c_adapter *adapter) | |||
1029 | u16 tx_offload_thresh; | 1089 | u16 tx_offload_thresh; |
1030 | u32 txq_ctrl_data; | 1090 | u32 txq_ctrl_data; |
1031 | u32 extra_size = 0; /* Jumbo frame threshold in QWORD unit */ | 1091 | u32 extra_size = 0; /* Jumbo frame threshold in QWORD unit */ |
1092 | u32 max_pay_load_data; | ||
1032 | 1093 | ||
1033 | extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; | 1094 | extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; |
1034 | tx_offload_thresh = MAX_TX_OFFLOAD_THRESH; | 1095 | tx_offload_thresh = MAX_TX_OFFLOAD_THRESH; |
@@ -1046,8 +1107,11 @@ static void atl1c_configure_tx(struct atl1c_adapter *adapter) | |||
1046 | TXQ_NUM_TPD_BURST_SHIFT; | 1107 | TXQ_NUM_TPD_BURST_SHIFT; |
1047 | if (hw->ctrl_flags & ATL1C_TXQ_MODE_ENHANCE) | 1108 | if (hw->ctrl_flags & ATL1C_TXQ_MODE_ENHANCE) |
1048 | txq_ctrl_data |= TXQ_CTRL_ENH_MODE; | 1109 | txq_ctrl_data |= TXQ_CTRL_ENH_MODE; |
1049 | txq_ctrl_data |= (atl1c_pay_load_size[hw->dmar_block] & | 1110 | max_pay_load_data = (atl1c_pay_load_size[hw->dmar_block] & |
1050 | TXQ_TXF_BURST_NUM_MASK) << TXQ_TXF_BURST_NUM_SHIFT; | 1111 | TXQ_TXF_BURST_NUM_MASK) << TXQ_TXF_BURST_NUM_SHIFT; |
1112 | if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2) | ||
1113 | max_pay_load_data >>= 1; | ||
1114 | txq_ctrl_data |= max_pay_load_data; | ||
1051 | 1115 | ||
1052 | AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data); | 1116 | AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data); |
1053 | } | 1117 | } |
@@ -1078,7 +1142,7 @@ static void atl1c_configure_rx(struct atl1c_adapter *adapter) | |||
1078 | rxq_ctrl_data |= (hw->rss_hash_bits & RSS_HASH_BITS_MASK) << | 1142 | rxq_ctrl_data |= (hw->rss_hash_bits & RSS_HASH_BITS_MASK) << |
1079 | RSS_HASH_BITS_SHIFT; | 1143 | RSS_HASH_BITS_SHIFT; |
1080 | if (hw->ctrl_flags & ATL1C_ASPM_CTRL_MON) | 1144 | if (hw->ctrl_flags & ATL1C_ASPM_CTRL_MON) |
1081 | rxq_ctrl_data |= (ASPM_THRUPUT_LIMIT_100M & | 1145 | rxq_ctrl_data |= (ASPM_THRUPUT_LIMIT_1M & |
1082 | ASPM_THRUPUT_LIMIT_MASK) << ASPM_THRUPUT_LIMIT_SHIFT; | 1146 | ASPM_THRUPUT_LIMIT_MASK) << ASPM_THRUPUT_LIMIT_SHIFT; |
1083 | 1147 | ||
1084 | AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data); | 1148 | AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data); |
@@ -1198,21 +1262,23 @@ static int atl1c_reset_mac(struct atl1c_hw *hw) | |||
1198 | { | 1262 | { |
1199 | struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter; | 1263 | struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter; |
1200 | struct pci_dev *pdev = adapter->pdev; | 1264 | struct pci_dev *pdev = adapter->pdev; |
1201 | int ret; | 1265 | u32 master_ctrl_data = 0; |
1202 | 1266 | ||
1203 | AT_WRITE_REG(hw, REG_IMR, 0); | 1267 | AT_WRITE_REG(hw, REG_IMR, 0); |
1204 | AT_WRITE_REG(hw, REG_ISR, ISR_DIS_INT); | 1268 | AT_WRITE_REG(hw, REG_ISR, ISR_DIS_INT); |
1205 | 1269 | ||
1206 | ret = atl1c_stop_mac(hw); | 1270 | atl1c_stop_mac(hw); |
1207 | if (ret) | ||
1208 | return ret; | ||
1209 | /* | 1271 | /* |
1210 | * Issue Soft Reset to the MAC. This will reset the chip's | 1272 | * Issue Soft Reset to the MAC. This will reset the chip's |
1211 | * transmit, receive, DMA. It will not effect | 1273 | * transmit, receive, DMA. It will not effect |
1212 | * the current PCI configuration. The global reset bit is self- | 1274 | * the current PCI configuration. The global reset bit is self- |
1213 | * clearing, and should clear within a microsecond. | 1275 | * clearing, and should clear within a microsecond. |
1214 | */ | 1276 | */ |
1215 | AT_WRITE_REGW(hw, REG_MASTER_CTRL, MASTER_CTRL_SOFT_RST); | 1277 | AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data); |
1278 | master_ctrl_data |= MASTER_CTRL_OOB_DIS_OFF; | ||
1279 | AT_WRITE_REGW(hw, REG_MASTER_CTRL, ((master_ctrl_data | MASTER_CTRL_SOFT_RST) | ||
1280 | & 0xFFFF)); | ||
1281 | |||
1216 | AT_WRITE_FLUSH(hw); | 1282 | AT_WRITE_FLUSH(hw); |
1217 | msleep(10); | 1283 | msleep(10); |
1218 | /* Wait at least 10ms for All module to be Idle */ | 1284 | /* Wait at least 10ms for All module to be Idle */ |
@@ -1253,42 +1319,39 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup) | |||
1253 | { | 1319 | { |
1254 | u32 pm_ctrl_data; | 1320 | u32 pm_ctrl_data; |
1255 | u32 link_ctrl_data; | 1321 | u32 link_ctrl_data; |
1322 | u32 link_l1_timer = 0xF; | ||
1256 | 1323 | ||
1257 | AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data); | 1324 | AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data); |
1258 | AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data); | 1325 | AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data); |
1259 | pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1; | ||
1260 | 1326 | ||
1327 | pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1; | ||
1261 | pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK << | 1328 | pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK << |
1262 | PM_CTRL_L1_ENTRY_TIMER_SHIFT); | 1329 | PM_CTRL_L1_ENTRY_TIMER_SHIFT); |
1263 | pm_ctrl_data &= ~(PM_CTRL_LCKDET_TIMER_MASK << | 1330 | pm_ctrl_data &= ~(PM_CTRL_LCKDET_TIMER_MASK << |
1264 | PM_CTRL_LCKDET_TIMER_SHIFT); | 1331 | PM_CTRL_LCKDET_TIMER_SHIFT); |
1265 | 1332 | pm_ctrl_data |= AT_LCKDET_TIMER << PM_CTRL_LCKDET_TIMER_SHIFT; | |
1266 | pm_ctrl_data |= PM_CTRL_MAC_ASPM_CHK; | ||
1267 | pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN; | ||
1268 | pm_ctrl_data |= PM_CTRL_RBER_EN; | ||
1269 | pm_ctrl_data |= PM_CTRL_SDES_EN; | ||
1270 | 1333 | ||
1271 | if (hw->nic_type == athr_l2c_b || | 1334 | if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d || |
1272 | hw->nic_type == athr_l1d || | 1335 | hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) { |
1273 | hw->nic_type == athr_l2c_b2) { | ||
1274 | link_ctrl_data &= ~LINK_CTRL_EXT_SYNC; | 1336 | link_ctrl_data &= ~LINK_CTRL_EXT_SYNC; |
1275 | if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) { | 1337 | if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) { |
1276 | if (hw->nic_type == athr_l2c_b && | 1338 | if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) |
1277 | hw->revision_id == L2CB_V10) | ||
1278 | link_ctrl_data |= LINK_CTRL_EXT_SYNC; | 1339 | link_ctrl_data |= LINK_CTRL_EXT_SYNC; |
1279 | } | 1340 | } |
1280 | 1341 | ||
1281 | AT_WRITE_REG(hw, REG_LINK_CTRL, link_ctrl_data); | 1342 | AT_WRITE_REG(hw, REG_LINK_CTRL, link_ctrl_data); |
1282 | 1343 | ||
1283 | pm_ctrl_data |= PM_CTRL_PCIE_RECV; | 1344 | pm_ctrl_data |= PM_CTRL_RCVR_WT_TIMER; |
1284 | pm_ctrl_data |= AT_ASPM_L1_TIMER << PM_CTRL_PM_REQ_TIMER_SHIFT; | 1345 | pm_ctrl_data &= ~(PM_CTRL_PM_REQ_TIMER_MASK << |
1285 | pm_ctrl_data &= ~PM_CTRL_EN_BUFS_RX_L0S; | 1346 | PM_CTRL_PM_REQ_TIMER_SHIFT); |
1347 | pm_ctrl_data |= AT_ASPM_L1_TIMER << | ||
1348 | PM_CTRL_PM_REQ_TIMER_SHIFT; | ||
1286 | pm_ctrl_data &= ~PM_CTRL_SA_DLY_EN; | 1349 | pm_ctrl_data &= ~PM_CTRL_SA_DLY_EN; |
1287 | pm_ctrl_data &= ~PM_CTRL_HOTRST; | 1350 | pm_ctrl_data &= ~PM_CTRL_HOTRST; |
1288 | pm_ctrl_data |= 1 << PM_CTRL_L1_ENTRY_TIMER_SHIFT; | 1351 | pm_ctrl_data |= 1 << PM_CTRL_L1_ENTRY_TIMER_SHIFT; |
1289 | pm_ctrl_data |= PM_CTRL_SERDES_PD_EX_L1; | 1352 | pm_ctrl_data |= PM_CTRL_SERDES_PD_EX_L1; |
1290 | } | 1353 | } |
1291 | 1354 | pm_ctrl_data |= PM_CTRL_MAC_ASPM_CHK; | |
1292 | if (linkup) { | 1355 | if (linkup) { |
1293 | pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN; | 1356 | pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN; |
1294 | pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; | 1357 | pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; |
@@ -1297,27 +1360,26 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup) | |||
1297 | if (hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT) | 1360 | if (hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT) |
1298 | pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN; | 1361 | pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN; |
1299 | 1362 | ||
1300 | if (hw->nic_type == athr_l2c_b || | 1363 | if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d || |
1301 | hw->nic_type == athr_l1d || | 1364 | hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) { |
1302 | hw->nic_type == athr_l2c_b2) { | ||
1303 | if (hw->nic_type == athr_l2c_b) | 1365 | if (hw->nic_type == athr_l2c_b) |
1304 | if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) | 1366 | if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) |
1305 | pm_ctrl_data &= PM_CTRL_ASPM_L0S_EN; | 1367 | pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; |
1306 | pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN; | 1368 | pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN; |
1307 | pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN; | 1369 | pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN; |
1308 | pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN; | 1370 | pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN; |
1309 | pm_ctrl_data |= PM_CTRL_CLK_SWH_L1; | 1371 | pm_ctrl_data |= PM_CTRL_CLK_SWH_L1; |
1310 | if (hw->adapter->link_speed == SPEED_100 || | 1372 | if (hw->adapter->link_speed == SPEED_100 || |
1311 | hw->adapter->link_speed == SPEED_1000) { | 1373 | hw->adapter->link_speed == SPEED_1000) { |
1312 | pm_ctrl_data &= | 1374 | pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK << |
1313 | ~(PM_CTRL_L1_ENTRY_TIMER_MASK << | 1375 | PM_CTRL_L1_ENTRY_TIMER_SHIFT); |
1314 | PM_CTRL_L1_ENTRY_TIMER_SHIFT); | 1376 | if (hw->nic_type == athr_l2c_b) |
1315 | if (hw->nic_type == athr_l1d) | 1377 | link_l1_timer = 7; |
1316 | pm_ctrl_data |= 0xF << | 1378 | else if (hw->nic_type == athr_l2c_b2 || |
1317 | PM_CTRL_L1_ENTRY_TIMER_SHIFT; | 1379 | hw->nic_type == athr_l1d_2) |
1318 | else | 1380 | link_l1_timer = 4; |
1319 | pm_ctrl_data |= 7 << | 1381 | pm_ctrl_data |= link_l1_timer << |
1320 | PM_CTRL_L1_ENTRY_TIMER_SHIFT; | 1382 | PM_CTRL_L1_ENTRY_TIMER_SHIFT; |
1321 | } | 1383 | } |
1322 | } else { | 1384 | } else { |
1323 | pm_ctrl_data |= PM_CTRL_SERDES_L1_EN; | 1385 | pm_ctrl_data |= PM_CTRL_SERDES_L1_EN; |
@@ -1326,24 +1388,12 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup) | |||
1326 | pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1; | 1388 | pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1; |
1327 | pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; | 1389 | pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; |
1328 | pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN; | 1390 | pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN; |
1329 | } | ||
1330 | atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29); | ||
1331 | if (hw->adapter->link_speed == SPEED_10) | ||
1332 | if (hw->nic_type == athr_l1d) | ||
1333 | atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0xB69D); | ||
1334 | else | ||
1335 | atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB6DD); | ||
1336 | else if (hw->adapter->link_speed == SPEED_100) | ||
1337 | atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB2DD); | ||
1338 | else | ||
1339 | atl1c_write_phy_reg(hw, MII_DBG_DATA, 0x96DD); | ||
1340 | 1391 | ||
1392 | } | ||
1341 | } else { | 1393 | } else { |
1342 | pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN; | ||
1343 | pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN; | 1394 | pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN; |
1344 | pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; | 1395 | pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; |
1345 | pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN; | 1396 | pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN; |
1346 | |||
1347 | pm_ctrl_data |= PM_CTRL_CLK_SWH_L1; | 1397 | pm_ctrl_data |= PM_CTRL_CLK_SWH_L1; |
1348 | 1398 | ||
1349 | if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT) | 1399 | if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT) |
@@ -1351,8 +1401,9 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup) | |||
1351 | else | 1401 | else |
1352 | pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN; | 1402 | pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN; |
1353 | } | 1403 | } |
1354 | |||
1355 | AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data); | 1404 | AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data); |
1405 | |||
1406 | return; | ||
1356 | } | 1407 | } |
1357 | 1408 | ||
1358 | static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter) | 1409 | static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter) |
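The ASPM hunk above leans on one idiom throughout: clear a multi-bit field with ~(MASK << SHIFT), then OR the new value (here the per-chip link_l1_timer) back into place. Below is a minimal, self-contained C sketch of that read-modify-write pattern; the mask, shift and register values are illustrative only and do not reflect the real REG_PM_CTRL layout.

#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout -- not the real atl1c REG_PM_CTRL bits. */
#define L1_ENTRY_TIMER_MASK	0xFU
#define L1_ENTRY_TIMER_SHIFT	16

static uint32_t set_field(uint32_t reg, uint32_t mask, unsigned int shift, uint32_t val)
{
	reg &= ~(mask << shift);	/* clear the old field */
	reg |= (val & mask) << shift;	/* install the new value */
	return reg;
}

int main(void)
{
	uint32_t pm_ctrl = 0xdead0000u;

	/* e.g. link_l1_timer = 4 for the newer chips in the hunk above */
	pm_ctrl = set_field(pm_ctrl, L1_ENTRY_TIMER_MASK, L1_ENTRY_TIMER_SHIFT, 4);
	printf("PM_CTRL = 0x%08x\n", pm_ctrl);
	return 0;
}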
@@ -1391,7 +1442,8 @@ static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter) | |||
1391 | mac_ctrl_data |= MAC_CTRL_MC_ALL_EN; | 1442 | mac_ctrl_data |= MAC_CTRL_MC_ALL_EN; |
1392 | 1443 | ||
1393 | mac_ctrl_data |= MAC_CTRL_SINGLE_PAUSE_EN; | 1444 | mac_ctrl_data |= MAC_CTRL_SINGLE_PAUSE_EN; |
1394 | if (hw->nic_type == athr_l1d || hw->nic_type == athr_l2c_b2) { | 1445 | if (hw->nic_type == athr_l1d || hw->nic_type == athr_l2c_b2 || |
1446 | hw->nic_type == athr_l1d_2) { | ||
1395 | mac_ctrl_data |= MAC_CTRL_SPEED_MODE_SW; | 1447 | mac_ctrl_data |= MAC_CTRL_SPEED_MODE_SW; |
1396 | mac_ctrl_data |= MAC_CTRL_HASH_ALG_CRC32; | 1448 | mac_ctrl_data |= MAC_CTRL_HASH_ALG_CRC32; |
1397 | } | 1449 | } |
@@ -1409,6 +1461,7 @@ static int atl1c_configure(struct atl1c_adapter *adapter) | |||
1409 | struct atl1c_hw *hw = &adapter->hw; | 1461 | struct atl1c_hw *hw = &adapter->hw; |
1410 | u32 master_ctrl_data = 0; | 1462 | u32 master_ctrl_data = 0; |
1411 | u32 intr_modrt_data; | 1463 | u32 intr_modrt_data; |
1464 | u32 data; | ||
1412 | 1465 | ||
1413 | /* clear interrupt status */ | 1466 | /* clear interrupt status */ |
1414 | AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF); | 1467 | AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF); |
@@ -1418,6 +1471,15 @@ static int atl1c_configure(struct atl1c_adapter *adapter) | |||
1418 | * HW will enable itself to assert an interrupt event to the system after | 1471 | * HW will enable itself to assert an interrupt event to the system after |
1419 | * waiting x-time for software to acknowledge the interrupt. | 1472 | * waiting x-time for software to acknowledge the interrupt. |
1420 | */ | 1473 | */ |
1474 | |||
1475 | data = CLK_GATING_EN_ALL; | ||
1476 | if (hw->ctrl_flags & ATL1C_CLK_GATING_EN) { | ||
1477 | if (hw->nic_type == athr_l2c_b) | ||
1478 | data &= ~CLK_GATING_RXMAC_EN; | ||
1479 | } else | ||
1480 | data = 0; | ||
1481 | AT_WRITE_REG(hw, REG_CLK_GATING_CTRL, data); | ||
1482 | |||
1421 | AT_WRITE_REG(hw, REG_INT_RETRIG_TIMER, | 1483 | AT_WRITE_REG(hw, REG_INT_RETRIG_TIMER, |
1422 | hw->ict & INT_RETRIG_TIMER_MASK); | 1484 | hw->ict & INT_RETRIG_TIMER_MASK); |
1423 | 1485 | ||
@@ -1436,6 +1498,7 @@ static int atl1c_configure(struct atl1c_adapter *adapter) | |||
1436 | if (hw->ctrl_flags & ATL1C_INTR_CLEAR_ON_READ) | 1498 | if (hw->ctrl_flags & ATL1C_INTR_CLEAR_ON_READ) |
1437 | master_ctrl_data |= MASTER_CTRL_INT_RDCLR; | 1499 | master_ctrl_data |= MASTER_CTRL_INT_RDCLR; |
1438 | 1500 | ||
1501 | master_ctrl_data |= MASTER_CTRL_SA_TIMER_EN; | ||
1439 | AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data); | 1502 | AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data); |
1440 | 1503 | ||
1441 | if (hw->ctrl_flags & ATL1C_CMB_ENABLE) { | 1504 | if (hw->ctrl_flags & ATL1C_CMB_ENABLE) { |
@@ -1624,11 +1687,9 @@ static irqreturn_t atl1c_intr(int irq, void *data) | |||
1624 | "atl1c hardware error (status = 0x%x)\n", | 1687 | "atl1c hardware error (status = 0x%x)\n", |
1625 | status & ISR_ERROR); | 1688 | status & ISR_ERROR); |
1626 | /* reset MAC */ | 1689 | /* reset MAC */ |
1627 | hw->intr_mask &= ~ISR_ERROR; | ||
1628 | AT_WRITE_REG(hw, REG_IMR, hw->intr_mask); | ||
1629 | adapter->work_event |= ATL1C_WORK_EVENT_RESET; | 1690 | adapter->work_event |= ATL1C_WORK_EVENT_RESET; |
1630 | schedule_work(&adapter->common_task); | 1691 | schedule_work(&adapter->common_task); |
1631 | break; | 1692 | return IRQ_HANDLED; |
1632 | } | 1693 | } |
1633 | 1694 | ||
1634 | if (status & ISR_OVER) | 1695 | if (status & ISR_OVER) |
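The interrupt-handler hunk above stops masking ISR_ERROR inside the handler; it only flags a reset event, schedules the driver's common task and returns IRQ_HANDLED, leaving the heavy recovery to process context. A kernel-style sketch of that defer-to-workqueue pattern follows; the structure and names (foo_adapter, FOO_WORK_EVENT_RESET) are invented for illustration and this is not a drop-in for atl1c.

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/* Hypothetical device context -- names are illustrative only. */
struct foo_adapter {
	u32 work_event;
#define FOO_WORK_EVENT_RESET	0x1
	struct work_struct common_task;	/* performs the actual MAC reset */
};

static irqreturn_t foo_intr(int irq, void *data)
{
	struct foo_adapter *adapter = data;

	/* Record what happened and defer the recovery to process context. */
	adapter->work_event |= FOO_WORK_EVENT_RESET;
	schedule_work(&adapter->common_task);
	return IRQ_HANDLED;
}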
@@ -2303,7 +2364,6 @@ void atl1c_down(struct atl1c_adapter *adapter) | |||
2303 | napi_disable(&adapter->napi); | 2364 | napi_disable(&adapter->napi); |
2304 | atl1c_irq_disable(adapter); | 2365 | atl1c_irq_disable(adapter); |
2305 | atl1c_free_irq(adapter); | 2366 | atl1c_free_irq(adapter); |
2306 | AT_WRITE_REG(&adapter->hw, REG_ISR, ISR_DIS_INT); | ||
2307 | /* reset MAC to disable all RX/TX */ | 2367 | /* reset MAC to disable all RX/TX */ |
2308 | atl1c_reset_mac(&adapter->hw); | 2368 | atl1c_reset_mac(&adapter->hw); |
2309 | msleep(1); | 2369 | msleep(1); |
@@ -2387,79 +2447,68 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state) | |||
2387 | struct net_device *netdev = pci_get_drvdata(pdev); | 2447 | struct net_device *netdev = pci_get_drvdata(pdev); |
2388 | struct atl1c_adapter *adapter = netdev_priv(netdev); | 2448 | struct atl1c_adapter *adapter = netdev_priv(netdev); |
2389 | struct atl1c_hw *hw = &adapter->hw; | 2449 | struct atl1c_hw *hw = &adapter->hw; |
2390 | u32 ctrl; | 2450 | u32 mac_ctrl_data = 0; |
2391 | u32 mac_ctrl_data; | 2451 | u32 master_ctrl_data = 0; |
2392 | u32 master_ctrl_data; | ||
2393 | u32 wol_ctrl_data = 0; | 2452 | u32 wol_ctrl_data = 0; |
2394 | u16 mii_bmsr_data; | 2453 | u16 mii_intr_status_data = 0; |
2395 | u16 save_autoneg_advertised; | ||
2396 | u16 mii_intr_status_data; | ||
2397 | u32 wufc = adapter->wol; | 2454 | u32 wufc = adapter->wol; |
2398 | u32 i; | ||
2399 | int retval = 0; | 2455 | int retval = 0; |
2400 | 2456 | ||
2457 | atl1c_disable_l0s_l1(hw); | ||
2401 | if (netif_running(netdev)) { | 2458 | if (netif_running(netdev)) { |
2402 | WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); | 2459 | WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); |
2403 | atl1c_down(adapter); | 2460 | atl1c_down(adapter); |
2404 | } | 2461 | } |
2405 | netif_device_detach(netdev); | 2462 | netif_device_detach(netdev); |
2406 | atl1c_disable_l0s_l1(hw); | ||
2407 | retval = pci_save_state(pdev); | 2463 | retval = pci_save_state(pdev); |
2408 | if (retval) | 2464 | if (retval) |
2409 | return retval; | 2465 | return retval; |
2466 | |||
2467 | if (wufc) | ||
2468 | if (atl1c_phy_power_saving(hw) != 0) | ||
2469 | dev_dbg(&pdev->dev, "phy power saving failed"); | ||
2470 | |||
2471 | AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data); | ||
2472 | AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl_data); | ||
2473 | |||
2474 | master_ctrl_data &= ~MASTER_CTRL_CLK_SEL_DIS; | ||
2475 | mac_ctrl_data &= ~(MAC_CTRL_PRMLEN_MASK << MAC_CTRL_PRMLEN_SHIFT); | ||
2476 | mac_ctrl_data |= (((u32)adapter->hw.preamble_len & | ||
2477 | MAC_CTRL_PRMLEN_MASK) << | ||
2478 | MAC_CTRL_PRMLEN_SHIFT); | ||
2479 | mac_ctrl_data &= ~(MAC_CTRL_SPEED_MASK << MAC_CTRL_SPEED_SHIFT); | ||
2480 | mac_ctrl_data &= ~MAC_CTRL_DUPLX; | ||
2481 | |||
2410 | if (wufc) { | 2482 | if (wufc) { |
2411 | AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data); | 2483 | mac_ctrl_data |= MAC_CTRL_RX_EN; |
2412 | master_ctrl_data &= ~MASTER_CTRL_CLK_SEL_DIS; | 2484 | if (adapter->link_speed == SPEED_1000 || |
2413 | 2485 | adapter->link_speed == SPEED_0) { | |
2414 | /* get link status */ | 2486 | mac_ctrl_data |= atl1c_mac_speed_1000 << |
2415 | atl1c_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); | 2487 | MAC_CTRL_SPEED_SHIFT; |
2416 | atl1c_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); | 2488 | mac_ctrl_data |= MAC_CTRL_DUPLX; |
2417 | save_autoneg_advertised = hw->autoneg_advertised; | 2489 | } else |
2418 | hw->autoneg_advertised = ADVERTISED_10baseT_Half; | 2490 | mac_ctrl_data |= atl1c_mac_speed_10_100 << |
2419 | if (atl1c_restart_autoneg(hw) != 0) | 2491 | MAC_CTRL_SPEED_SHIFT; |
2420 | if (netif_msg_link(adapter)) | 2492 | |
2421 | dev_warn(&pdev->dev, "phy autoneg failed\n"); | 2493 | if (adapter->link_duplex == DUPLEX_FULL) |
2422 | hw->phy_configured = false; /* re-init PHY when resume */ | 2494 | mac_ctrl_data |= MAC_CTRL_DUPLX; |
2423 | hw->autoneg_advertised = save_autoneg_advertised; | 2495 | |
2424 | /* turn on magic packet wol */ | 2496 | /* turn on magic packet wol */ |
2425 | if (wufc & AT_WUFC_MAG) | 2497 | if (wufc & AT_WUFC_MAG) |
2426 | wol_ctrl_data = WOL_MAGIC_EN | WOL_MAGIC_PME_EN; | 2498 | wol_ctrl_data |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN; |
2427 | 2499 | ||
2428 | if (wufc & AT_WUFC_LNKC) { | 2500 | if (wufc & AT_WUFC_LNKC) { |
2429 | for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) { | ||
2430 | msleep(100); | ||
2431 | atl1c_read_phy_reg(hw, MII_BMSR, | ||
2432 | (u16 *)&mii_bmsr_data); | ||
2433 | if (mii_bmsr_data & BMSR_LSTATUS) | ||
2434 | break; | ||
2435 | } | ||
2436 | if ((mii_bmsr_data & BMSR_LSTATUS) == 0) | ||
2437 | if (netif_msg_link(adapter)) | ||
2438 | dev_warn(&pdev->dev, | ||
2439 | "%s: Link may change" | ||
2440 | "when suspend\n", | ||
2441 | atl1c_driver_name); | ||
2442 | wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN; | 2501 | wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN; |
2443 | /* only link up can wake up */ | 2502 | /* only link up can wake up */ |
2444 | if (atl1c_write_phy_reg(hw, MII_IER, IER_LINK_UP) != 0) { | 2503 | if (atl1c_write_phy_reg(hw, MII_IER, IER_LINK_UP) != 0) { |
2445 | if (netif_msg_link(adapter)) | 2504 | dev_dbg(&pdev->dev, "%s: read write phy " |
2446 | dev_err(&pdev->dev, | 2505 | "register failed.\n", |
2447 | "%s: read write phy " | 2506 | atl1c_driver_name); |
2448 | "register failed.\n", | ||
2449 | atl1c_driver_name); | ||
2450 | goto wol_dis; | ||
2451 | } | 2507 | } |
2452 | } | 2508 | } |
2453 | /* clear phy interrupt */ | 2509 | /* clear phy interrupt */ |
2454 | atl1c_read_phy_reg(hw, MII_ISR, &mii_intr_status_data); | 2510 | atl1c_read_phy_reg(hw, MII_ISR, &mii_intr_status_data); |
2455 | /* Config MAC Ctrl register */ | 2511 | /* Config MAC Ctrl register */ |
2456 | mac_ctrl_data = MAC_CTRL_RX_EN; | ||
2457 | /* set to 10/100M halt duplex */ | ||
2458 | mac_ctrl_data |= atl1c_mac_speed_10_100 << MAC_CTRL_SPEED_SHIFT; | ||
2459 | mac_ctrl_data |= (((u32)adapter->hw.preamble_len & | ||
2460 | MAC_CTRL_PRMLEN_MASK) << | ||
2461 | MAC_CTRL_PRMLEN_SHIFT); | ||
2462 | |||
2463 | if (adapter->vlgrp) | 2512 | if (adapter->vlgrp) |
2464 | mac_ctrl_data |= MAC_CTRL_RMV_VLAN; | 2513 | mac_ctrl_data |= MAC_CTRL_RMV_VLAN; |
2465 | 2514 | ||
@@ -2467,37 +2516,30 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state) | |||
2467 | if (wufc & AT_WUFC_MAG) | 2516 | if (wufc & AT_WUFC_MAG) |
2468 | mac_ctrl_data |= MAC_CTRL_BC_EN; | 2517 | mac_ctrl_data |= MAC_CTRL_BC_EN; |
2469 | 2518 | ||
2470 | if (netif_msg_hw(adapter)) | 2519 | dev_dbg(&pdev->dev, |
2471 | dev_dbg(&pdev->dev, | 2520 | "%s: suspend MAC=0x%x\n", |
2472 | "%s: suspend MAC=0x%x\n", | 2521 | atl1c_driver_name, mac_ctrl_data); |
2473 | atl1c_driver_name, mac_ctrl_data); | ||
2474 | AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data); | 2522 | AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data); |
2475 | AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data); | 2523 | AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data); |
2476 | AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); | 2524 | AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); |
2477 | 2525 | ||
2478 | /* pcie patch */ | 2526 | /* pcie patch */ |
2479 | AT_READ_REG(hw, REG_PCIE_PHYMISC, &ctrl); | 2527 | device_set_wakeup_enable(&pdev->dev, 1); |
2480 | ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; | ||
2481 | AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl); | ||
2482 | 2528 | ||
2483 | pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); | 2529 | AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT | |
2484 | goto suspend_exit; | 2530 | GPHY_CTRL_EXT_RESET); |
2531 | pci_prepare_to_sleep(pdev); | ||
2532 | } else { | ||
2533 | AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_POWER_SAVING); | ||
2534 | master_ctrl_data |= MASTER_CTRL_CLK_SEL_DIS; | ||
2535 | mac_ctrl_data |= atl1c_mac_speed_10_100 << MAC_CTRL_SPEED_SHIFT; | ||
2536 | mac_ctrl_data |= MAC_CTRL_DUPLX; | ||
2537 | AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data); | ||
2538 | AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); | ||
2539 | AT_WRITE_REG(hw, REG_WOL_CTRL, 0); | ||
2540 | hw->phy_configured = false; /* re-init PHY when resume */ | ||
2541 | pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); | ||
2485 | } | 2542 | } |
2486 | wol_dis: | ||
2487 | |||
2488 | /* WOL disabled */ | ||
2489 | AT_WRITE_REG(hw, REG_WOL_CTRL, 0); | ||
2490 | |||
2491 | /* pcie patch */ | ||
2492 | AT_READ_REG(hw, REG_PCIE_PHYMISC, &ctrl); | ||
2493 | ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; | ||
2494 | AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl); | ||
2495 | |||
2496 | atl1c_phy_disable(hw); | ||
2497 | hw->phy_configured = false; /* re-init PHY when resume */ | ||
2498 | |||
2499 | pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); | ||
2500 | suspend_exit: | ||
2501 | 2543 | ||
2502 | pci_disable_device(pdev); | 2544 | pci_disable_device(pdev); |
2503 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | 2545 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); |
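The reworked suspend path above computes the MAC and WoL control words up front and then branches on whether any wake condition (wufc) was requested. The stand-alone C sketch below builds a wake-on-LAN control word from the requested conditions; the bit values are made up for the example and do not match the real atl1c register definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative bits -- not the real atl1c WOL/WUFC definitions. */
#define WUFC_MAG		0x1u	/* wake on magic packet requested */
#define WUFC_LNKC		0x2u	/* wake on link change requested  */

#define WOL_MAGIC_EN		(1u << 0)
#define WOL_MAGIC_PME_EN	(1u << 1)
#define WOL_LINK_CHG_EN		(1u << 2)
#define WOL_LINK_CHG_PME_EN	(1u << 3)

static uint32_t build_wol_ctrl(uint32_t wufc)
{
	uint32_t wol = 0;

	if (wufc & WUFC_MAG)
		wol |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
	if (wufc & WUFC_LNKC)
		wol |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;

	return wol;	/* 0 means wake-on-LAN stays disabled */
}

int main(void)
{
	printf("wol_ctrl = 0x%x\n", build_wol_ctrl(WUFC_MAG | WUFC_LNKC));
	return 0;
}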
@@ -2516,9 +2558,19 @@ static int atl1c_resume(struct pci_dev *pdev) | |||
2516 | pci_enable_wake(pdev, PCI_D3cold, 0); | 2558 | pci_enable_wake(pdev, PCI_D3cold, 0); |
2517 | 2559 | ||
2518 | AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); | 2560 | AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); |
2561 | atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE | | ||
2562 | ATL1C_PCIE_PHY_RESET); | ||
2519 | 2563 | ||
2520 | atl1c_phy_reset(&adapter->hw); | 2564 | atl1c_phy_reset(&adapter->hw); |
2521 | atl1c_reset_mac(&adapter->hw); | 2565 | atl1c_reset_mac(&adapter->hw); |
2566 | atl1c_phy_init(&adapter->hw); | ||
2567 | |||
2568 | #if 0 | ||
2569 | AT_READ_REG(&adapter->hw, REG_PM_CTRLSTAT, &pm_data); | ||
2570 | pm_data &= ~PM_CTRLSTAT_PME_EN; | ||
2571 | AT_WRITE_REG(&adapter->hw, REG_PM_CTRLSTAT, pm_data); | ||
2572 | #endif | ||
2573 | |||
2522 | netif_device_attach(netdev); | 2574 | netif_device_attach(netdev); |
2523 | if (netif_running(netdev)) | 2575 | if (netif_running(netdev)) |
2524 | atl1c_up(adapter); | 2576 | atl1c_up(adapter); |
diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h index 146372fd6683..9c0ddb273ac8 100644 --- a/drivers/net/atlx/atl1.h +++ b/drivers/net/atlx/atl1.h | |||
@@ -436,8 +436,8 @@ struct rx_free_desc { | |||
436 | __le16 buf_len; /* Size of the receive buffer in host memory */ | 436 | __le16 buf_len; /* Size of the receive buffer in host memory */ |
437 | u16 coalese; /* Update consumer index to host after the | 437 | u16 coalese; /* Update consumer index to host after the |
438 | * reception of this frame */ | 438 | * reception of this frame */ |
439 | /* __attribute__ ((packed)) is required */ | 439 | /* __packed is required */ |
440 | } __attribute__ ((packed)); | 440 | } __packed; |
441 | 441 | ||
442 | /* | 442 | /* |
443 | * The L1 transmit packet descriptor is comprised of four 32-bit words. | 443 | * The L1 transmit packet descriptor is comprised of four 32-bit words. |
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index b9ad799c719f..ee1ad9693c8f 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c | |||
@@ -186,7 +186,7 @@ static int be_mcc_notify_wait(struct be_adapter *adapter) | |||
186 | 186 | ||
187 | static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db) | 187 | static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db) |
188 | { | 188 | { |
189 | int cnt = 0, wait = 5; | 189 | int msecs = 0; |
190 | u32 ready; | 190 | u32 ready; |
191 | 191 | ||
192 | do { | 192 | do { |
@@ -201,15 +201,14 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db) | |||
201 | if (ready) | 201 | if (ready) |
202 | break; | 202 | break; |
203 | 203 | ||
204 | if (cnt > 4000000) { | 204 | if (msecs > 4000) { |
205 | dev_err(&adapter->pdev->dev, "mbox poll timed out\n"); | 205 | dev_err(&adapter->pdev->dev, "mbox poll timed out\n"); |
206 | return -1; | 206 | return -1; |
207 | } | 207 | } |
208 | 208 | ||
209 | if (cnt > 50) | 209 | set_current_state(TASK_INTERRUPTIBLE); |
210 | wait = 200; | 210 | schedule_timeout(msecs_to_jiffies(1)); |
211 | cnt += wait; | 211 | msecs++; |
212 | udelay(wait); | ||
213 | } while (true); | 212 | } while (true); |
214 | 213 | ||
215 | return 0; | 214 | return 0; |
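The be_mbox_db_ready_wait() change above replaces a udelay() busy-wait with a loop that sleeps roughly one millisecond per iteration and counts elapsed milliseconds against a 4000 ms budget. Below is a portable user-space sketch of the same polling shape, with a fake hw_ready() standing in for the doorbell read.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for reading the mailbox doorbell "ready" bit. */
static bool hw_ready(void)
{
	static int polls;

	return ++polls > 5;	/* pretend it becomes ready on the sixth poll */
}

/* Sleep ~1 ms per iteration instead of busy-waiting; give up after 4000 ms. */
static int wait_ready(void)
{
	struct timespec one_ms = { 0, 1000 * 1000 };
	int msecs = 0;

	while (!hw_ready()) {
		if (msecs > 4000) {
			fprintf(stderr, "mbox poll timed out\n");
			return -1;
		}
		nanosleep(&one_ms, NULL);
		msecs++;
	}
	return 0;
}

int main(void)
{
	return wait_ready() ? 1 : 0;
}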
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 54b14272f333..322577469852 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c | |||
@@ -1735,6 +1735,44 @@ done: | |||
1735 | adapter->isr_registered = false; | 1735 | adapter->isr_registered = false; |
1736 | } | 1736 | } |
1737 | 1737 | ||
1738 | static int be_close(struct net_device *netdev) | ||
1739 | { | ||
1740 | struct be_adapter *adapter = netdev_priv(netdev); | ||
1741 | struct be_eq_obj *rx_eq = &adapter->rx_eq; | ||
1742 | struct be_eq_obj *tx_eq = &adapter->tx_eq; | ||
1743 | int vec; | ||
1744 | |||
1745 | cancel_delayed_work_sync(&adapter->work); | ||
1746 | |||
1747 | be_async_mcc_disable(adapter); | ||
1748 | |||
1749 | netif_stop_queue(netdev); | ||
1750 | netif_carrier_off(netdev); | ||
1751 | adapter->link_up = false; | ||
1752 | |||
1753 | be_intr_set(adapter, false); | ||
1754 | |||
1755 | if (adapter->msix_enabled) { | ||
1756 | vec = be_msix_vec_get(adapter, tx_eq->q.id); | ||
1757 | synchronize_irq(vec); | ||
1758 | vec = be_msix_vec_get(adapter, rx_eq->q.id); | ||
1759 | synchronize_irq(vec); | ||
1760 | } else { | ||
1761 | synchronize_irq(netdev->irq); | ||
1762 | } | ||
1763 | be_irq_unregister(adapter); | ||
1764 | |||
1765 | napi_disable(&rx_eq->napi); | ||
1766 | napi_disable(&tx_eq->napi); | ||
1767 | |||
1768 | /* Wait for all pending tx completions to arrive so that | ||
1769 | * all tx skbs are freed. | ||
1770 | */ | ||
1771 | be_tx_compl_clean(adapter); | ||
1772 | |||
1773 | return 0; | ||
1774 | } | ||
1775 | |||
1738 | static int be_open(struct net_device *netdev) | 1776 | static int be_open(struct net_device *netdev) |
1739 | { | 1777 | { |
1740 | struct be_adapter *adapter = netdev_priv(netdev); | 1778 | struct be_adapter *adapter = netdev_priv(netdev); |
@@ -1765,27 +1803,29 @@ static int be_open(struct net_device *netdev) | |||
1765 | /* Now that interrupts are on we can process async mcc */ | 1803 | /* Now that interrupts are on we can process async mcc */ |
1766 | be_async_mcc_enable(adapter); | 1804 | be_async_mcc_enable(adapter); |
1767 | 1805 | ||
1806 | schedule_delayed_work(&adapter->work, msecs_to_jiffies(100)); | ||
1807 | |||
1768 | status = be_cmd_link_status_query(adapter, &link_up, &mac_speed, | 1808 | status = be_cmd_link_status_query(adapter, &link_up, &mac_speed, |
1769 | &link_speed); | 1809 | &link_speed); |
1770 | if (status) | 1810 | if (status) |
1771 | goto ret_sts; | 1811 | goto err; |
1772 | be_link_status_update(adapter, link_up); | 1812 | be_link_status_update(adapter, link_up); |
1773 | 1813 | ||
1774 | if (be_physfn(adapter)) | 1814 | if (be_physfn(adapter)) { |
1775 | status = be_vid_config(adapter); | 1815 | status = be_vid_config(adapter); |
1776 | if (status) | 1816 | if (status) |
1777 | goto ret_sts; | 1817 | goto err; |
1778 | 1818 | ||
1779 | if (be_physfn(adapter)) { | ||
1780 | status = be_cmd_set_flow_control(adapter, | 1819 | status = be_cmd_set_flow_control(adapter, |
1781 | adapter->tx_fc, adapter->rx_fc); | 1820 | adapter->tx_fc, adapter->rx_fc); |
1782 | if (status) | 1821 | if (status) |
1783 | goto ret_sts; | 1822 | goto err; |
1784 | } | 1823 | } |
1785 | 1824 | ||
1786 | schedule_delayed_work(&adapter->work, msecs_to_jiffies(100)); | 1825 | return 0; |
1787 | ret_sts: | 1826 | err: |
1788 | return status; | 1827 | be_close(adapter->netdev); |
1828 | return -EIO; | ||
1789 | } | 1829 | } |
1790 | 1830 | ||
1791 | static int be_setup_wol(struct be_adapter *adapter, bool enable) | 1831 | static int be_setup_wol(struct be_adapter *adapter, bool enable) |
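be_close() is moved above be_open() so the new error path in be_open() can tear everything down with a single call and return -EIO instead of leaving the device half-initialised. The trivial stand-alone C sketch below shows that unwind-by-calling-close pattern; the struct and the simulated failure are invented for illustration.

#include <stdio.h>

struct nic {
	int rx_on, tx_on, link_checked;
};

/* Safe to call on a partially opened device: it only undoes what is set. */
static void nic_close(struct nic *n)
{
	n->link_checked = 0;
	n->tx_on = 0;
	n->rx_on = 0;
}

static int nic_open(struct nic *n, int simulate_link_failure)
{
	n->rx_on = 1;
	n->tx_on = 1;

	if (simulate_link_failure)
		goto err;
	n->link_checked = 1;

	return 0;
err:
	nic_close(n);	/* one call unwinds everything set up so far */
	return -1;
}

int main(void)
{
	struct nic n = { 0, 0, 0 };

	if (nic_open(&n, 1))
		fprintf(stderr, "open failed, device left closed\n");
	return 0;
}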
@@ -1913,43 +1953,6 @@ static int be_clear(struct be_adapter *adapter) | |||
1913 | return 0; | 1953 | return 0; |
1914 | } | 1954 | } |
1915 | 1955 | ||
1916 | static int be_close(struct net_device *netdev) | ||
1917 | { | ||
1918 | struct be_adapter *adapter = netdev_priv(netdev); | ||
1919 | struct be_eq_obj *rx_eq = &adapter->rx_eq; | ||
1920 | struct be_eq_obj *tx_eq = &adapter->tx_eq; | ||
1921 | int vec; | ||
1922 | |||
1923 | cancel_delayed_work_sync(&adapter->work); | ||
1924 | |||
1925 | be_async_mcc_disable(adapter); | ||
1926 | |||
1927 | netif_stop_queue(netdev); | ||
1928 | netif_carrier_off(netdev); | ||
1929 | adapter->link_up = false; | ||
1930 | |||
1931 | be_intr_set(adapter, false); | ||
1932 | |||
1933 | if (adapter->msix_enabled) { | ||
1934 | vec = be_msix_vec_get(adapter, tx_eq->q.id); | ||
1935 | synchronize_irq(vec); | ||
1936 | vec = be_msix_vec_get(adapter, rx_eq->q.id); | ||
1937 | synchronize_irq(vec); | ||
1938 | } else { | ||
1939 | synchronize_irq(netdev->irq); | ||
1940 | } | ||
1941 | be_irq_unregister(adapter); | ||
1942 | |||
1943 | napi_disable(&rx_eq->napi); | ||
1944 | napi_disable(&tx_eq->napi); | ||
1945 | |||
1946 | /* Wait for all pending tx completions to arrive so that | ||
1947 | * all tx skbs are freed. | ||
1948 | */ | ||
1949 | be_tx_compl_clean(adapter); | ||
1950 | |||
1951 | return 0; | ||
1952 | } | ||
1953 | 1956 | ||
1954 | #define FW_FILE_HDR_SIGN "ServerEngines Corp. " | 1957 | #define FW_FILE_HDR_SIGN "ServerEngines Corp. " |
1955 | char flash_cookie[2][16] = {"*** SE FLAS", | 1958 | char flash_cookie[2][16] = {"*** SE FLAS", |
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 40fdc41446cc..25c14c6236f5 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c | |||
@@ -233,34 +233,27 @@ static void tlb_deinitialize(struct bonding *bond) | |||
233 | _unlock_tx_hashtbl(bond); | 233 | _unlock_tx_hashtbl(bond); |
234 | } | 234 | } |
235 | 235 | ||
236 | static long long compute_gap(struct slave *slave) | ||
237 | { | ||
238 | return (s64) (slave->speed << 20) - /* Convert to Megabit per sec */ | ||
239 | (s64) (SLAVE_TLB_INFO(slave).load << 3); /* Bytes to bits */ | ||
240 | } | ||
241 | |||
236 | /* Caller must hold bond lock for read */ | 242 | /* Caller must hold bond lock for read */ |
237 | static struct slave *tlb_get_least_loaded_slave(struct bonding *bond) | 243 | static struct slave *tlb_get_least_loaded_slave(struct bonding *bond) |
238 | { | 244 | { |
239 | struct slave *slave, *least_loaded; | 245 | struct slave *slave, *least_loaded; |
240 | s64 max_gap; | 246 | long long max_gap; |
241 | int i, found = 0; | 247 | int i; |
242 | |||
243 | /* Find the first enabled slave */ | ||
244 | bond_for_each_slave(bond, slave, i) { | ||
245 | if (SLAVE_IS_OK(slave)) { | ||
246 | found = 1; | ||
247 | break; | ||
248 | } | ||
249 | } | ||
250 | |||
251 | if (!found) { | ||
252 | return NULL; | ||
253 | } | ||
254 | 248 | ||
255 | least_loaded = slave; | 249 | least_loaded = NULL; |
256 | max_gap = (s64)(slave->speed << 20) - /* Convert to Megabit per sec */ | 250 | max_gap = LLONG_MIN; |
257 | (s64)(SLAVE_TLB_INFO(slave).load << 3); /* Bytes to bits */ | ||
258 | 251 | ||
259 | /* Find the slave with the largest gap */ | 252 | /* Find the slave with the largest gap */ |
260 | bond_for_each_slave_from(bond, slave, i, least_loaded) { | 253 | bond_for_each_slave(bond, slave, i) { |
261 | if (SLAVE_IS_OK(slave)) { | 254 | if (SLAVE_IS_OK(slave)) { |
262 | s64 gap = (s64)(slave->speed << 20) - | 255 | long long gap = compute_gap(slave); |
263 | (s64)(SLAVE_TLB_INFO(slave).load << 3); | 256 | |
264 | if (max_gap < gap) { | 257 | if (max_gap < gap) { |
265 | least_loaded = slave; | 258 | least_loaded = slave; |
266 | max_gap = gap; | 259 | max_gap = gap; |
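The bond_alb.c hunk above factors the gap computation into compute_gap() and seeds the search with LLONG_MIN, which removes the separate "find the first usable slave" pass. The sketch below is a stand-alone C version of the same selection: nominal capacity in bits per second (the driver converts Mbit/s with a << 20 shift) minus the load already assigned, keeping the slave with the largest gap.

#include <limits.h>
#include <stdio.h>

struct slave {
	int ok;				/* up and usable */
	unsigned int speed;		/* Mbit/s */
	unsigned long long load;	/* bytes/s already assigned */
};

/* gap = nominal capacity minus assigned load, both in bits per second */
static long long compute_gap(const struct slave *s)
{
	return ((long long)s->speed << 20) - (long long)(s->load << 3);
}

static const struct slave *least_loaded(const struct slave *sl, int n)
{
	const struct slave *best = NULL;
	long long max_gap = LLONG_MIN;	/* no "find first usable slave" pass */
	int i;

	for (i = 0; i < n; i++) {
		long long gap;

		if (!sl[i].ok)
			continue;
		gap = compute_gap(&sl[i]);
		if (gap > max_gap) {
			best = &sl[i];
			max_gap = gap;
		}
	}
	return best;	/* NULL when no slave is usable */
}

int main(void)
{
	struct slave sl[] = {
		{ 1, 1000, 40000000ULL },	/* gigabit, moderately loaded */
		{ 1,  100,  1000000ULL },	/* fast ethernet, lightly loaded */
		{ 0, 1000,        0ULL },	/* link down */
	};
	const struct slave *s = least_loaded(sl, 3);

	printf("picked the %u Mbit/s slave\n", s ? s->speed : 0);
	return 0;
}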
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 5e12462a9d5e..1b19276cff12 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -90,6 +90,7 @@ | |||
90 | #define BOND_LINK_ARP_INTERV 0 | 90 | #define BOND_LINK_ARP_INTERV 0 |
91 | 91 | ||
92 | static int max_bonds = BOND_DEFAULT_MAX_BONDS; | 92 | static int max_bonds = BOND_DEFAULT_MAX_BONDS; |
93 | static int tx_queues = BOND_DEFAULT_TX_QUEUES; | ||
93 | static int num_grat_arp = 1; | 94 | static int num_grat_arp = 1; |
94 | static int num_unsol_na = 1; | 95 | static int num_unsol_na = 1; |
95 | static int miimon = BOND_LINK_MON_INTERV; | 96 | static int miimon = BOND_LINK_MON_INTERV; |
@@ -106,10 +107,13 @@ static int arp_interval = BOND_LINK_ARP_INTERV; | |||
106 | static char *arp_ip_target[BOND_MAX_ARP_TARGETS]; | 107 | static char *arp_ip_target[BOND_MAX_ARP_TARGETS]; |
107 | static char *arp_validate; | 108 | static char *arp_validate; |
108 | static char *fail_over_mac; | 109 | static char *fail_over_mac; |
110 | static int all_slaves_active = 0; | ||
109 | static struct bond_params bonding_defaults; | 111 | static struct bond_params bonding_defaults; |
110 | 112 | ||
111 | module_param(max_bonds, int, 0); | 113 | module_param(max_bonds, int, 0); |
112 | MODULE_PARM_DESC(max_bonds, "Max number of bonded devices"); | 114 | MODULE_PARM_DESC(max_bonds, "Max number of bonded devices"); |
115 | module_param(tx_queues, int, 0); | ||
116 | MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)"); | ||
113 | module_param(num_grat_arp, int, 0644); | 117 | module_param(num_grat_arp, int, 0644); |
114 | MODULE_PARM_DESC(num_grat_arp, "Number of gratuitous ARP packets to send on failover event"); | 118 | MODULE_PARM_DESC(num_grat_arp, "Number of gratuitous ARP packets to send on failover event"); |
115 | module_param(num_unsol_na, int, 0644); | 119 | module_param(num_unsol_na, int, 0644); |
@@ -155,6 +159,10 @@ module_param(arp_validate, charp, 0); | |||
155 | MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes: none (default), active, backup or all"); | 159 | MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes: none (default), active, backup or all"); |
156 | module_param(fail_over_mac, charp, 0); | 160 | module_param(fail_over_mac, charp, 0); |
157 | MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the same MAC. none (default), active or follow"); | 161 | MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the same MAC. none (default), active or follow"); |
162 | module_param(all_slaves_active, int, 0); | ||
163 | MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface " | ||
164 | "by setting active flag for all slaves. " | ||
165 | "0 for never (default), 1 for always."); | ||
158 | 166 | ||
159 | /*----------------------------- Global variables ----------------------------*/ | 167 | /*----------------------------- Global variables ----------------------------*/ |
160 | 168 | ||
@@ -1522,16 +1530,32 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1522 | } | 1530 | } |
1523 | } | 1531 | } |
1524 | 1532 | ||
1533 | /* If this is the first slave, then we need to set the master's hardware | ||
1534 | * address to be the same as the slave's. */ | ||
1535 | if (bond->slave_cnt == 0) | ||
1536 | memcpy(bond->dev->dev_addr, slave_dev->dev_addr, | ||
1537 | slave_dev->addr_len); | ||
1538 | |||
1539 | |||
1525 | new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL); | 1540 | new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL); |
1526 | if (!new_slave) { | 1541 | if (!new_slave) { |
1527 | res = -ENOMEM; | 1542 | res = -ENOMEM; |
1528 | goto err_undo_flags; | 1543 | goto err_undo_flags; |
1529 | } | 1544 | } |
1530 | 1545 | ||
1531 | /* save slave's original flags before calling | 1546 | /* |
1532 | * netdev_set_master and dev_open | 1547 | * Set the new_slave's queue_id to be zero. Queue ID mapping |
1548 | * is set via sysfs or module option if desired. | ||
1533 | */ | 1549 | */ |
1534 | new_slave->original_flags = slave_dev->flags; | 1550 | new_slave->queue_id = 0; |
1551 | |||
1552 | /* Save slave's original mtu and then set it to match the bond */ | ||
1553 | new_slave->original_mtu = slave_dev->mtu; | ||
1554 | res = dev_set_mtu(slave_dev, bond->dev->mtu); | ||
1555 | if (res) { | ||
1556 | pr_debug("Error %d calling dev_set_mtu\n", res); | ||
1557 | goto err_free; | ||
1558 | } | ||
1535 | 1559 | ||
1536 | /* | 1560 | /* |
1537 | * Save slave's original ("permanent") mac address for modes | 1561 | * Save slave's original ("permanent") mac address for modes |
@@ -1550,7 +1574,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1550 | res = dev_set_mac_address(slave_dev, &addr); | 1574 | res = dev_set_mac_address(slave_dev, &addr); |
1551 | if (res) { | 1575 | if (res) { |
1552 | pr_debug("Error %d calling set_mac_address\n", res); | 1576 | pr_debug("Error %d calling set_mac_address\n", res); |
1553 | goto err_free; | 1577 | goto err_restore_mtu; |
1554 | } | 1578 | } |
1555 | } | 1579 | } |
1556 | 1580 | ||
@@ -1785,6 +1809,9 @@ err_restore_mac: | |||
1785 | dev_set_mac_address(slave_dev, &addr); | 1809 | dev_set_mac_address(slave_dev, &addr); |
1786 | } | 1810 | } |
1787 | 1811 | ||
1812 | err_restore_mtu: | ||
1813 | dev_set_mtu(slave_dev, new_slave->original_mtu); | ||
1814 | |||
1788 | err_free: | 1815 | err_free: |
1789 | kfree(new_slave); | 1816 | kfree(new_slave); |
1790 | 1817 | ||
@@ -1969,6 +1996,8 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1969 | dev_set_mac_address(slave_dev, &addr); | 1996 | dev_set_mac_address(slave_dev, &addr); |
1970 | } | 1997 | } |
1971 | 1998 | ||
1999 | dev_set_mtu(slave_dev, slave->original_mtu); | ||
2000 | |||
1972 | slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB | | 2001 | slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB | |
1973 | IFF_SLAVE_INACTIVE | IFF_BONDING | | 2002 | IFF_SLAVE_INACTIVE | IFF_BONDING | |
1974 | IFF_SLAVE_NEEDARP); | 2003 | IFF_SLAVE_NEEDARP); |
@@ -3265,6 +3294,7 @@ static void bond_info_show_slave(struct seq_file *seq, | |||
3265 | else | 3294 | else |
3266 | seq_puts(seq, "Aggregator ID: N/A\n"); | 3295 | seq_puts(seq, "Aggregator ID: N/A\n"); |
3267 | } | 3296 | } |
3297 | seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id); | ||
3268 | } | 3298 | } |
3269 | 3299 | ||
3270 | static int bond_info_seq_show(struct seq_file *seq, void *v) | 3300 | static int bond_info_seq_show(struct seq_file *seq, void *v) |
@@ -4401,9 +4431,59 @@ static void bond_set_xmit_hash_policy(struct bonding *bond) | |||
4401 | } | 4431 | } |
4402 | } | 4432 | } |
4403 | 4433 | ||
4434 | /* | ||
4435 | * Lookup the slave that corresponds to a qid | ||
4436 | */ | ||
4437 | static inline int bond_slave_override(struct bonding *bond, | ||
4438 | struct sk_buff *skb) | ||
4439 | { | ||
4440 | int i, res = 1; | ||
4441 | struct slave *slave = NULL; | ||
4442 | struct slave *check_slave; | ||
4443 | |||
4444 | read_lock(&bond->lock); | ||
4445 | |||
4446 | if (!BOND_IS_OK(bond) || !skb->queue_mapping) | ||
4447 | goto out; | ||
4448 | |||
4449 | /* Find out if any slaves have the same mapping as this skb. */ | ||
4450 | bond_for_each_slave(bond, check_slave, i) { | ||
4451 | if (check_slave->queue_id == skb->queue_mapping) { | ||
4452 | slave = check_slave; | ||
4453 | break; | ||
4454 | } | ||
4455 | } | ||
4456 | |||
4457 | /* If the slave isn't UP, use default transmit policy. */ | ||
4458 | if (slave && slave->queue_id && IS_UP(slave->dev) && | ||
4459 | (slave->link == BOND_LINK_UP)) { | ||
4460 | res = bond_dev_queue_xmit(bond, skb, slave->dev); | ||
4461 | } | ||
4462 | |||
4463 | out: | ||
4464 | read_unlock(&bond->lock); | ||
4465 | return res; | ||
4466 | } | ||
4467 | |||
4468 | static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb) | ||
4469 | { | ||
4470 | /* | ||
4471 | * This helper function exists to help dev_pick_tx get the correct | ||
4472 | * destination queue. Using a helper function skips a call to | ||
4473 | * skb_tx_hash and will put the skbs in the queue we expect on their | ||
4474 | * way down to the bonding driver. | ||
4475 | */ | ||
4476 | return skb->queue_mapping; | ||
4477 | } | ||
4478 | |||
4404 | static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) | 4479 | static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) |
4405 | { | 4480 | { |
4406 | const struct bonding *bond = netdev_priv(dev); | 4481 | struct bonding *bond = netdev_priv(dev); |
4482 | |||
4483 | if (TX_QUEUE_OVERRIDE(bond->params.mode)) { | ||
4484 | if (!bond_slave_override(bond, skb)) | ||
4485 | return NETDEV_TX_OK; | ||
4486 | } | ||
4407 | 4487 | ||
4408 | switch (bond->params.mode) { | 4488 | switch (bond->params.mode) { |
4409 | case BOND_MODE_ROUNDROBIN: | 4489 | case BOND_MODE_ROUNDROBIN: |
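bond_start_xmit() now consults bond_slave_override() first: in round-robin and active-backup modes a non-zero skb->queue_mapping that matches a slave's configured queue_id steers the frame to that slave, while anything else (queue 0, no match, or a slave that is down) falls through to the mode's normal policy. A stand-alone C sketch of just that selection rule follows; the slave table and names are invented for the example.

#include <stdio.h>

struct slave {
	const char *name;
	unsigned short queue_id;	/* 0 = not part of any override mapping */
	int up;
};

/* Return the slave a frame must use, or NULL to fall back to the mode's
 * normal transmit policy (hash, round-robin, active-backup, ...). */
static const struct slave *queue_override(const struct slave *sl, int n,
					  unsigned short queue_mapping)
{
	int i;

	if (!queue_mapping)		/* queue 0 never overrides */
		return NULL;

	for (i = 0; i < n; i++)
		if (sl[i].queue_id == queue_mapping)
			return sl[i].up ? &sl[i] : NULL;	/* down -> fall back */

	return NULL;
}

int main(void)
{
	struct slave sl[] = { { "eth0", 2, 1 }, { "eth1", 0, 1 } };
	const struct slave *s = queue_override(sl, 2, 2);

	printf("queue 2 -> %s\n", s ? s->name : "default policy");
	return 0;
}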
@@ -4488,6 +4568,7 @@ static const struct net_device_ops bond_netdev_ops = { | |||
4488 | .ndo_open = bond_open, | 4568 | .ndo_open = bond_open, |
4489 | .ndo_stop = bond_close, | 4569 | .ndo_stop = bond_close, |
4490 | .ndo_start_xmit = bond_start_xmit, | 4570 | .ndo_start_xmit = bond_start_xmit, |
4571 | .ndo_select_queue = bond_select_queue, | ||
4491 | .ndo_get_stats = bond_get_stats, | 4572 | .ndo_get_stats = bond_get_stats, |
4492 | .ndo_do_ioctl = bond_do_ioctl, | 4573 | .ndo_do_ioctl = bond_do_ioctl, |
4493 | .ndo_set_multicast_list = bond_set_multicast_list, | 4574 | .ndo_set_multicast_list = bond_set_multicast_list, |
@@ -4756,6 +4837,20 @@ static int bond_check_params(struct bond_params *params) | |||
4756 | } | 4837 | } |
4757 | } | 4838 | } |
4758 | 4839 | ||
4840 | if (tx_queues < 1 || tx_queues > 255) { | ||
4841 | pr_warning("Warning: tx_queues (%d) should be between " | ||
4842 | "1 and 255, resetting to %d\n", | ||
4843 | tx_queues, BOND_DEFAULT_TX_QUEUES); | ||
4844 | tx_queues = BOND_DEFAULT_TX_QUEUES; | ||
4845 | } | ||
4846 | |||
4847 | if ((all_slaves_active != 0) && (all_slaves_active != 1)) { | ||
4848 | pr_warning("Warning: all_slaves_active module parameter (%d), " | ||
4849 | "not of valid value (0/1), so it was set to " | ||
4850 | "0\n", all_slaves_active); | ||
4851 | all_slaves_active = 0; | ||
4852 | } | ||
4853 | |||
4759 | /* reset values for TLB/ALB */ | 4854 | /* reset values for TLB/ALB */ |
4760 | if ((bond_mode == BOND_MODE_TLB) || | 4855 | if ((bond_mode == BOND_MODE_TLB) || |
4761 | (bond_mode == BOND_MODE_ALB)) { | 4856 | (bond_mode == BOND_MODE_ALB)) { |
@@ -4926,6 +5021,8 @@ static int bond_check_params(struct bond_params *params) | |||
4926 | params->primary[0] = 0; | 5021 | params->primary[0] = 0; |
4927 | params->primary_reselect = primary_reselect_value; | 5022 | params->primary_reselect = primary_reselect_value; |
4928 | params->fail_over_mac = fail_over_mac_value; | 5023 | params->fail_over_mac = fail_over_mac_value; |
5024 | params->tx_queues = tx_queues; | ||
5025 | params->all_slaves_active = all_slaves_active; | ||
4929 | 5026 | ||
4930 | if (primary) { | 5027 | if (primary) { |
4931 | strncpy(params->primary, primary, IFNAMSIZ); | 5028 | strncpy(params->primary, primary, IFNAMSIZ); |
@@ -5012,8 +5109,8 @@ int bond_create(struct net *net, const char *name) | |||
5012 | 5109 | ||
5013 | rtnl_lock(); | 5110 | rtnl_lock(); |
5014 | 5111 | ||
5015 | bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "", | 5112 | bond_dev = alloc_netdev_mq(sizeof(struct bonding), name ? name : "", |
5016 | bond_setup); | 5113 | bond_setup, tx_queues); |
5017 | if (!bond_dev) { | 5114 | if (!bond_dev) { |
5018 | pr_err("%s: eek! can't alloc netdev!\n", name); | 5115 | pr_err("%s: eek! can't alloc netdev!\n", name); |
5019 | rtnl_unlock(); | 5116 | rtnl_unlock(); |
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index b8bec086daa1..f9a034361a8e 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c | |||
@@ -211,7 +211,8 @@ static ssize_t bonding_show_slaves(struct device *d, | |||
211 | /* | 211 | /* |
212 | * Set the slaves in the current bond. The bond interface must be | 212 | * Set the slaves in the current bond. The bond interface must be |
213 | * up for this to succeed. | 213 | * up for this to succeed. |
214 | * This function is largely the same flow as bonding_update_bonds(). | 214 | * This is supposed to be only thin wrapper for bond_enslave and bond_release. |
215 | * All hard work should be done there. | ||
215 | */ | 216 | */ |
216 | static ssize_t bonding_store_slaves(struct device *d, | 217 | static ssize_t bonding_store_slaves(struct device *d, |
217 | struct device_attribute *attr, | 218 | struct device_attribute *attr, |
@@ -219,10 +220,8 @@ static ssize_t bonding_store_slaves(struct device *d, | |||
219 | { | 220 | { |
220 | char command[IFNAMSIZ + 1] = { 0, }; | 221 | char command[IFNAMSIZ + 1] = { 0, }; |
221 | char *ifname; | 222 | char *ifname; |
222 | int i, res, found, ret = count; | 223 | int res, ret = count; |
223 | u32 original_mtu; | 224 | struct net_device *dev; |
224 | struct slave *slave; | ||
225 | struct net_device *dev = NULL; | ||
226 | struct bonding *bond = to_bond(d); | 225 | struct bonding *bond = to_bond(d); |
227 | 226 | ||
228 | /* Quick sanity check -- is the bond interface up? */ | 227 | /* Quick sanity check -- is the bond interface up? */ |
@@ -231,8 +230,6 @@ static ssize_t bonding_store_slaves(struct device *d, | |||
231 | bond->dev->name); | 230 | bond->dev->name); |
232 | } | 231 | } |
233 | 232 | ||
234 | /* Note: We can't hold bond->lock here, as bond_create grabs it. */ | ||
235 | |||
236 | if (!rtnl_trylock()) | 233 | if (!rtnl_trylock()) |
237 | return restart_syscall(); | 234 | return restart_syscall(); |
238 | 235 | ||
@@ -242,91 +239,33 @@ static ssize_t bonding_store_slaves(struct device *d, | |||
242 | !dev_valid_name(ifname)) | 239 | !dev_valid_name(ifname)) |
243 | goto err_no_cmd; | 240 | goto err_no_cmd; |
244 | 241 | ||
245 | if (command[0] == '+') { | 242 | dev = __dev_get_by_name(dev_net(bond->dev), ifname); |
246 | 243 | if (!dev) { | |
247 | /* Got a slave name in ifname. Is it already in the list? */ | 244 | pr_info("%s: Interface %s does not exist!\n", |
248 | found = 0; | 245 | bond->dev->name, ifname); |
249 | 246 | ret = -ENODEV; | |
250 | dev = __dev_get_by_name(dev_net(bond->dev), ifname); | 247 | goto out; |
251 | if (!dev) { | 248 | } |
252 | pr_info("%s: Interface %s does not exist!\n", | ||
253 | bond->dev->name, ifname); | ||
254 | ret = -ENODEV; | ||
255 | goto out; | ||
256 | } | ||
257 | |||
258 | if (dev->flags & IFF_UP) { | ||
259 | pr_err("%s: Error: Unable to enslave %s because it is already up.\n", | ||
260 | bond->dev->name, dev->name); | ||
261 | ret = -EPERM; | ||
262 | goto out; | ||
263 | } | ||
264 | |||
265 | read_lock(&bond->lock); | ||
266 | bond_for_each_slave(bond, slave, i) | ||
267 | if (slave->dev == dev) { | ||
268 | pr_err("%s: Interface %s is already enslaved!\n", | ||
269 | bond->dev->name, ifname); | ||
270 | ret = -EPERM; | ||
271 | read_unlock(&bond->lock); | ||
272 | goto out; | ||
273 | } | ||
274 | read_unlock(&bond->lock); | ||
275 | |||
276 | pr_info("%s: Adding slave %s.\n", bond->dev->name, ifname); | ||
277 | |||
278 | /* If this is the first slave, then we need to set | ||
279 | the master's hardware address to be the same as the | ||
280 | slave's. */ | ||
281 | if (is_zero_ether_addr(bond->dev->dev_addr)) | ||
282 | memcpy(bond->dev->dev_addr, dev->dev_addr, | ||
283 | dev->addr_len); | ||
284 | |||
285 | /* Set the slave's MTU to match the bond */ | ||
286 | original_mtu = dev->mtu; | ||
287 | res = dev_set_mtu(dev, bond->dev->mtu); | ||
288 | if (res) { | ||
289 | ret = res; | ||
290 | goto out; | ||
291 | } | ||
292 | 249 | ||
250 | switch (command[0]) { | ||
251 | case '+': | ||
252 | pr_info("%s: Adding slave %s.\n", bond->dev->name, dev->name); | ||
293 | res = bond_enslave(bond->dev, dev); | 253 | res = bond_enslave(bond->dev, dev); |
294 | bond_for_each_slave(bond, slave, i) | 254 | break; |
295 | if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0) | ||
296 | slave->original_mtu = original_mtu; | ||
297 | if (res) | ||
298 | ret = res; | ||
299 | 255 | ||
300 | goto out; | 256 | case '-': |
301 | } | 257 | pr_info("%s: Removing slave %s.\n", bond->dev->name, dev->name); |
258 | res = bond_release(bond->dev, dev); | ||
259 | break; | ||
302 | 260 | ||
303 | if (command[0] == '-') { | 261 | default: |
304 | dev = NULL; | 262 | goto err_no_cmd; |
305 | original_mtu = 0; | ||
306 | bond_for_each_slave(bond, slave, i) | ||
307 | if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0) { | ||
308 | dev = slave->dev; | ||
309 | original_mtu = slave->original_mtu; | ||
310 | break; | ||
311 | } | ||
312 | if (dev) { | ||
313 | pr_info("%s: Removing slave %s\n", | ||
314 | bond->dev->name, dev->name); | ||
315 | res = bond_release(bond->dev, dev); | ||
316 | if (res) { | ||
317 | ret = res; | ||
318 | goto out; | ||
319 | } | ||
320 | /* set the slave MTU to the default */ | ||
321 | dev_set_mtu(dev, original_mtu); | ||
322 | } else { | ||
323 | pr_err("unable to remove non-existent slave %s for bond %s.\n", | ||
324 | ifname, bond->dev->name); | ||
325 | ret = -ENODEV; | ||
326 | } | ||
327 | goto out; | ||
328 | } | 263 | } |
329 | 264 | ||
265 | if (res) | ||
266 | ret = res; | ||
267 | goto out; | ||
268 | |||
330 | err_no_cmd: | 269 | err_no_cmd: |
331 | pr_err("no command found in slaves file for bond %s. Use +ifname or -ifname.\n", | 270 | pr_err("no command found in slaves file for bond %s. Use +ifname or -ifname.\n", |
332 | bond->dev->name); | 271 | bond->dev->name); |
@@ -1472,7 +1411,173 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d, | |||
1472 | } | 1411 | } |
1473 | static DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL); | 1412 | static DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL); |
1474 | 1413 | ||
1414 | /* | ||
1415 | * Show the queue_ids of the slaves in the current bond. | ||
1416 | */ | ||
1417 | static ssize_t bonding_show_queue_id(struct device *d, | ||
1418 | struct device_attribute *attr, | ||
1419 | char *buf) | ||
1420 | { | ||
1421 | struct slave *slave; | ||
1422 | int i, res = 0; | ||
1423 | struct bonding *bond = to_bond(d); | ||
1424 | |||
1425 | if (!rtnl_trylock()) | ||
1426 | return restart_syscall(); | ||
1475 | 1427 | ||
1428 | read_lock(&bond->lock); | ||
1429 | bond_for_each_slave(bond, slave, i) { | ||
1430 | if (res > (PAGE_SIZE - 6)) { | ||
1431 | /* not enough space for another interface name */ | ||
1432 | if ((PAGE_SIZE - res) > 10) | ||
1433 | res = PAGE_SIZE - 10; | ||
1434 | res += sprintf(buf + res, "++more++ "); | ||
1435 | break; | ||
1436 | } | ||
1437 | res += sprintf(buf + res, "%s:%d ", | ||
1438 | slave->dev->name, slave->queue_id); | ||
1439 | } | ||
1440 | read_unlock(&bond->lock); | ||
1441 | if (res) | ||
1442 | buf[res-1] = '\n'; /* eat the leftover space */ | ||
1443 | rtnl_unlock(); | ||
1444 | return res; | ||
1445 | } | ||
1446 | |||
1447 | /* | ||
1448 | * Set the queue_id of a slave in the current bond. The interface | ||
1449 | * must already be enslaved to this bond for the setting to take effect. | ||
1450 | */ | ||
1451 | static ssize_t bonding_store_queue_id(struct device *d, | ||
1452 | struct device_attribute *attr, | ||
1453 | const char *buffer, size_t count) | ||
1454 | { | ||
1455 | struct slave *slave, *update_slave; | ||
1456 | struct bonding *bond = to_bond(d); | ||
1457 | u16 qid; | ||
1458 | int i, ret = count; | ||
1459 | char *delim; | ||
1460 | struct net_device *sdev = NULL; | ||
1461 | |||
1462 | if (!rtnl_trylock()) | ||
1463 | return restart_syscall(); | ||
1464 | |||
1465 | /* delim will point to queue id if successful */ | ||
1466 | delim = strchr(buffer, ':'); | ||
1467 | if (!delim) | ||
1468 | goto err_no_cmd; | ||
1469 | |||
1470 | /* | ||
1471 | * Terminate string that points to device name and bump it | ||
1472 | * up one, so we can read the queue id there. | ||
1473 | */ | ||
1474 | *delim = '\0'; | ||
1475 | if (sscanf(++delim, "%hd\n", &qid) != 1) | ||
1476 | goto err_no_cmd; | ||
1477 | |||
1478 | /* Check buffer length, valid ifname and queue id */ | ||
1479 | if (strlen(buffer) > IFNAMSIZ || | ||
1480 | !dev_valid_name(buffer) || | ||
1481 | qid > bond->params.tx_queues) | ||
1482 | goto err_no_cmd; | ||
1483 | |||
1484 | /* Get the pointer to that interface if it exists */ | ||
1485 | sdev = __dev_get_by_name(dev_net(bond->dev), buffer); | ||
1486 | if (!sdev) | ||
1487 | goto err_no_cmd; | ||
1488 | |||
1489 | read_lock(&bond->lock); | ||
1490 | |||
1491 | /* Search for the slave and check for duplicate qids */ | ||
1492 | update_slave = NULL; | ||
1493 | bond_for_each_slave(bond, slave, i) { | ||
1494 | if (sdev == slave->dev) | ||
1495 | /* | ||
1496 | * We don't need to check the matching | ||
1497 | * slave for dups, since we're overwriting it | ||
1498 | */ | ||
1499 | update_slave = slave; | ||
1500 | else if (qid && qid == slave->queue_id) { | ||
1501 | goto err_no_cmd_unlock; | ||
1502 | } | ||
1503 | } | ||
1504 | |||
1505 | if (!update_slave) | ||
1506 | goto err_no_cmd_unlock; | ||
1507 | |||
1508 | /* Actually set the qids for the slave */ | ||
1509 | update_slave->queue_id = qid; | ||
1510 | |||
1511 | read_unlock(&bond->lock); | ||
1512 | out: | ||
1513 | rtnl_unlock(); | ||
1514 | return ret; | ||
1515 | |||
1516 | err_no_cmd_unlock: | ||
1517 | read_unlock(&bond->lock); | ||
1518 | err_no_cmd: | ||
1519 | pr_info("invalid input for queue_id set for %s.\n", | ||
1520 | bond->dev->name); | ||
1521 | ret = -EPERM; | ||
1522 | goto out; | ||
1523 | } | ||
1524 | |||
1525 | static DEVICE_ATTR(queue_id, S_IRUGO | S_IWUSR, bonding_show_queue_id, | ||
1526 | bonding_store_queue_id); | ||
1527 | |||
1528 | |||
1529 | /* | ||
1530 | * Show and set the all_slaves_active flag. | ||
1531 | */ | ||
1532 | static ssize_t bonding_show_slaves_active(struct device *d, | ||
1533 | struct device_attribute *attr, | ||
1534 | char *buf) | ||
1535 | { | ||
1536 | struct bonding *bond = to_bond(d); | ||
1537 | |||
1538 | return sprintf(buf, "%d\n", bond->params.all_slaves_active); | ||
1539 | } | ||
1540 | |||
1541 | static ssize_t bonding_store_slaves_active(struct device *d, | ||
1542 | struct device_attribute *attr, | ||
1543 | const char *buf, size_t count) | ||
1544 | { | ||
1545 | int i, new_value, ret = count; | ||
1546 | struct bonding *bond = to_bond(d); | ||
1547 | struct slave *slave; | ||
1548 | |||
1549 | if (sscanf(buf, "%d", &new_value) != 1) { | ||
1550 | pr_err("%s: no all_slaves_active value specified.\n", | ||
1551 | bond->dev->name); | ||
1552 | ret = -EINVAL; | ||
1553 | goto out; | ||
1554 | } | ||
1555 | |||
1556 | if (new_value == bond->params.all_slaves_active) | ||
1557 | goto out; | ||
1558 | |||
1559 | if ((new_value == 0) || (new_value == 1)) { | ||
1560 | bond->params.all_slaves_active = new_value; | ||
1561 | } else { | ||
1562 | pr_info("%s: Ignoring invalid all_slaves_active value %d.\n", | ||
1563 | bond->dev->name, new_value); | ||
1564 | ret = -EINVAL; | ||
1565 | goto out; | ||
1566 | } | ||
1567 | |||
1568 | bond_for_each_slave(bond, slave, i) { | ||
1569 | if (slave->state == BOND_STATE_BACKUP) { | ||
1570 | if (new_value) | ||
1571 | slave->dev->priv_flags &= ~IFF_SLAVE_INACTIVE; | ||
1572 | else | ||
1573 | slave->dev->priv_flags |= IFF_SLAVE_INACTIVE; | ||
1574 | } | ||
1575 | } | ||
1576 | out: | ||
1577 | return count; | ||
1578 | } | ||
1579 | static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR, | ||
1580 | bonding_show_slaves_active, bonding_store_slaves_active); | ||
1476 | 1581 | ||
1477 | static struct attribute *per_bond_attrs[] = { | 1582 | static struct attribute *per_bond_attrs[] = { |
1478 | &dev_attr_slaves.attr, | 1583 | &dev_attr_slaves.attr, |
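The new queue_id store above accepts input of the form "ifname:qid": it splits at the first ':', NUL-terminates the interface name, parses the queue id, and rejects over-long names or ids larger than tx_queues. Below is a stand-alone C sketch of that parsing and validation; the IFNAMSIZ and TX_QUEUES values are illustrative stand-ins.

#include <stdio.h>
#include <string.h>

#define IFNAMSIZ	16
#define TX_QUEUES	16	/* stands in for bond->params.tx_queues */

/* Parse "ifname:qid"; returns 0 on success, -1 on malformed input. */
static int parse_queue_id(char *buffer, char *ifname, unsigned short *qid)
{
	char *delim = strchr(buffer, ':');

	if (!delim)
		return -1;
	*delim = '\0';			/* terminate the interface name */
	if (sscanf(delim + 1, "%hu", qid) != 1)
		return -1;
	if (strlen(buffer) >= IFNAMSIZ || *qid > TX_QUEUES)
		return -1;
	strcpy(ifname, buffer);
	return 0;
}

int main(void)
{
	char input[] = "eth1:2";
	char ifname[IFNAMSIZ];
	unsigned short qid;

	if (parse_queue_id(input, ifname, &qid) == 0)
		printf("%s -> queue %hu\n", ifname, qid);
	return 0;
}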
@@ -1499,6 +1604,8 @@ static struct attribute *per_bond_attrs[] = { | |||
1499 | &dev_attr_ad_actor_key.attr, | 1604 | &dev_attr_ad_actor_key.attr, |
1500 | &dev_attr_ad_partner_key.attr, | 1605 | &dev_attr_ad_partner_key.attr, |
1501 | &dev_attr_ad_partner_mac.attr, | 1606 | &dev_attr_ad_partner_mac.attr, |
1607 | &dev_attr_queue_id.attr, | ||
1608 | &dev_attr_all_slaves_active.attr, | ||
1502 | NULL, | 1609 | NULL, |
1503 | }; | 1610 | }; |
1504 | 1611 | ||
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 2aa336720591..c6fdd851579a 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h | |||
@@ -23,8 +23,8 @@ | |||
23 | #include "bond_3ad.h" | 23 | #include "bond_3ad.h" |
24 | #include "bond_alb.h" | 24 | #include "bond_alb.h" |
25 | 25 | ||
26 | #define DRV_VERSION "3.6.0" | 26 | #define DRV_VERSION "3.7.0" |
27 | #define DRV_RELDATE "September 26, 2009" | 27 | #define DRV_RELDATE "June 2, 2010" |
28 | #define DRV_NAME "bonding" | 28 | #define DRV_NAME "bonding" |
29 | #define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" | 29 | #define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" |
30 | 30 | ||
@@ -60,6 +60,9 @@ | |||
60 | ((mode) == BOND_MODE_TLB) || \ | 60 | ((mode) == BOND_MODE_TLB) || \ |
61 | ((mode) == BOND_MODE_ALB)) | 61 | ((mode) == BOND_MODE_ALB)) |
62 | 62 | ||
63 | #define TX_QUEUE_OVERRIDE(mode) \ | ||
64 | (((mode) == BOND_MODE_ACTIVEBACKUP) || \ | ||
65 | ((mode) == BOND_MODE_ROUNDROBIN)) | ||
63 | /* | 66 | /* |
64 | * Less bad way to call ioctl from within the kernel; this needs to be | 67 | * Less bad way to call ioctl from within the kernel; this needs to be |
65 | * done some other way to get the call out of interrupt context. | 68 | * done some other way to get the call out of interrupt context. |
@@ -131,6 +134,8 @@ struct bond_params { | |||
131 | char primary[IFNAMSIZ]; | 134 | char primary[IFNAMSIZ]; |
132 | int primary_reselect; | 135 | int primary_reselect; |
133 | __be32 arp_targets[BOND_MAX_ARP_TARGETS]; | 136 | __be32 arp_targets[BOND_MAX_ARP_TARGETS]; |
137 | int tx_queues; | ||
138 | int all_slaves_active; | ||
134 | }; | 139 | }; |
135 | 140 | ||
136 | struct bond_parm_tbl { | 141 | struct bond_parm_tbl { |
@@ -159,12 +164,12 @@ struct slave { | |||
159 | s8 link; /* one of BOND_LINK_XXXX */ | 164 | s8 link; /* one of BOND_LINK_XXXX */ |
160 | s8 new_link; | 165 | s8 new_link; |
161 | s8 state; /* one of BOND_STATE_XXXX */ | 166 | s8 state; /* one of BOND_STATE_XXXX */ |
162 | u32 original_flags; | ||
163 | u32 original_mtu; | 167 | u32 original_mtu; |
164 | u32 link_failure_count; | 168 | u32 link_failure_count; |
165 | u8 perm_hwaddr[ETH_ALEN]; | 169 | u8 perm_hwaddr[ETH_ALEN]; |
166 | u16 speed; | 170 | u16 speed; |
167 | u8 duplex; | 171 | u8 duplex; |
172 | u16 queue_id; | ||
168 | struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */ | 173 | struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */ |
169 | struct tlb_slave_info tlb_info; | 174 | struct tlb_slave_info tlb_info; |
170 | }; | 175 | }; |
@@ -291,7 +296,8 @@ static inline void bond_set_slave_inactive_flags(struct slave *slave) | |||
291 | struct bonding *bond = netdev_priv(slave->dev->master); | 296 | struct bonding *bond = netdev_priv(slave->dev->master); |
292 | if (!bond_is_lb(bond)) | 297 | if (!bond_is_lb(bond)) |
293 | slave->state = BOND_STATE_BACKUP; | 298 | slave->state = BOND_STATE_BACKUP; |
294 | slave->dev->priv_flags |= IFF_SLAVE_INACTIVE; | 299 | if (!bond->params.all_slaves_active) |
300 | slave->dev->priv_flags |= IFF_SLAVE_INACTIVE; | ||
295 | if (slave_do_arp_validate(bond, slave)) | 301 | if (slave_do_arp_validate(bond, slave)) |
296 | slave->dev->priv_flags |= IFF_SLAVE_NEEDARP; | 302 | slave->dev->priv_flags |= IFF_SLAVE_NEEDARP; |
297 | } | 303 | } |
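
Note: bonding.h now carries a per-slave queue_id, per-bond tx_queues and all_slaves_active parameters, and a TX_QUEUE_OVERRIDE() test that is true only for active-backup and round-robin modes. How the transmit path consumes queue_id is not shown in this hunk; the sketch below is only one plausible shape of that lookup, and the helper name and fallback behaviour are assumptions, not the driver's actual code.

	/* Illustrative only: match skb->queue_mapping against each slave's
	 * queue_id when the current mode permits an override. */
	static struct slave *bond_pick_queue_slave(struct bonding *bond,
						   struct sk_buff *skb)
	{
		struct slave *slave;
		int i;

		if (!TX_QUEUE_OVERRIDE(bond->params.mode) || !skb->queue_mapping)
			return NULL;	/* fall back to the mode's normal slave selection */

		bond_for_each_slave(bond, slave, i)
			if (slave->queue_id == skb->queue_mapping)
				return slave;

		return NULL;
	}
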
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c index 09257ca8f563..3e706f00a0d3 100644 --- a/drivers/net/caif/caif_serial.c +++ b/drivers/net/caif/caif_serial.c | |||
@@ -174,6 +174,7 @@ static void ldisc_receive(struct tty_struct *tty, const u8 *data, | |||
174 | struct ser_device *ser; | 174 | struct ser_device *ser; |
175 | int ret; | 175 | int ret; |
176 | u8 *p; | 176 | u8 *p; |
177 | |||
177 | ser = tty->disc_data; | 178 | ser = tty->disc_data; |
178 | 179 | ||
179 | /* | 180 | /* |
@@ -221,6 +222,7 @@ static int handle_tx(struct ser_device *ser) | |||
221 | struct tty_struct *tty; | 222 | struct tty_struct *tty; |
222 | struct sk_buff *skb; | 223 | struct sk_buff *skb; |
223 | int tty_wr, len, room; | 224 | int tty_wr, len, room; |
225 | |||
224 | tty = ser->tty; | 226 | tty = ser->tty; |
225 | ser->tx_started = true; | 227 | ser->tx_started = true; |
226 | 228 | ||
@@ -281,6 +283,7 @@ error: | |||
281 | static int caif_xmit(struct sk_buff *skb, struct net_device *dev) | 283 | static int caif_xmit(struct sk_buff *skb, struct net_device *dev) |
282 | { | 284 | { |
283 | struct ser_device *ser; | 285 | struct ser_device *ser; |
286 | |||
284 | BUG_ON(dev == NULL); | 287 | BUG_ON(dev == NULL); |
285 | ser = netdev_priv(dev); | 288 | ser = netdev_priv(dev); |
286 | 289 | ||
@@ -299,6 +302,7 @@ static int caif_xmit(struct sk_buff *skb, struct net_device *dev) | |||
299 | static void ldisc_tx_wakeup(struct tty_struct *tty) | 302 | static void ldisc_tx_wakeup(struct tty_struct *tty) |
300 | { | 303 | { |
301 | struct ser_device *ser; | 304 | struct ser_device *ser; |
305 | |||
302 | ser = tty->disc_data; | 306 | ser = tty->disc_data; |
303 | BUG_ON(ser == NULL); | 307 | BUG_ON(ser == NULL); |
304 | BUG_ON(ser->tty != tty); | 308 | BUG_ON(ser->tty != tty); |
@@ -348,6 +352,7 @@ static void ldisc_close(struct tty_struct *tty) | |||
348 | struct ser_device *ser = tty->disc_data; | 352 | struct ser_device *ser = tty->disc_data; |
349 | /* Remove may be called inside or outside of rtnl_lock */ | 353 | /* Remove may be called inside or outside of rtnl_lock */ |
350 | int islocked = rtnl_is_locked(); | 354 | int islocked = rtnl_is_locked(); |
355 | |||
351 | if (!islocked) | 356 | if (!islocked) |
352 | rtnl_lock(); | 357 | rtnl_lock(); |
353 | /* device is freed automagically by net-sysfs */ | 358 | /* device is freed automagically by net-sysfs */ |
@@ -374,6 +379,7 @@ static struct tty_ldisc_ops caif_ldisc = { | |||
374 | static int register_ldisc(void) | 379 | static int register_ldisc(void) |
375 | { | 380 | { |
376 | int result; | 381 | int result; |
382 | |||
377 | result = tty_register_ldisc(N_CAIF, &caif_ldisc); | 383 | result = tty_register_ldisc(N_CAIF, &caif_ldisc); |
378 | if (result < 0) { | 384 | if (result < 0) { |
379 | pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF, | 385 | pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF, |
@@ -391,6 +397,7 @@ static const struct net_device_ops netdev_ops = { | |||
391 | static void caifdev_setup(struct net_device *dev) | 397 | static void caifdev_setup(struct net_device *dev) |
392 | { | 398 | { |
393 | struct ser_device *serdev = netdev_priv(dev); | 399 | struct ser_device *serdev = netdev_priv(dev); |
400 | |||
394 | dev->features = 0; | 401 | dev->features = 0; |
395 | dev->netdev_ops = &netdev_ops; | 402 | dev->netdev_ops = &netdev_ops; |
396 | dev->type = ARPHRD_CAIF; | 403 | dev->type = ARPHRD_CAIF; |
@@ -410,8 +417,6 @@ static void caifdev_setup(struct net_device *dev) | |||
410 | 417 | ||
411 | static int caif_net_open(struct net_device *dev) | 418 | static int caif_net_open(struct net_device *dev) |
412 | { | 419 | { |
413 | struct ser_device *ser; | ||
414 | ser = netdev_priv(dev); | ||
415 | netif_wake_queue(dev); | 420 | netif_wake_queue(dev); |
416 | return 0; | 421 | return 0; |
417 | } | 422 | } |
@@ -425,6 +430,7 @@ static int caif_net_close(struct net_device *dev) | |||
425 | static int __init caif_ser_init(void) | 430 | static int __init caif_ser_init(void) |
426 | { | 431 | { |
427 | int ret; | 432 | int ret; |
433 | |||
428 | ret = register_ldisc(); | 434 | ret = register_ldisc(); |
429 | debugfsdir = debugfs_create_dir("caif_serial", NULL); | 435 | debugfsdir = debugfs_create_dir("caif_serial", NULL); |
430 | return ret; | 436 | return ret; |
@@ -435,6 +441,7 @@ static void __exit caif_ser_exit(void) | |||
435 | struct ser_device *ser = NULL; | 441 | struct ser_device *ser = NULL; |
436 | struct list_head *node; | 442 | struct list_head *node; |
437 | struct list_head *_tmp; | 443 | struct list_head *_tmp; |
444 | |||
438 | list_for_each_safe(node, _tmp, &ser_list) { | 445 | list_for_each_safe(node, _tmp, &ser_list) { |
439 | ser = list_entry(node, struct ser_device, node); | 446 | ser = list_entry(node, struct ser_device, node); |
440 | dev_close(ser->dev); | 447 | dev_close(ser->dev); |
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h index 4ff966473bc9..b43e9f5d3268 100644 --- a/drivers/net/can/mscan/mscan.h +++ b/drivers/net/can/mscan/mscan.h | |||
@@ -227,7 +227,7 @@ struct mscan_regs { | |||
227 | u16 time; /* + 0x7c 0x3e */ | 227 | u16 time; /* + 0x7c 0x3e */ |
228 | } tx; | 228 | } tx; |
229 | _MSCAN_RESERVED_(32, 2); /* + 0x7e */ | 229 | _MSCAN_RESERVED_(32, 2); /* + 0x7e */ |
230 | } __attribute__ ((packed)); | 230 | } __packed; |
231 | 231 | ||
232 | #undef _MSCAN_RESERVED_ | 232 | #undef _MSCAN_RESERVED_ |
233 | #define MSCAN_REGION sizeof(struct mscan) | 233 | #define MSCAN_REGION sizeof(struct mscan) |
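
Note: this and several of the following hunks (ems_usb, dm9000, ehea, enic, the irda drivers, fsl_pq_mdio) replace the open-coded GCC attribute with the kernel's __packed shorthand from <linux/compiler.h>. The two spellings below are equivalent; the struct name and fields are made up for illustration.

	#include <linux/compiler.h>	/* #define __packed __attribute__((packed)) */
	#include <linux/types.h>

	struct wire_hdr_old {
		u8	type;
		__le16	len;
	} __attribute__((packed));

	struct wire_hdr_new {		/* identical layout, no padding between fields */
		u8	type;
		__le16	len;
	} __packed;
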
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 1fc0871d2ef7..e75f1a876972 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c | |||
@@ -197,7 +197,7 @@ struct cpc_can_err_counter { | |||
197 | }; | 197 | }; |
198 | 198 | ||
199 | /* Main message type used between library and application */ | 199 | /* Main message type used between library and application */ |
200 | struct __attribute__ ((packed)) ems_cpc_msg { | 200 | struct __packed ems_cpc_msg { |
201 | u8 type; /* type of message */ | 201 | u8 type; /* type of message */ |
202 | u8 length; /* length of data within union 'msg' */ | 202 | u8 length; /* length of data within union 'msg' */ |
203 | u8 msgid; /* confirmation handle */ | 203 | u8 msgid; /* confirmation handle */ |
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h index 036b2dfb1d40..092f31a126e6 100644 --- a/drivers/net/chelsio/common.h +++ b/drivers/net/chelsio/common.h | |||
@@ -286,7 +286,6 @@ struct board_info { | |||
286 | unsigned int clock_mc3; | 286 | unsigned int clock_mc3; |
287 | unsigned int clock_mc4; | 287 | unsigned int clock_mc4; |
288 | unsigned int espi_nports; | 288 | unsigned int espi_nports; |
289 | unsigned int clock_cspi; | ||
290 | unsigned int clock_elmer0; | 289 | unsigned int clock_elmer0; |
291 | unsigned char mdio_mdien; | 290 | unsigned char mdio_mdien; |
292 | unsigned char mdio_mdiinv; | 291 | unsigned char mdio_mdiinv; |
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c index 53bde15fc94d..599d178df62d 100644 --- a/drivers/net/chelsio/subr.c +++ b/drivers/net/chelsio/subr.c | |||
@@ -185,9 +185,6 @@ static int t1_pci_intr_handler(adapter_t *adapter) | |||
185 | return 0; | 185 | return 0; |
186 | } | 186 | } |
187 | 187 | ||
188 | #ifdef CONFIG_CHELSIO_T1_COUGAR | ||
189 | #include "cspi.h" | ||
190 | #endif | ||
191 | #ifdef CONFIG_CHELSIO_T1_1G | 188 | #ifdef CONFIG_CHELSIO_T1_1G |
192 | #include "fpga_defs.h" | 189 | #include "fpga_defs.h" |
193 | 190 | ||
@@ -280,7 +277,7 @@ static void mi1_mdio_init(adapter_t *adapter, const struct board_info *bi) | |||
280 | t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val); | 277 | t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val); |
281 | } | 278 | } |
282 | 279 | ||
283 | #if defined(CONFIG_CHELSIO_T1_1G) || defined(CONFIG_CHELSIO_T1_COUGAR) | 280 | #if defined(CONFIG_CHELSIO_T1_1G) |
284 | /* | 281 | /* |
285 | * Elmer MI1 MDIO read/write operations. | 282 | * Elmer MI1 MDIO read/write operations. |
286 | */ | 283 | */ |
@@ -317,7 +314,7 @@ static int mi1_mdio_write(struct net_device *dev, int phy_addr, int mmd_addr, | |||
317 | return 0; | 314 | return 0; |
318 | } | 315 | } |
319 | 316 | ||
320 | #if defined(CONFIG_CHELSIO_T1_1G) || defined(CONFIG_CHELSIO_T1_COUGAR) | 317 | #if defined(CONFIG_CHELSIO_T1_1G) |
321 | static const struct mdio_ops mi1_mdio_ops = { | 318 | static const struct mdio_ops mi1_mdio_ops = { |
322 | .init = mi1_mdio_init, | 319 | .init = mi1_mdio_init, |
323 | .read = mi1_mdio_read, | 320 | .read = mi1_mdio_read, |
@@ -752,31 +749,6 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter) | |||
752 | mod_detect ? "removed" : "inserted"); | 749 | mod_detect ? "removed" : "inserted"); |
753 | } | 750 | } |
754 | break; | 751 | break; |
755 | #ifdef CONFIG_CHELSIO_T1_COUGAR | ||
756 | case CHBT_BOARD_COUGAR: | ||
757 | if (adapter->params.nports == 1) { | ||
758 | if (cause & ELMER0_GP_BIT1) { /* Vitesse MAC */ | ||
759 | struct cmac *mac = adapter->port[0].mac; | ||
760 | mac->ops->interrupt_handler(mac); | ||
761 | } | ||
762 | if (cause & ELMER0_GP_BIT5) { /* XPAK MOD_DETECT */ | ||
763 | } | ||
764 | } else { | ||
765 | int i, port_bit; | ||
766 | |||
767 | for_each_port(adapter, i) { | ||
768 | port_bit = i ? i + 1 : 0; | ||
769 | if (!(cause & (1 << port_bit))) | ||
770 | continue; | ||
771 | |||
772 | phy = adapter->port[i].phy; | ||
773 | phy_cause = phy->ops->interrupt_handler(phy); | ||
774 | if (phy_cause & cphy_cause_link_change) | ||
775 | t1_link_changed(adapter, i); | ||
776 | } | ||
777 | } | ||
778 | break; | ||
779 | #endif | ||
780 | } | 752 | } |
781 | t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause); | 753 | t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause); |
782 | return 0; | 754 | return 0; |
@@ -955,7 +927,6 @@ static int board_init(adapter_t *adapter, const struct board_info *bi) | |||
955 | case CHBT_BOARD_N110: | 927 | case CHBT_BOARD_N110: |
956 | case CHBT_BOARD_N210: | 928 | case CHBT_BOARD_N210: |
957 | case CHBT_BOARD_CHT210: | 929 | case CHBT_BOARD_CHT210: |
958 | case CHBT_BOARD_COUGAR: | ||
959 | t1_tpi_par(adapter, 0xf); | 930 | t1_tpi_par(adapter, 0xf); |
960 | t1_tpi_write(adapter, A_ELMER0_GPO, 0x800); | 931 | t1_tpi_write(adapter, A_ELMER0_GPO, 0x800); |
961 | break; | 932 | break; |
@@ -1004,10 +975,6 @@ int t1_init_hw_modules(adapter_t *adapter) | |||
1004 | adapter->regs + A_MC5_CONFIG); | 975 | adapter->regs + A_MC5_CONFIG); |
1005 | } | 976 | } |
1006 | 977 | ||
1007 | #ifdef CONFIG_CHELSIO_T1_COUGAR | ||
1008 | if (adapter->cspi && t1_cspi_init(adapter->cspi)) | ||
1009 | goto out_err; | ||
1010 | #endif | ||
1011 | if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac, | 978 | if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac, |
1012 | bi->espi_nports)) | 979 | bi->espi_nports)) |
1013 | goto out_err; | 980 | goto out_err; |
@@ -1061,10 +1028,6 @@ void t1_free_sw_modules(adapter_t *adapter) | |||
1061 | t1_tp_destroy(adapter->tp); | 1028 | t1_tp_destroy(adapter->tp); |
1062 | if (adapter->espi) | 1029 | if (adapter->espi) |
1063 | t1_espi_destroy(adapter->espi); | 1030 | t1_espi_destroy(adapter->espi); |
1064 | #ifdef CONFIG_CHELSIO_T1_COUGAR | ||
1065 | if (adapter->cspi) | ||
1066 | t1_cspi_destroy(adapter->cspi); | ||
1067 | #endif | ||
1068 | } | 1031 | } |
1069 | 1032 | ||
1070 | static void __devinit init_link_config(struct link_config *lc, | 1033 | static void __devinit init_link_config(struct link_config *lc, |
@@ -1084,14 +1047,6 @@ static void __devinit init_link_config(struct link_config *lc, | |||
1084 | } | 1047 | } |
1085 | } | 1048 | } |
1086 | 1049 | ||
1087 | #ifdef CONFIG_CHELSIO_T1_COUGAR | ||
1088 | if (bi->clock_cspi && !(adapter->cspi = t1_cspi_create(adapter))) { | ||
1089 | pr_err("%s: CSPI initialization failed\n", | ||
1090 | adapter->name); | ||
1091 | goto error; | ||
1092 | } | ||
1093 | #endif | ||
1094 | |||
1095 | /* | 1050 | /* |
1096 | * Allocate and initialize the data structures that hold the SW state of | 1051 | * Allocate and initialize the data structures that hold the SW state of |
1097 | * the Terminator HW modules. | 1052 | * the Terminator HW modules. |
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index e3f1b8566495..066fd5b09fda 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c | |||
@@ -2311,15 +2311,9 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) | |||
2311 | if (copy_from_user(&t, useraddr, sizeof(t))) | 2311 | if (copy_from_user(&t, useraddr, sizeof(t))) |
2312 | return -EFAULT; | 2312 | return -EFAULT; |
2313 | /* Check t.len sanity ? */ | 2313 | /* Check t.len sanity ? */ |
2314 | fw_data = kmalloc(t.len, GFP_KERNEL); | 2314 | fw_data = memdup_user(useraddr + sizeof(t), t.len); |
2315 | if (!fw_data) | 2315 | if (IS_ERR(fw_data)) |
2316 | return -ENOMEM; | 2316 | return PTR_ERR(fw_data); |
2317 | |||
2318 | if (copy_from_user | ||
2319 | (fw_data, useraddr + sizeof(t), t.len)) { | ||
2320 | kfree(fw_data); | ||
2321 | return -EFAULT; | ||
2322 | } | ||
2323 | 2317 | ||
2324 | ret = t3_load_fw(adapter, fw_data, t.len); | 2318 | ret = t3_load_fw(adapter, fw_data, t.len); |
2325 | kfree(fw_data); | 2319 | kfree(fw_data); |
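
Note: the cxgb3 ioctl path above collapses the kmalloc()/copy_from_user() pair into memdup_user(), which allocates and copies in one step and reports failure through ERR_PTR(). A small sketch of the same pattern, with the function name invented for illustration:

	#include <linux/err.h>
	#include <linux/slab.h>
	#include <linux/string.h>	/* memdup_user() */
	#include <linux/types.h>
	#include <linux/uaccess.h>

	static int example_copy_in(void __user *useraddr, size_t len)
	{
		u8 *buf = memdup_user(useraddr, len);

		if (IS_ERR(buf))
			return PTR_ERR(buf);	/* -EFAULT or -ENOMEM, as appropriate */

		/* ... consume buf ... */
		kfree(buf);
		return 0;
	}
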
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index abcc838e18af..4fd6b2b4554b 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c | |||
@@ -961,7 +961,7 @@ struct dm9000_rxhdr { | |||
961 | u8 RxPktReady; | 961 | u8 RxPktReady; |
962 | u8 RxStatus; | 962 | u8 RxStatus; |
963 | __le16 RxLen; | 963 | __le16 RxLen; |
964 | } __attribute__((__packed__)); | 964 | } __packed; |
965 | 965 | ||
966 | /* | 966 | /* |
967 | * Received a packet and pass to upper layer | 967 | * Received a packet and pass to upper layer |
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h index 882c50c9c34f..f608a6c54af5 100644 --- a/drivers/net/ehea/ehea_qmr.h +++ b/drivers/net/ehea/ehea_qmr.h | |||
@@ -126,7 +126,7 @@ struct ehea_swqe { | |||
126 | u8 immediate_data[SWQE2_MAX_IMM]; | 126 | u8 immediate_data[SWQE2_MAX_IMM]; |
127 | /* 0xd0 */ | 127 | /* 0xd0 */ |
128 | struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1]; | 128 | struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1]; |
129 | } immdata_desc __attribute__ ((packed)); | 129 | } immdata_desc __packed; |
130 | 130 | ||
131 | /* Send WQE Format 3 */ | 131 | /* Send WQE Format 3 */ |
132 | struct { | 132 | struct { |
diff --git a/drivers/net/enic/vnic_vic.h b/drivers/net/enic/vnic_vic.h index 085c2a274cb1..7e46e5e8600f 100644 --- a/drivers/net/enic/vnic_vic.h +++ b/drivers/net/enic/vnic_vic.h | |||
@@ -44,7 +44,7 @@ struct vic_provinfo { | |||
44 | u16 length; | 44 | u16 length; |
45 | u8 value[0]; | 45 | u8 value[0]; |
46 | } tlv[0]; | 46 | } tlv[0]; |
47 | } __attribute__ ((packed)); | 47 | } __packed; |
48 | 48 | ||
49 | #define VIC_PROVINFO_MAX_DATA 1385 | 49 | #define VIC_PROVINFO_MAX_DATA 1385 |
50 | #define VIC_PROVINFO_MAX_TLV_DATA (VIC_PROVINFO_MAX_DATA - \ | 50 | #define VIC_PROVINFO_MAX_TLV_DATA (VIC_PROVINFO_MAX_DATA - \ |
diff --git a/drivers/net/fec.c b/drivers/net/fec.c index edfff92a6d8e..a3cae4ed6ac9 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c | |||
@@ -210,7 +210,7 @@ static void fec_stop(struct net_device *dev); | |||
210 | /* Transmitter timeout */ | 210 | /* Transmitter timeout */ |
211 | #define TX_TIMEOUT (2 * HZ) | 211 | #define TX_TIMEOUT (2 * HZ) |
212 | 212 | ||
213 | static int | 213 | static netdev_tx_t |
214 | fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) | 214 | fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) |
215 | { | 215 | { |
216 | struct fec_enet_private *fep = netdev_priv(dev); | 216 | struct fec_enet_private *fep = netdev_priv(dev); |
@@ -679,30 +679,24 @@ static int fec_enet_mii_probe(struct net_device *dev) | |||
679 | { | 679 | { |
680 | struct fec_enet_private *fep = netdev_priv(dev); | 680 | struct fec_enet_private *fep = netdev_priv(dev); |
681 | struct phy_device *phy_dev = NULL; | 681 | struct phy_device *phy_dev = NULL; |
682 | int phy_addr; | 682 | int ret; |
683 | 683 | ||
684 | fep->phy_dev = NULL; | 684 | fep->phy_dev = NULL; |
685 | 685 | ||
686 | /* find the first phy */ | 686 | /* find the first phy */ |
687 | for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { | 687 | phy_dev = phy_find_first(fep->mii_bus); |
688 | if (fep->mii_bus->phy_map[phy_addr]) { | ||
689 | phy_dev = fep->mii_bus->phy_map[phy_addr]; | ||
690 | break; | ||
691 | } | ||
692 | } | ||
693 | |||
694 | if (!phy_dev) { | 688 | if (!phy_dev) { |
695 | printk(KERN_ERR "%s: no PHY found\n", dev->name); | 689 | printk(KERN_ERR "%s: no PHY found\n", dev->name); |
696 | return -ENODEV; | 690 | return -ENODEV; |
697 | } | 691 | } |
698 | 692 | ||
699 | /* attach the mac to the phy */ | 693 | /* attach the mac to the phy */ |
700 | phy_dev = phy_connect(dev, dev_name(&phy_dev->dev), | 694 | ret = phy_connect_direct(dev, phy_dev, |
701 | &fec_enet_adjust_link, 0, | 695 | &fec_enet_adjust_link, 0, |
702 | PHY_INTERFACE_MODE_MII); | 696 | PHY_INTERFACE_MODE_MII); |
703 | if (IS_ERR(phy_dev)) { | 697 | if (ret) { |
704 | printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); | 698 | printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); |
705 | return PTR_ERR(phy_dev); | 699 | return ret; |
706 | } | 700 | } |
707 | 701 | ||
708 | /* mask with MAC supported features */ | 702 | /* mask with MAC supported features */ |
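
Note: the fec probe rewrite above swaps the hand-rolled scan of mii_bus->phy_map for phy_find_first() and attaches with phy_connect_direct(), which takes the phy_device that was just found and returns an errno rather than a pointer. A condensed sketch of the same flow (the function name is illustrative):

	#include <linux/netdevice.h>
	#include <linux/phy.h>

	static int example_mii_probe(struct net_device *ndev, struct mii_bus *bus)
	{
		struct phy_device *phy_dev;
		int ret;

		phy_dev = phy_find_first(bus);	/* first populated PHY address */
		if (!phy_dev)
			return -ENODEV;

		ret = phy_connect_direct(ndev, phy_dev, &fec_enet_adjust_link,
					 0, PHY_INTERFACE_MODE_MII);
		if (ret)
			return ret;		/* no pointer/IS_ERR dance needed */

		return 0;
	}
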
@@ -1365,6 +1359,8 @@ fec_drv_remove(struct platform_device *pdev) | |||
1365 | return 0; | 1359 | return 0; |
1366 | } | 1360 | } |
1367 | 1361 | ||
1362 | #ifdef CONFIG_PM | ||
1363 | |||
1368 | static int | 1364 | static int |
1369 | fec_suspend(struct platform_device *dev, pm_message_t state) | 1365 | fec_suspend(struct platform_device *dev, pm_message_t state) |
1370 | { | 1366 | { |
@@ -1395,15 +1391,31 @@ fec_resume(struct platform_device *dev) | |||
1395 | return 0; | 1391 | return 0; |
1396 | } | 1392 | } |
1397 | 1393 | ||
1394 | static const struct dev_pm_ops fec_pm_ops = { | ||
1395 | .suspend = fec_suspend, | ||
1396 | .resume = fec_resume, | ||
1397 | .freeze = fec_suspend, | ||
1398 | .thaw = fec_resume, | ||
1399 | .poweroff = fec_suspend, | ||
1400 | .restore = fec_resume, | ||
1401 | }; | ||
1402 | |||
1403 | #define FEC_PM_OPS (&fec_pm_ops) | ||
1404 | |||
1405 | #else /* !CONFIG_PM */ | ||
1406 | |||
1407 | #define FEC_PM_OPS NULL | ||
1408 | |||
1409 | #endif /* !CONFIG_PM */ | ||
1410 | |||
1398 | static struct platform_driver fec_driver = { | 1411 | static struct platform_driver fec_driver = { |
1399 | .driver = { | 1412 | .driver = { |
1400 | .name = "fec", | 1413 | .name = "fec", |
1401 | .owner = THIS_MODULE, | 1414 | .owner = THIS_MODULE, |
1415 | .pm = FEC_PM_OPS, | ||
1402 | }, | 1416 | }, |
1403 | .probe = fec_probe, | 1417 | .probe = fec_probe, |
1404 | .remove = __devexit_p(fec_drv_remove), | 1418 | .remove = __devexit_p(fec_drv_remove), |
1405 | .suspend = fec_suspend, | ||
1406 | .resume = fec_resume, | ||
1407 | }; | 1419 | }; |
1408 | 1420 | ||
1409 | static int __init | 1421 | static int __init |
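
Note: fec also moves its power management callbacks from the legacy platform_driver .suspend/.resume hooks to a dev_pm_ops table that is compiled out when CONFIG_PM is off. The patch keeps the existing fec_suspend/fec_resume bodies; the sketch below shows only the general shape, with illustrative names and the conventional struct device * callback signature.

	#include <linux/platform_device.h>
	#include <linux/pm.h>

	#ifdef CONFIG_PM
	static int example_suspend(struct device *dev)
	{
		/* quiesce the hardware */
		return 0;
	}

	static int example_resume(struct device *dev)
	{
		/* bring the hardware back up */
		return 0;
	}

	static const struct dev_pm_ops example_pm_ops = {
		.suspend = example_suspend,
		.resume  = example_resume,
	};
	#define EXAMPLE_PM_OPS (&example_pm_ops)
	#else
	#define EXAMPLE_PM_OPS NULL
	#endif

	static struct platform_driver example_driver = {
		.driver = {
			.name = "example",
			.owner = THIS_MODULE,
			.pm = EXAMPLE_PM_OPS,	/* replaces .suspend/.resume on the platform_driver */
		},
	};
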
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c index 006f64d9f96a..dbaf72cbb233 100644 --- a/drivers/net/fec_mpc52xx_phy.c +++ b/drivers/net/fec_mpc52xx_phy.c | |||
@@ -29,15 +29,14 @@ static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id, | |||
29 | int reg, u32 value) | 29 | int reg, u32 value) |
30 | { | 30 | { |
31 | struct mpc52xx_fec_mdio_priv *priv = bus->priv; | 31 | struct mpc52xx_fec_mdio_priv *priv = bus->priv; |
32 | struct mpc52xx_fec __iomem *fec; | 32 | struct mpc52xx_fec __iomem *fec = priv->regs; |
33 | int tries = 3; | 33 | int tries = 3; |
34 | 34 | ||
35 | value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK; | 35 | value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK; |
36 | value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK; | 36 | value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK; |
37 | 37 | ||
38 | fec = priv->regs; | ||
39 | out_be32(&fec->ievent, FEC_IEVENT_MII); | 38 | out_be32(&fec->ievent, FEC_IEVENT_MII); |
40 | out_be32(&priv->regs->mii_data, value); | 39 | out_be32(&fec->mii_data, value); |
41 | 40 | ||
42 | /* wait for it to finish, this takes about 23 us on lite5200b */ | 41 | /* wait for it to finish, this takes about 23 us on lite5200b */ |
43 | while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries) | 42 | while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries) |
@@ -47,7 +46,7 @@ static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id, | |||
47 | return -ETIMEDOUT; | 46 | return -ETIMEDOUT; |
48 | 47 | ||
49 | return value & FEC_MII_DATA_OP_RD ? | 48 | return value & FEC_MII_DATA_OP_RD ? |
50 | in_be32(&priv->regs->mii_data) & FEC_MII_DATA_DATAMSK : 0; | 49 | in_be32(&fec->mii_data) & FEC_MII_DATA_DATAMSK : 0; |
51 | } | 50 | } |
52 | 51 | ||
53 | static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg) | 52 | static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg) |
@@ -69,9 +68,8 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of, | |||
69 | struct device_node *np = of->dev.of_node; | 68 | struct device_node *np = of->dev.of_node; |
70 | struct mii_bus *bus; | 69 | struct mii_bus *bus; |
71 | struct mpc52xx_fec_mdio_priv *priv; | 70 | struct mpc52xx_fec_mdio_priv *priv; |
72 | struct resource res = {}; | 71 | struct resource res; |
73 | int err; | 72 | int err; |
74 | int i; | ||
75 | 73 | ||
76 | bus = mdiobus_alloc(); | 74 | bus = mdiobus_alloc(); |
77 | if (bus == NULL) | 75 | if (bus == NULL) |
@@ -93,7 +91,7 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of, | |||
93 | err = of_address_to_resource(np, 0, &res); | 91 | err = of_address_to_resource(np, 0, &res); |
94 | if (err) | 92 | if (err) |
95 | goto out_free; | 93 | goto out_free; |
96 | priv->regs = ioremap(res.start, res.end - res.start + 1); | 94 | priv->regs = ioremap(res.start, resource_size(&res)); |
97 | if (priv->regs == NULL) { | 95 | if (priv->regs == NULL) { |
98 | err = -ENOMEM; | 96 | err = -ENOMEM; |
99 | goto out_free; | 97 | goto out_free; |
@@ -118,10 +116,6 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of, | |||
118 | out_unmap: | 116 | out_unmap: |
119 | iounmap(priv->regs); | 117 | iounmap(priv->regs); |
120 | out_free: | 118 | out_free: |
121 | for (i=0; i<PHY_MAX_ADDR; i++) | ||
122 | if (bus->irq[i] != PHY_POLL) | ||
123 | irq_dispose_mapping(bus->irq[i]); | ||
124 | kfree(bus->irq); | ||
125 | kfree(priv); | 119 | kfree(priv); |
126 | mdiobus_free(bus); | 120 | mdiobus_free(bus); |
127 | 121 | ||
@@ -133,23 +127,16 @@ static int mpc52xx_fec_mdio_remove(struct of_device *of) | |||
133 | struct device *dev = &of->dev; | 127 | struct device *dev = &of->dev; |
134 | struct mii_bus *bus = dev_get_drvdata(dev); | 128 | struct mii_bus *bus = dev_get_drvdata(dev); |
135 | struct mpc52xx_fec_mdio_priv *priv = bus->priv; | 129 | struct mpc52xx_fec_mdio_priv *priv = bus->priv; |
136 | int i; | ||
137 | 130 | ||
138 | mdiobus_unregister(bus); | 131 | mdiobus_unregister(bus); |
139 | dev_set_drvdata(dev, NULL); | 132 | dev_set_drvdata(dev, NULL); |
140 | |||
141 | iounmap(priv->regs); | 133 | iounmap(priv->regs); |
142 | for (i=0; i<PHY_MAX_ADDR; i++) | ||
143 | if (bus->irq[i] != PHY_POLL) | ||
144 | irq_dispose_mapping(bus->irq[i]); | ||
145 | kfree(priv); | 134 | kfree(priv); |
146 | kfree(bus->irq); | ||
147 | mdiobus_free(bus); | 135 | mdiobus_free(bus); |
148 | 136 | ||
149 | return 0; | 137 | return 0; |
150 | } | 138 | } |
151 | 139 | ||
152 | |||
153 | static struct of_device_id mpc52xx_fec_mdio_match[] = { | 140 | static struct of_device_id mpc52xx_fec_mdio_match[] = { |
154 | { .compatible = "fsl,mpc5200b-mdio", }, | 141 | { .compatible = "fsl,mpc5200b-mdio", }, |
155 | { .compatible = "fsl,mpc5200-mdio", }, | 142 | { .compatible = "fsl,mpc5200-mdio", }, |
@@ -171,5 +158,4 @@ struct of_platform_driver mpc52xx_fec_mdio_driver = { | |||
171 | /* let fec driver call it, since this has to be registered before it */ | 158 | /* let fec driver call it, since this has to be registered before it */ |
172 | EXPORT_SYMBOL_GPL(mpc52xx_fec_mdio_driver); | 159 | EXPORT_SYMBOL_GPL(mpc52xx_fec_mdio_driver); |
173 | 160 | ||
174 | |||
175 | MODULE_LICENSE("Dual BSD/GPL"); | 161 | MODULE_LICENSE("Dual BSD/GPL"); |
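
Note: besides dropping the now-unneeded irq table teardown, the mpc52xx mdio hunks replace the open-coded "res.end - res.start + 1" with resource_size(). A short illustration:

	#include <linux/io.h>
	#include <linux/ioport.h>

	/* resource_size() is res->end - res->start + 1, i.e. the inclusive length. */
	static void __iomem *example_map(struct resource *res)
	{
		return ioremap(res->start, resource_size(res));
	}
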
diff --git a/drivers/net/fsl_pq_mdio.h b/drivers/net/fsl_pq_mdio.h index 1f7d865cedb6..bd17a2a0139b 100644 --- a/drivers/net/fsl_pq_mdio.h +++ b/drivers/net/fsl_pq_mdio.h | |||
@@ -39,7 +39,7 @@ struct fsl_pq_mdio { | |||
39 | u8 reserved[28]; /* Space holder */ | 39 | u8 reserved[28]; /* Space holder */ |
40 | u32 utbipar; /* TBI phy address reg (only on UCC) */ | 40 | u32 utbipar; /* TBI phy address reg (only on UCC) */ |
41 | u8 res4[2728]; | 41 | u8 res4[2728]; |
42 | } __attribute__ ((packed)); | 42 | } __packed; |
43 | 43 | ||
44 | int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum); | 44 | int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum); |
45 | int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value); | 45 | int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value); |
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 1830f3199cb5..ab54821f6709 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -681,8 +681,8 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev) | |||
681 | priv->rx_queue[i] = NULL; | 681 | priv->rx_queue[i] = NULL; |
682 | 682 | ||
683 | for (i = 0; i < priv->num_tx_queues; i++) { | 683 | for (i = 0; i < priv->num_tx_queues; i++) { |
684 | priv->tx_queue[i] = (struct gfar_priv_tx_q *)kzalloc( | 684 | priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q), |
685 | sizeof (struct gfar_priv_tx_q), GFP_KERNEL); | 685 | GFP_KERNEL); |
686 | if (!priv->tx_queue[i]) { | 686 | if (!priv->tx_queue[i]) { |
687 | err = -ENOMEM; | 687 | err = -ENOMEM; |
688 | goto tx_alloc_failed; | 688 | goto tx_alloc_failed; |
@@ -694,8 +694,8 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev) | |||
694 | } | 694 | } |
695 | 695 | ||
696 | for (i = 0; i < priv->num_rx_queues; i++) { | 696 | for (i = 0; i < priv->num_rx_queues; i++) { |
697 | priv->rx_queue[i] = (struct gfar_priv_rx_q *)kzalloc( | 697 | priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q), |
698 | sizeof (struct gfar_priv_rx_q), GFP_KERNEL); | 698 | GFP_KERNEL); |
699 | if (!priv->rx_queue[i]) { | 699 | if (!priv->rx_queue[i]) { |
700 | err = -ENOMEM; | 700 | err = -ENOMEM; |
701 | goto rx_alloc_failed; | 701 | goto rx_alloc_failed; |
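
Note: the gianfar change drops the casts on kzalloc(); since kzalloc() returns void *, the result converts to any object pointer type implicitly. Sketch with an invented structure name:

	#include <linux/slab.h>

	struct example_q {
		int size;
	};

	static struct example_q *example_alloc(void)
	{
		/* no (struct example_q *) cast needed on the void * return value */
		return kzalloc(sizeof(struct example_q), GFP_KERNEL);
	}
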
diff --git a/drivers/net/irda/donauboe.h b/drivers/net/irda/donauboe.h index 0dbd1932b72f..36c3060411d2 100644 --- a/drivers/net/irda/donauboe.h +++ b/drivers/net/irda/donauboe.h | |||
@@ -273,7 +273,7 @@ struct OboeSlot | |||
273 | __u8 control; /*Slot control/status see below */ | 273 | __u8 control; /*Slot control/status see below */ |
274 | __u32 address; /*Slot buffer address */ | 274 | __u32 address; /*Slot buffer address */ |
275 | } | 275 | } |
276 | __attribute__ ((packed)); | 276 | __packed; |
277 | 277 | ||
278 | #define OBOE_NTASKS OBOE_TXRING_OFFSET_IN_SLOTS | 278 | #define OBOE_NTASKS OBOE_TXRING_OFFSET_IN_SLOTS |
279 | 279 | ||
diff --git a/drivers/net/irda/irda-usb.h b/drivers/net/irda/irda-usb.h index ac0443d52e50..58ddb5214916 100644 --- a/drivers/net/irda/irda-usb.h +++ b/drivers/net/irda/irda-usb.h | |||
@@ -125,7 +125,7 @@ struct irda_class_desc { | |||
125 | __u8 bmAdditionalBOFs; | 125 | __u8 bmAdditionalBOFs; |
126 | __u8 bIrdaRateSniff; | 126 | __u8 bIrdaRateSniff; |
127 | __u8 bMaxUnicastList; | 127 | __u8 bMaxUnicastList; |
128 | } __attribute__ ((packed)); | 128 | } __packed; |
129 | 129 | ||
130 | /* class specific interface request to get the IrDA-USB class descriptor | 130 | /* class specific interface request to get the IrDA-USB class descriptor |
131 | * (6.2.5, USB-IrDA class spec 1.0) */ | 131 | * (6.2.5, USB-IrDA class spec 1.0) */ |
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c index b54d3b48045e..1046014dd6c2 100644 --- a/drivers/net/irda/ks959-sir.c +++ b/drivers/net/irda/ks959-sir.c | |||
@@ -154,7 +154,7 @@ struct ks959_speedparams { | |||
154 | __le32 baudrate; /* baud rate, little endian */ | 154 | __le32 baudrate; /* baud rate, little endian */ |
155 | __u8 flags; | 155 | __u8 flags; |
156 | __u8 reserved[3]; | 156 | __u8 reserved[3]; |
157 | } __attribute__ ((packed)); | 157 | } __packed; |
158 | 158 | ||
159 | #define KS_DATA_5_BITS 0x00 | 159 | #define KS_DATA_5_BITS 0x00 |
160 | #define KS_DATA_6_BITS 0x01 | 160 | #define KS_DATA_6_BITS 0x01 |
diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c index 8d713ebac15b..9cc142fcc712 100644 --- a/drivers/net/irda/ksdazzle-sir.c +++ b/drivers/net/irda/ksdazzle-sir.c | |||
@@ -117,7 +117,7 @@ struct ksdazzle_speedparams { | |||
117 | __le32 baudrate; /* baud rate, little endian */ | 117 | __le32 baudrate; /* baud rate, little endian */ |
118 | __u8 flags; | 118 | __u8 flags; |
119 | __u8 reserved[3]; | 119 | __u8 reserved[3]; |
120 | } __attribute__ ((packed)); | 120 | } __packed; |
121 | 121 | ||
122 | #define KS_DATA_5_BITS 0x00 | 122 | #define KS_DATA_5_BITS 0x00 |
123 | #define KS_DATA_6_BITS 0x01 | 123 | #define KS_DATA_6_BITS 0x01 |
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h index 3050d1a0cccf..3f24a1f33022 100644 --- a/drivers/net/irda/vlsi_ir.h +++ b/drivers/net/irda/vlsi_ir.h | |||
@@ -544,9 +544,9 @@ struct ring_descr_hw { | |||
544 | struct { | 544 | struct { |
545 | u8 addr_res[3]; | 545 | u8 addr_res[3]; |
546 | volatile u8 status; /* descriptor status */ | 546 | volatile u8 status; /* descriptor status */ |
547 | } __attribute__((packed)) rd_s; | 547 | } __packed rd_s; |
548 | } __attribute((packed)) rd_u; | 548 | } __packed rd_u; |
549 | } __attribute__ ((packed)); | 549 | } __packed; |
550 | 550 | ||
551 | #define rd_addr rd_u.addr | 551 | #define rd_addr rd_u.addr |
552 | #define rd_status rd_u.rd_s.status | 552 | #define rd_status rd_u.rd_s.status |
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index ffae480587ae..9270089eb282 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h | |||
@@ -44,11 +44,9 @@ | |||
44 | #include <linux/dca.h> | 44 | #include <linux/dca.h> |
45 | #endif | 45 | #endif |
46 | 46 | ||
47 | #define PFX "ixgbe: " | 47 | /* common prefix used by pr_<> macros */ |
48 | #define DPRINTK(nlevel, klevel, fmt, args...) \ | 48 | #undef pr_fmt |
49 | ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ | 49 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
50 | printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \ | ||
51 | __func__ , ## args))) | ||
52 | 50 | ||
53 | /* TX/RX descriptor defines */ | 51 | /* TX/RX descriptor defines */ |
54 | #define IXGBE_DEFAULT_TXD 512 | 52 | #define IXGBE_DEFAULT_TXD 512 |
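
Note: ixgbe drops its private PFX/DPRINTK pair in favour of the generic pr_fmt() prefix, which the pr_*() helpers prepend to every message. Minimal illustration of the convention (defined before the printk machinery is included, or after an #undef as in the header above):

	#undef pr_fmt
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/kernel.h>

	static void example_announce(void)
	{
		pr_info("link is up\n");	/* emitted as "<modulename>: link is up" */
	}
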
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index a4e2901f2f08..976fd9e146c6 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c | |||
@@ -707,9 +707,8 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, | |||
707 | 707 | ||
708 | out: | 708 | out: |
709 | if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) | 709 | if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) |
710 | netif_info(adapter, hw, adapter->netdev, "Smartspeed has" | 710 | e_info("Smartspeed has downgraded the link speed from " |
711 | " downgraded the link speed from the maximum" | 711 | "the maximum advertised\n"); |
712 | " advertised\n"); | ||
713 | return status; | 712 | return status; |
714 | } | 713 | } |
715 | 714 | ||
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h index 3080afb12bdf..d5d3aae8524b 100644 --- a/drivers/net/ixgbe/ixgbe_common.h +++ b/drivers/net/ixgbe/ixgbe_common.h | |||
@@ -105,12 +105,26 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); | |||
105 | 105 | ||
106 | #define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS) | 106 | #define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS) |
107 | 107 | ||
108 | #ifdef DEBUG | 108 | extern struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw); |
109 | extern char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw); | ||
110 | #define hw_dbg(hw, format, arg...) \ | 109 | #define hw_dbg(hw, format, arg...) \ |
111 | printk(KERN_DEBUG "%s: " format, ixgbe_get_hw_dev_name(hw), ##arg) | 110 | netdev_dbg(ixgbe_get_hw_dev(hw), format, ##arg) |
112 | #else | 111 | #define e_err(format, arg...) \ |
113 | #define hw_dbg(hw, format, arg...) do {} while (0) | 112 | netdev_err(adapter->netdev, format, ## arg) |
114 | #endif | 113 | #define e_info(format, arg...) \ |
114 | netdev_info(adapter->netdev, format, ## arg) | ||
115 | #define e_warn(format, arg...) \ | ||
116 | netdev_warn(adapter->netdev, format, ## arg) | ||
117 | #define e_notice(format, arg...) \ | ||
118 | netdev_notice(adapter->netdev, format, ## arg) | ||
119 | #define e_crit(format, arg...) \ | ||
120 | netdev_crit(adapter->netdev, format, ## arg) | ||
121 | #define e_dev_info(format, arg...) \ | ||
122 | dev_info(&adapter->pdev->dev, format, ## arg) | ||
123 | #define e_dev_warn(format, arg...) \ | ||
124 | dev_warn(&adapter->pdev->dev, format, ## arg) | ||
125 | #define e_dev_err(format, arg...) \ | ||
126 | dev_err(&adapter->pdev->dev, format, ## arg) | ||
127 | #define e_dev_notice(format, arg...) \ | ||
128 | dev_notice(&adapter->pdev->dev, format, ## arg) | ||
115 | 129 | ||
116 | #endif /* IXGBE_COMMON */ | 130 | #endif /* IXGBE_COMMON */ |
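
Note: the new e_*() wrappers expand to netdev_err()/netdev_warn()/netdev_info() against a local adapter, so messages are tagged with the interface name, while the e_dev_*() variants use dev_*() on the PCI device. Equivalent calls without the wrappers would look like this (the function is illustrative; the logging helpers are the real kernel ones):

	#include <linux/netdevice.h>
	#include <linux/pci.h>

	static void example_report(struct net_device *netdev, struct pci_dev *pdev)
	{
		netdev_err(netdev, "request_irq failed, Error %d\n", -EIO);
		dev_warn(&pdev->dev, "this is a pre-production adapter\n");
	}
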
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c index 71da325dfa80..657623589d53 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c | |||
@@ -121,7 +121,7 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) | |||
121 | goto out; | 121 | goto out; |
122 | 122 | ||
123 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { | 123 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { |
124 | DPRINTK(DRV, ERR, "Enable failed, needs MSI-X\n"); | 124 | e_err("Enable failed, needs MSI-X\n"); |
125 | err = 1; | 125 | err = 1; |
126 | goto out; | 126 | goto out; |
127 | } | 127 | } |
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index c50a7541ffec..644e3d21b751 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c | |||
@@ -294,8 +294,7 @@ static int ixgbe_set_settings(struct net_device *netdev, | |||
294 | hw->mac.autotry_restart = true; | 294 | hw->mac.autotry_restart = true; |
295 | err = hw->mac.ops.setup_link(hw, advertised, true, true); | 295 | err = hw->mac.ops.setup_link(hw, advertised, true, true); |
296 | if (err) { | 296 | if (err) { |
297 | DPRINTK(PROBE, INFO, | 297 | e_info("setup link failed with code %d\n", err); |
298 | "setup link failed with code %d\n", err); | ||
299 | hw->mac.ops.setup_link(hw, old, true, true); | 298 | hw->mac.ops.setup_link(hw, old, true, true); |
300 | } | 299 | } |
301 | } else { | 300 | } else { |
@@ -1188,9 +1187,9 @@ static struct ixgbe_reg_test reg_test_82598[] = { | |||
1188 | writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \ | 1187 | writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \ |
1189 | val = readl(adapter->hw.hw_addr + R); \ | 1188 | val = readl(adapter->hw.hw_addr + R); \ |
1190 | if (val != (_test[pat] & W & M)) { \ | 1189 | if (val != (_test[pat] & W & M)) { \ |
1191 | DPRINTK(DRV, ERR, "pattern test reg %04X failed: got "\ | 1190 | e_err("pattern test reg %04X failed: got " \ |
1192 | "0x%08X expected 0x%08X\n", \ | 1191 | "0x%08X expected 0x%08X\n", \ |
1193 | R, val, (_test[pat] & W & M)); \ | 1192 | R, val, (_test[pat] & W & M)); \ |
1194 | *data = R; \ | 1193 | *data = R; \ |
1195 | writel(before, adapter->hw.hw_addr + R); \ | 1194 | writel(before, adapter->hw.hw_addr + R); \ |
1196 | return 1; \ | 1195 | return 1; \ |
@@ -1206,8 +1205,8 @@ static struct ixgbe_reg_test reg_test_82598[] = { | |||
1206 | writel((W & M), (adapter->hw.hw_addr + R)); \ | 1205 | writel((W & M), (adapter->hw.hw_addr + R)); \ |
1207 | val = readl(adapter->hw.hw_addr + R); \ | 1206 | val = readl(adapter->hw.hw_addr + R); \ |
1208 | if ((W & M) != (val & M)) { \ | 1207 | if ((W & M) != (val & M)) { \ |
1209 | DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\ | 1208 | e_err("set/check reg %04X test failed: got 0x%08X " \ |
1210 | "expected 0x%08X\n", R, (val & M), (W & M)); \ | 1209 | "expected 0x%08X\n", R, (val & M), (W & M)); \ |
1211 | *data = R; \ | 1210 | *data = R; \ |
1212 | writel(before, (adapter->hw.hw_addr + R)); \ | 1211 | writel(before, (adapter->hw.hw_addr + R)); \ |
1213 | return 1; \ | 1212 | return 1; \ |
@@ -1240,8 +1239,8 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) | |||
1240 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle); | 1239 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle); |
1241 | after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle; | 1240 | after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle; |
1242 | if (value != after) { | 1241 | if (value != after) { |
1243 | DPRINTK(DRV, ERR, "failed STATUS register test got: " | 1242 | e_err("failed STATUS register test got: 0x%08X expected: " |
1244 | "0x%08X expected: 0x%08X\n", after, value); | 1243 | "0x%08X\n", after, value); |
1245 | *data = 1; | 1244 | *data = 1; |
1246 | return 1; | 1245 | return 1; |
1247 | } | 1246 | } |
@@ -1341,8 +1340,8 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) | |||
1341 | *data = 1; | 1340 | *data = 1; |
1342 | return -1; | 1341 | return -1; |
1343 | } | 1342 | } |
1344 | DPRINTK(HW, INFO, "testing %s interrupt\n", | 1343 | e_info("testing %s interrupt\n", shared_int ? |
1345 | (shared_int ? "shared" : "unshared")); | 1344 | "shared" : "unshared"); |
1346 | 1345 | ||
1347 | /* Disable all the interrupts */ | 1346 | /* Disable all the interrupts */ |
1348 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); | 1347 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); |
@@ -1847,7 +1846,7 @@ static void ixgbe_diag_test(struct net_device *netdev, | |||
1847 | if (eth_test->flags == ETH_TEST_FL_OFFLINE) { | 1846 | if (eth_test->flags == ETH_TEST_FL_OFFLINE) { |
1848 | /* Offline tests */ | 1847 | /* Offline tests */ |
1849 | 1848 | ||
1850 | DPRINTK(HW, INFO, "offline testing starting\n"); | 1849 | e_info("offline testing starting\n"); |
1851 | 1850 | ||
1852 | /* Link test performed before hardware reset so autoneg doesn't | 1851 | /* Link test performed before hardware reset so autoneg doesn't |
1853 | * interfere with test result */ | 1852 | * interfere with test result */ |
@@ -1880,17 +1879,17 @@ static void ixgbe_diag_test(struct net_device *netdev, | |||
1880 | else | 1879 | else |
1881 | ixgbe_reset(adapter); | 1880 | ixgbe_reset(adapter); |
1882 | 1881 | ||
1883 | DPRINTK(HW, INFO, "register testing starting\n"); | 1882 | e_info("register testing starting\n"); |
1884 | if (ixgbe_reg_test(adapter, &data[0])) | 1883 | if (ixgbe_reg_test(adapter, &data[0])) |
1885 | eth_test->flags |= ETH_TEST_FL_FAILED; | 1884 | eth_test->flags |= ETH_TEST_FL_FAILED; |
1886 | 1885 | ||
1887 | ixgbe_reset(adapter); | 1886 | ixgbe_reset(adapter); |
1888 | DPRINTK(HW, INFO, "eeprom testing starting\n"); | 1887 | e_info("eeprom testing starting\n"); |
1889 | if (ixgbe_eeprom_test(adapter, &data[1])) | 1888 | if (ixgbe_eeprom_test(adapter, &data[1])) |
1890 | eth_test->flags |= ETH_TEST_FL_FAILED; | 1889 | eth_test->flags |= ETH_TEST_FL_FAILED; |
1891 | 1890 | ||
1892 | ixgbe_reset(adapter); | 1891 | ixgbe_reset(adapter); |
1893 | DPRINTK(HW, INFO, "interrupt testing starting\n"); | 1892 | e_info("interrupt testing starting\n"); |
1894 | if (ixgbe_intr_test(adapter, &data[2])) | 1893 | if (ixgbe_intr_test(adapter, &data[2])) |
1895 | eth_test->flags |= ETH_TEST_FL_FAILED; | 1894 | eth_test->flags |= ETH_TEST_FL_FAILED; |
1896 | 1895 | ||
@@ -1898,14 +1897,13 @@ static void ixgbe_diag_test(struct net_device *netdev, | |||
1898 | * loopback diagnostic. */ | 1897 | * loopback diagnostic. */ |
1899 | if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED | | 1898 | if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED | |
1900 | IXGBE_FLAG_VMDQ_ENABLED)) { | 1899 | IXGBE_FLAG_VMDQ_ENABLED)) { |
1901 | DPRINTK(HW, INFO, "Skip MAC loopback diagnostic in VT " | 1900 | e_info("Skip MAC loopback diagnostic in VT mode\n"); |
1902 | "mode\n"); | ||
1903 | data[3] = 0; | 1901 | data[3] = 0; |
1904 | goto skip_loopback; | 1902 | goto skip_loopback; |
1905 | } | 1903 | } |
1906 | 1904 | ||
1907 | ixgbe_reset(adapter); | 1905 | ixgbe_reset(adapter); |
1908 | DPRINTK(HW, INFO, "loopback testing starting\n"); | 1906 | e_info("loopback testing starting\n"); |
1909 | if (ixgbe_loopback_test(adapter, &data[3])) | 1907 | if (ixgbe_loopback_test(adapter, &data[3])) |
1910 | eth_test->flags |= ETH_TEST_FL_FAILED; | 1908 | eth_test->flags |= ETH_TEST_FL_FAILED; |
1911 | 1909 | ||
@@ -1916,7 +1914,7 @@ skip_loopback: | |||
1916 | if (if_running) | 1914 | if (if_running) |
1917 | dev_open(netdev); | 1915 | dev_open(netdev); |
1918 | } else { | 1916 | } else { |
1919 | DPRINTK(HW, INFO, "online testing starting\n"); | 1917 | e_info("online testing starting\n"); |
1920 | /* Online tests */ | 1918 | /* Online tests */ |
1921 | if (ixgbe_link_test(adapter, &data[4])) | 1919 | if (ixgbe_link_test(adapter, &data[4])) |
1922 | eth_test->flags |= ETH_TEST_FL_FAILED; | 1920 | eth_test->flags |= ETH_TEST_FL_FAILED; |
@@ -2089,8 +2087,8 @@ static bool ixgbe_reenable_rsc(struct ixgbe_adapter *adapter, | |||
2089 | (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) { | 2087 | (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) { |
2090 | adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; | 2088 | adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; |
2091 | adapter->netdev->features |= NETIF_F_LRO; | 2089 | adapter->netdev->features |= NETIF_F_LRO; |
2092 | DPRINTK(PROBE, INFO, "rx-usecs set to %d, re-enabling RSC\n", | 2090 | e_info("rx-usecs set to %d, re-enabling RSC\n", |
2093 | ec->rx_coalesce_usecs); | 2091 | ec->rx_coalesce_usecs); |
2094 | return true; | 2092 | return true; |
2095 | } | 2093 | } |
2096 | return false; | 2094 | return false; |
@@ -2158,8 +2156,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev, | |||
2158 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { | 2156 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { |
2159 | adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; | 2157 | adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; |
2160 | netdev->features &= ~NETIF_F_LRO; | 2158 | netdev->features &= ~NETIF_F_LRO; |
2161 | DPRINTK(PROBE, INFO, | 2159 | e_info("rx-usecs set to 0, disabling RSC\n"); |
2162 | "rx-usecs set to 0, disabling RSC\n"); | ||
2163 | 2160 | ||
2164 | need_reset = true; | 2161 | need_reset = true; |
2165 | } | 2162 | } |
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c index 45182ab41d6b..84e1194e0833 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ixgbe/ixgbe_fcoe.c | |||
@@ -25,7 +25,6 @@ | |||
25 | 25 | ||
26 | *******************************************************************************/ | 26 | *******************************************************************************/ |
27 | 27 | ||
28 | |||
29 | #include "ixgbe.h" | 28 | #include "ixgbe.h" |
30 | #ifdef CONFIG_IXGBE_DCB | 29 | #ifdef CONFIG_IXGBE_DCB |
31 | #include "ixgbe_dcb_82599.h" | 30 | #include "ixgbe_dcb_82599.h" |
@@ -165,20 +164,20 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, | |||
165 | 164 | ||
166 | adapter = netdev_priv(netdev); | 165 | adapter = netdev_priv(netdev); |
167 | if (xid >= IXGBE_FCOE_DDP_MAX) { | 166 | if (xid >= IXGBE_FCOE_DDP_MAX) { |
168 | DPRINTK(DRV, WARNING, "xid=0x%x out-of-range\n", xid); | 167 | e_warn("xid=0x%x out-of-range\n", xid); |
169 | return 0; | 168 | return 0; |
170 | } | 169 | } |
171 | 170 | ||
172 | fcoe = &adapter->fcoe; | 171 | fcoe = &adapter->fcoe; |
173 | if (!fcoe->pool) { | 172 | if (!fcoe->pool) { |
174 | DPRINTK(DRV, WARNING, "xid=0x%x no ddp pool for fcoe\n", xid); | 173 | e_warn("xid=0x%x no ddp pool for fcoe\n", xid); |
175 | return 0; | 174 | return 0; |
176 | } | 175 | } |
177 | 176 | ||
178 | ddp = &fcoe->ddp[xid]; | 177 | ddp = &fcoe->ddp[xid]; |
179 | if (ddp->sgl) { | 178 | if (ddp->sgl) { |
180 | DPRINTK(DRV, ERR, "xid 0x%x w/ non-null sgl=%p nents=%d\n", | 179 | e_err("xid 0x%x w/ non-null sgl=%p nents=%d\n", |
181 | xid, ddp->sgl, ddp->sgc); | 180 | xid, ddp->sgl, ddp->sgc); |
182 | return 0; | 181 | return 0; |
183 | } | 182 | } |
184 | ixgbe_fcoe_clear_ddp(ddp); | 183 | ixgbe_fcoe_clear_ddp(ddp); |
@@ -186,14 +185,14 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, | |||
186 | /* setup dma from scsi command sgl */ | 185 | /* setup dma from scsi command sgl */ |
187 | dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE); | 186 | dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE); |
188 | if (dmacount == 0) { | 187 | if (dmacount == 0) { |
189 | DPRINTK(DRV, ERR, "xid 0x%x DMA map error\n", xid); | 188 | e_err("xid 0x%x DMA map error\n", xid); |
190 | return 0; | 189 | return 0; |
191 | } | 190 | } |
192 | 191 | ||
193 | /* alloc the udl from our ddp pool */ | 192 | /* alloc the udl from our ddp pool */ |
194 | ddp->udl = pci_pool_alloc(fcoe->pool, GFP_KERNEL, &ddp->udp); | 193 | ddp->udl = pci_pool_alloc(fcoe->pool, GFP_KERNEL, &ddp->udp); |
195 | if (!ddp->udl) { | 194 | if (!ddp->udl) { |
196 | DPRINTK(DRV, ERR, "failed allocated ddp context\n"); | 195 | e_err("failed allocated ddp context\n"); |
197 | goto out_noddp_unmap; | 196 | goto out_noddp_unmap; |
198 | } | 197 | } |
199 | ddp->sgl = sgl; | 198 | ddp->sgl = sgl; |
@@ -206,10 +205,9 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, | |||
206 | while (len) { | 205 | while (len) { |
207 | /* max number of buffers allowed in one DDP context */ | 206 | /* max number of buffers allowed in one DDP context */ |
208 | if (j >= IXGBE_BUFFCNT_MAX) { | 207 | if (j >= IXGBE_BUFFCNT_MAX) { |
209 | netif_err(adapter, drv, adapter->netdev, | 208 | e_err("xid=%x:%d,%d,%d:addr=%llx " |
210 | "xid=%x:%d,%d,%d:addr=%llx " | 209 | "not enough descriptors\n", |
211 | "not enough descriptors\n", | 210 | xid, i, j, dmacount, (u64)addr); |
212 | xid, i, j, dmacount, (u64)addr); | ||
213 | goto out_noddp_free; | 211 | goto out_noddp_free; |
214 | } | 212 | } |
215 | 213 | ||
@@ -387,8 +385,8 @@ int ixgbe_fso(struct ixgbe_adapter *adapter, | |||
387 | struct fc_frame_header *fh; | 385 | struct fc_frame_header *fh; |
388 | 386 | ||
389 | if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) { | 387 | if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) { |
390 | DPRINTK(DRV, ERR, "Wrong gso type %d:expecting SKB_GSO_FCOE\n", | 388 | e_err("Wrong gso type %d:expecting SKB_GSO_FCOE\n", |
391 | skb_shinfo(skb)->gso_type); | 389 | skb_shinfo(skb)->gso_type); |
392 | return -EINVAL; | 390 | return -EINVAL; |
393 | } | 391 | } |
394 | 392 | ||
@@ -414,7 +412,7 @@ int ixgbe_fso(struct ixgbe_adapter *adapter, | |||
414 | fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF; | 412 | fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF; |
415 | break; | 413 | break; |
416 | default: | 414 | default: |
417 | DPRINTK(DRV, WARNING, "unknown sof = 0x%x\n", sof); | 415 | e_warn("unknown sof = 0x%x\n", sof); |
418 | return -EINVAL; | 416 | return -EINVAL; |
419 | } | 417 | } |
420 | 418 | ||
@@ -441,7 +439,7 @@ int ixgbe_fso(struct ixgbe_adapter *adapter, | |||
441 | fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A; | 439 | fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A; |
442 | break; | 440 | break; |
443 | default: | 441 | default: |
444 | DPRINTK(DRV, WARNING, "unknown eof = 0x%x\n", eof); | 442 | e_warn("unknown eof = 0x%x\n", eof); |
445 | return -EINVAL; | 443 | return -EINVAL; |
446 | } | 444 | } |
447 | 445 | ||
@@ -517,8 +515,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) | |||
517 | adapter->pdev, IXGBE_FCPTR_MAX, | 515 | adapter->pdev, IXGBE_FCPTR_MAX, |
518 | IXGBE_FCPTR_ALIGN, PAGE_SIZE); | 516 | IXGBE_FCPTR_ALIGN, PAGE_SIZE); |
519 | if (!fcoe->pool) | 517 | if (!fcoe->pool) |
520 | DPRINTK(DRV, ERR, | 518 | e_err("failed to allocate FCoE DDP pool\n"); |
521 | "failed to allocated FCoE DDP pool\n"); | ||
522 | 519 | ||
523 | spin_lock_init(&fcoe->lock); | 520 | spin_lock_init(&fcoe->lock); |
524 | } | 521 | } |
@@ -614,7 +611,7 @@ int ixgbe_fcoe_enable(struct net_device *netdev) | |||
614 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) | 611 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) |
615 | goto out_enable; | 612 | goto out_enable; |
616 | 613 | ||
617 | DPRINTK(DRV, INFO, "Enabling FCoE offload features.\n"); | 614 | e_info("Enabling FCoE offload features.\n"); |
618 | if (netif_running(netdev)) | 615 | if (netif_running(netdev)) |
619 | netdev->netdev_ops->ndo_stop(netdev); | 616 | netdev->netdev_ops->ndo_stop(netdev); |
620 | 617 | ||
@@ -660,7 +657,7 @@ int ixgbe_fcoe_disable(struct net_device *netdev) | |||
660 | if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) | 657 | if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) |
661 | goto out_disable; | 658 | goto out_disable; |
662 | 659 | ||
663 | DPRINTK(DRV, INFO, "Disabling FCoE offload features.\n"); | 660 | e_info("Disabling FCoE offload features.\n"); |
664 | if (netif_running(netdev)) | 661 | if (netif_running(netdev)) |
665 | netdev->netdev_ops->ndo_stop(netdev); | 662 | netdev->netdev_ops->ndo_stop(netdev); |
666 | 663 | ||
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index b2af2f67f604..6b483d352f23 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -696,19 +696,19 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter, | |||
696 | /* detected Tx unit hang */ | 696 | /* detected Tx unit hang */ |
697 | union ixgbe_adv_tx_desc *tx_desc; | 697 | union ixgbe_adv_tx_desc *tx_desc; |
698 | tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); | 698 | tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); |
699 | DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" | 699 | e_err("Detected Tx Unit Hang\n" |
700 | " Tx Queue <%d>\n" | 700 | " Tx Queue <%d>\n" |
701 | " TDH, TDT <%x>, <%x>\n" | 701 | " TDH, TDT <%x>, <%x>\n" |
702 | " next_to_use <%x>\n" | 702 | " next_to_use <%x>\n" |
703 | " next_to_clean <%x>\n" | 703 | " next_to_clean <%x>\n" |
704 | "tx_buffer_info[next_to_clean]\n" | 704 | "tx_buffer_info[next_to_clean]\n" |
705 | " time_stamp <%lx>\n" | 705 | " time_stamp <%lx>\n" |
706 | " jiffies <%lx>\n", | 706 | " jiffies <%lx>\n", |
707 | tx_ring->queue_index, | 707 | tx_ring->queue_index, |
708 | IXGBE_READ_REG(hw, tx_ring->head), | 708 | IXGBE_READ_REG(hw, tx_ring->head), |
709 | IXGBE_READ_REG(hw, tx_ring->tail), | 709 | IXGBE_READ_REG(hw, tx_ring->tail), |
710 | tx_ring->next_to_use, eop, | 710 | tx_ring->next_to_use, eop, |
711 | tx_ring->tx_buffer_info[eop].time_stamp, jiffies); | 711 | tx_ring->tx_buffer_info[eop].time_stamp, jiffies); |
712 | return true; | 712 | return true; |
713 | } | 713 | } |
714 | 714 | ||
@@ -812,9 +812,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, | |||
812 | if (adapter->detect_tx_hung) { | 812 | if (adapter->detect_tx_hung) { |
813 | if (ixgbe_check_tx_hang(adapter, tx_ring, i)) { | 813 | if (ixgbe_check_tx_hang(adapter, tx_ring, i)) { |
814 | /* schedule immediate reset if we believe we hung */ | 814 | /* schedule immediate reset if we believe we hung */ |
815 | DPRINTK(PROBE, INFO, | 815 | e_info("tx hang %d detected, resetting adapter\n", |
816 | "tx hang %d detected, resetting adapter\n", | 816 | adapter->tx_timeout_count + 1); |
817 | adapter->tx_timeout_count + 1); | ||
818 | ixgbe_tx_timeout(adapter->netdev); | 817 | ixgbe_tx_timeout(adapter->netdev); |
819 | } | 818 | } |
820 | } | 819 | } |
@@ -1653,10 +1652,10 @@ static void ixgbe_check_overtemp_task(struct work_struct *work) | |||
1653 | return; | 1652 | return; |
1654 | break; | 1653 | break; |
1655 | } | 1654 | } |
1656 | DPRINTK(DRV, ERR, "Network adapter has been stopped because it " | 1655 | e_crit("Network adapter has been stopped because it " |
1657 | "has over heated. Restart the computer. If the problem " | 1656 | "has over heated. Restart the computer. If the problem " |
1658 | "persists, power off the system and replace the " | 1657 | "persists, power off the system and replace the " |
1659 | "adapter\n"); | 1658 | "adapter\n"); |
1660 | /* write to clear the interrupt */ | 1659 | /* write to clear the interrupt */ |
1661 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0); | 1660 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0); |
1662 | } | 1661 | } |
@@ -1668,7 +1667,7 @@ static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) | |||
1668 | 1667 | ||
1669 | if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && | 1668 | if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && |
1670 | (eicr & IXGBE_EICR_GPI_SDP1)) { | 1669 | (eicr & IXGBE_EICR_GPI_SDP1)) { |
1671 | DPRINTK(PROBE, CRIT, "Fan has stopped, replace the adapter\n"); | 1670 | e_crit("Fan has stopped, replace the adapter\n"); |
1672 | /* write to clear the interrupt */ | 1671 | /* write to clear the interrupt */ |
1673 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); | 1672 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); |
1674 | } | 1673 | } |
@@ -2154,9 +2153,8 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) | |||
2154 | handler, 0, adapter->name[vector], | 2153 | handler, 0, adapter->name[vector], |
2155 | adapter->q_vector[vector]); | 2154 | adapter->q_vector[vector]); |
2156 | if (err) { | 2155 | if (err) { |
2157 | DPRINTK(PROBE, ERR, | 2156 | e_err("request_irq failed for MSIX interrupt: " |
2158 | "request_irq failed for MSIX interrupt " | 2157 | "Error: %d\n", err); |
2159 | "Error: %d\n", err); | ||
2160 | goto free_queue_irqs; | 2158 | goto free_queue_irqs; |
2161 | } | 2159 | } |
2162 | } | 2160 | } |
@@ -2165,8 +2163,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) | |||
2165 | err = request_irq(adapter->msix_entries[vector].vector, | 2163 | err = request_irq(adapter->msix_entries[vector].vector, |
2166 | ixgbe_msix_lsc, 0, adapter->name[vector], netdev); | 2164 | ixgbe_msix_lsc, 0, adapter->name[vector], netdev); |
2167 | if (err) { | 2165 | if (err) { |
2168 | DPRINTK(PROBE, ERR, | 2166 | e_err("request_irq for msix_lsc failed: %d\n", err); |
2169 | "request_irq for msix_lsc failed: %d\n", err); | ||
2170 | goto free_queue_irqs; | 2167 | goto free_queue_irqs; |
2171 | } | 2168 | } |
2172 | 2169 | ||
@@ -2352,7 +2349,7 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter) | |||
2352 | } | 2349 | } |
2353 | 2350 | ||
2354 | if (err) | 2351 | if (err) |
2355 | DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err); | 2352 | e_err("request_irq failed, Error %d\n", err); |
2356 | 2353 | ||
2357 | return err; | 2354 | return err; |
2358 | } | 2355 | } |
@@ -2423,7 +2420,7 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) | |||
2423 | map_vector_to_rxq(adapter, 0, 0); | 2420 | map_vector_to_rxq(adapter, 0, 0); |
2424 | map_vector_to_txq(adapter, 0, 0); | 2421 | map_vector_to_txq(adapter, 0, 0); |
2425 | 2422 | ||
2426 | DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n"); | 2423 | e_info("Legacy interrupt IVAR setup done\n"); |
2427 | } | 2424 | } |
2428 | 2425 | ||
2429 | /** | 2426 | /** |
@@ -3257,8 +3254,8 @@ static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, | |||
3257 | msleep(1); | 3254 | msleep(1); |
3258 | } | 3255 | } |
3259 | if (k >= IXGBE_MAX_RX_DESC_POLL) { | 3256 | if (k >= IXGBE_MAX_RX_DESC_POLL) { |
3260 | DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d " | 3257 | e_err("RXDCTL.ENABLE on Rx queue %d not set within " |
3261 | "not set within the polling period\n", rxr); | 3258 | "the polling period\n", rxr); |
3262 | } | 3259 | } |
3263 | ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr], | 3260 | ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr], |
3264 | (adapter->rx_ring[rxr]->count - 1)); | 3261 | (adapter->rx_ring[rxr]->count - 1)); |
@@ -3387,8 +3384,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
3387 | } while (--wait_loop && | 3384 | } while (--wait_loop && |
3388 | !(txdctl & IXGBE_TXDCTL_ENABLE)); | 3385 | !(txdctl & IXGBE_TXDCTL_ENABLE)); |
3389 | if (!wait_loop) | 3386 | if (!wait_loop) |
3390 | DPRINTK(DRV, ERR, "Could not enable " | 3387 | e_err("Could not enable Tx Queue %d\n", j); |
3391 | "Tx Queue %d\n", j); | ||
3392 | } | 3388 | } |
3393 | } | 3389 | } |
3394 | 3390 | ||
@@ -3436,8 +3432,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
3436 | if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { | 3432 | if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { |
3437 | u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); | 3433 | u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); |
3438 | if (esdp & IXGBE_ESDP_SDP1) | 3434 | if (esdp & IXGBE_ESDP_SDP1) |
3439 | DPRINTK(DRV, CRIT, | 3435 | e_crit("Fan has stopped, replace the adapter\n"); |
3440 | "Fan has stopped, replace the adapter\n"); | ||
3441 | } | 3436 | } |
3442 | 3437 | ||
3443 | /* | 3438 | /* |
@@ -3466,7 +3461,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
3466 | } else { | 3461 | } else { |
3467 | err = ixgbe_non_sfp_link_config(hw); | 3462 | err = ixgbe_non_sfp_link_config(hw); |
3468 | if (err) | 3463 | if (err) |
3469 | DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err); | 3464 | e_err("link_config FAILED %d\n", err); |
3470 | } | 3465 | } |
3471 | 3466 | ||
3472 | for (i = 0; i < adapter->num_tx_queues; i++) | 3467 | for (i = 0; i < adapter->num_tx_queues; i++) |
@@ -3527,19 +3522,19 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) | |||
3527 | case IXGBE_ERR_SFP_NOT_PRESENT: | 3522 | case IXGBE_ERR_SFP_NOT_PRESENT: |
3528 | break; | 3523 | break; |
3529 | case IXGBE_ERR_MASTER_REQUESTS_PENDING: | 3524 | case IXGBE_ERR_MASTER_REQUESTS_PENDING: |
3530 | dev_err(&adapter->pdev->dev, "master disable timed out\n"); | 3525 | e_dev_err("master disable timed out\n"); |
3531 | break; | 3526 | break; |
3532 | case IXGBE_ERR_EEPROM_VERSION: | 3527 | case IXGBE_ERR_EEPROM_VERSION: |
3533 | /* We are running on a pre-production device, log a warning */ | 3528 | /* We are running on a pre-production device, log a warning */ |
3534 | dev_warn(&adapter->pdev->dev, "This device is a pre-production " | 3529 | e_dev_warn("This device is a pre-production adapter/LOM. " |
3535 | "adapter/LOM. Please be aware there may be issues " | 3530 | "Please be aware there may be issuesassociated with " |
3536 | "associated with your hardware. If you are " | 3531 | "your hardware. If you are experiencing problems " |
3537 | "experiencing problems please contact your Intel or " | 3532 | "please contact your Intel or hardware " |
3538 | "hardware representative who provided you with this " | 3533 | "representative who provided you with this " |
3539 | "hardware.\n"); | 3534 | "hardware.\n"); |
3540 | break; | 3535 | break; |
3541 | default: | 3536 | default: |
3542 | dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err); | 3537 | e_dev_err("Hardware Error: %d\n", err); |
3543 | } | 3538 | } |
3544 | 3539 | ||
3545 | /* reprogram the RAR[0] in case user changed it. */ | 3540 | /* reprogram the RAR[0] in case user changed it. */ |
@@ -3920,12 +3915,12 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) | |||
3920 | adapter->num_tx_queues = 1; | 3915 | adapter->num_tx_queues = 1; |
3921 | #ifdef CONFIG_IXGBE_DCB | 3916 | #ifdef CONFIG_IXGBE_DCB |
3922 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | 3917 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { |
3923 | DPRINTK(PROBE, INFO, "FCoE enabled with DCB\n"); | 3918 | e_info("FCoE enabled with DCB\n"); |
3924 | ixgbe_set_dcb_queues(adapter); | 3919 | ixgbe_set_dcb_queues(adapter); |
3925 | } | 3920 | } |
3926 | #endif | 3921 | #endif |
3927 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | 3922 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { |
3928 | DPRINTK(PROBE, INFO, "FCoE enabled with RSS\n"); | 3923 | e_info("FCoE enabled with RSS\n"); |
3929 | if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || | 3924 | if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || |
3930 | (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) | 3925 | (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) |
3931 | ixgbe_set_fdir_queues(adapter); | 3926 | ixgbe_set_fdir_queues(adapter); |
@@ -4038,7 +4033,8 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, | |||
4038 | * This just means we'll go with either a single MSI | 4033 | * This just means we'll go with either a single MSI |
4039 | * vector or fall back to legacy interrupts. | 4034 | * vector or fall back to legacy interrupts. |
4040 | */ | 4035 | */ |
4041 | DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n"); | 4036 | netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, |
4037 | "Unable to allocate MSI-X interrupts\n"); | ||
4042 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; | 4038 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; |
4043 | kfree(adapter->msix_entries); | 4039 | kfree(adapter->msix_entries); |
4044 | adapter->msix_entries = NULL; | 4040 | adapter->msix_entries = NULL; |
@@ -4435,8 +4431,9 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) | |||
4435 | if (!err) { | 4431 | if (!err) { |
4436 | adapter->flags |= IXGBE_FLAG_MSI_ENABLED; | 4432 | adapter->flags |= IXGBE_FLAG_MSI_ENABLED; |
4437 | } else { | 4433 | } else { |
4438 | DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, " | 4434 | netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, |
4439 | "falling back to legacy. Error: %d\n", err); | 4435 | "Unable to allocate MSI interrupt, " |
4436 | "falling back to legacy. Error: %d\n", err); | ||
4440 | /* reset err */ | 4437 | /* reset err */ |
4441 | err = 0; | 4438 | err = 0; |
4442 | } | 4439 | } |
@@ -4557,27 +4554,25 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) | |||
4557 | 4554 | ||
4558 | err = ixgbe_set_interrupt_capability(adapter); | 4555 | err = ixgbe_set_interrupt_capability(adapter); |
4559 | if (err) { | 4556 | if (err) { |
4560 | DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n"); | 4557 | e_dev_err("Unable to setup interrupt capabilities\n"); |
4561 | goto err_set_interrupt; | 4558 | goto err_set_interrupt; |
4562 | } | 4559 | } |
4563 | 4560 | ||
4564 | err = ixgbe_alloc_q_vectors(adapter); | 4561 | err = ixgbe_alloc_q_vectors(adapter); |
4565 | if (err) { | 4562 | if (err) { |
4566 | DPRINTK(PROBE, ERR, "Unable to allocate memory for queue " | 4563 | e_dev_err("Unable to allocate memory for queue vectors\n"); |
4567 | "vectors\n"); | ||
4568 | goto err_alloc_q_vectors; | 4564 | goto err_alloc_q_vectors; |
4569 | } | 4565 | } |
4570 | 4566 | ||
4571 | err = ixgbe_alloc_queues(adapter); | 4567 | err = ixgbe_alloc_queues(adapter); |
4572 | if (err) { | 4568 | if (err) { |
4573 | DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n"); | 4569 | e_dev_err("Unable to allocate memory for queues\n"); |
4574 | goto err_alloc_queues; | 4570 | goto err_alloc_queues; |
4575 | } | 4571 | } |
4576 | 4572 | ||
4577 | DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, " | 4573 | e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", |
4578 | "Tx Queue count = %u\n", | 4574 | (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled", |
4579 | (adapter->num_rx_queues > 1) ? "Enabled" : | 4575 | adapter->num_rx_queues, adapter->num_tx_queues); |
4580 | "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); | ||
4581 | 4576 | ||
4582 | set_bit(__IXGBE_DOWN, &adapter->state); | 4577 | set_bit(__IXGBE_DOWN, &adapter->state); |
4583 | 4578 | ||
@@ -4648,15 +4643,13 @@ static void ixgbe_sfp_task(struct work_struct *work) | |||
4648 | goto reschedule; | 4643 | goto reschedule; |
4649 | ret = hw->phy.ops.reset(hw); | 4644 | ret = hw->phy.ops.reset(hw); |
4650 | if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) { | 4645 | if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) { |
4651 | dev_err(&adapter->pdev->dev, "failed to initialize " | 4646 | e_dev_err("failed to initialize because an unsupported " |
4652 | "because an unsupported SFP+ module type " | 4647 | "SFP+ module type was detected.\n"); |
4653 | "was detected.\n" | 4648 | e_dev_err("Reload the driver after installing a " |
4654 | "Reload the driver after installing a " | 4649 | "supported module.\n"); |
4655 | "supported module.\n"); | ||
4656 | unregister_netdev(adapter->netdev); | 4650 | unregister_netdev(adapter->netdev); |
4657 | } else { | 4651 | } else { |
4658 | DPRINTK(PROBE, INFO, "detected SFP+: %d\n", | 4652 | e_info("detected SFP+: %d\n", hw->phy.sfp_type); |
4659 | hw->phy.sfp_type); | ||
4660 | } | 4653 | } |
4661 | /* don't need this routine any more */ | 4654 | /* don't need this routine any more */ |
4662 | clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); | 4655 | clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); |
@@ -4783,7 +4776,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
4783 | 4776 | ||
4784 | /* initialize eeprom parameters */ | 4777 | /* initialize eeprom parameters */ |
4785 | if (ixgbe_init_eeprom_params_generic(hw)) { | 4778 | if (ixgbe_init_eeprom_params_generic(hw)) { |
4786 | dev_err(&pdev->dev, "EEPROM initialization failed\n"); | 4779 | e_dev_err("EEPROM initialization failed\n"); |
4787 | return -EIO; | 4780 | return -EIO; |
4788 | } | 4781 | } |
4789 | 4782 | ||
@@ -4836,8 +4829,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, | |||
4836 | err: | 4829 | err: |
4837 | vfree(tx_ring->tx_buffer_info); | 4830 | vfree(tx_ring->tx_buffer_info); |
4838 | tx_ring->tx_buffer_info = NULL; | 4831 | tx_ring->tx_buffer_info = NULL; |
4839 | DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit " | 4832 | e_err("Unable to allocate memory for the Tx descriptor ring\n"); |
4840 | "descriptor ring\n"); | ||
4841 | return -ENOMEM; | 4833 | return -ENOMEM; |
4842 | } | 4834 | } |
4843 | 4835 | ||
@@ -4859,7 +4851,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) | |||
4859 | err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]); | 4851 | err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]); |
4860 | if (!err) | 4852 | if (!err) |
4861 | continue; | 4853 | continue; |
4862 | DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i); | 4854 | e_err("Allocation for Tx Queue %u failed\n", i); |
4863 | break; | 4855 | break; |
4864 | } | 4856 | } |
4865 | 4857 | ||
@@ -4884,8 +4876,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, | |||
4884 | if (!rx_ring->rx_buffer_info) | 4876 | if (!rx_ring->rx_buffer_info) |
4885 | rx_ring->rx_buffer_info = vmalloc(size); | 4877 | rx_ring->rx_buffer_info = vmalloc(size); |
4886 | if (!rx_ring->rx_buffer_info) { | 4878 | if (!rx_ring->rx_buffer_info) { |
4887 | DPRINTK(PROBE, ERR, | 4879 | e_err("vmalloc allocation failed for the Rx desc ring\n"); |
4888 | "vmalloc allocation failed for the rx desc ring\n"); | ||
4889 | goto alloc_failed; | 4880 | goto alloc_failed; |
4890 | } | 4881 | } |
4891 | memset(rx_ring->rx_buffer_info, 0, size); | 4882 | memset(rx_ring->rx_buffer_info, 0, size); |
@@ -4898,8 +4889,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, | |||
4898 | &rx_ring->dma, GFP_KERNEL); | 4889 | &rx_ring->dma, GFP_KERNEL); |
4899 | 4890 | ||
4900 | if (!rx_ring->desc) { | 4891 | if (!rx_ring->desc) { |
4901 | DPRINTK(PROBE, ERR, | 4892 | e_err("Memory allocation failed for the Rx desc ring\n"); |
4902 | "Memory allocation failed for the rx desc ring\n"); | ||
4903 | vfree(rx_ring->rx_buffer_info); | 4893 | vfree(rx_ring->rx_buffer_info); |
4904 | goto alloc_failed; | 4894 | goto alloc_failed; |
4905 | } | 4895 | } |
@@ -4932,7 +4922,7 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) | |||
4932 | err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]); | 4922 | err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]); |
4933 | if (!err) | 4923 | if (!err) |
4934 | continue; | 4924 | continue; |
4935 | DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i); | 4925 | e_err("Allocation for Rx Queue %u failed\n", i); |
4936 | break; | 4926 | break; |
4937 | } | 4927 | } |
4938 | 4928 | ||
@@ -5031,8 +5021,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) | |||
5031 | if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) | 5021 | if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) |
5032 | return -EINVAL; | 5022 | return -EINVAL; |
5033 | 5023 | ||
5034 | DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n", | 5024 | e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); |
5035 | netdev->mtu, new_mtu); | ||
5036 | /* must set new MTU before calling down or up */ | 5025 | /* must set new MTU before calling down or up */ |
5037 | netdev->mtu = new_mtu; | 5026 | netdev->mtu = new_mtu; |
5038 | 5027 | ||
@@ -5145,8 +5134,7 @@ static int ixgbe_resume(struct pci_dev *pdev) | |||
5145 | 5134 | ||
5146 | err = pci_enable_device_mem(pdev); | 5135 | err = pci_enable_device_mem(pdev); |
5147 | if (err) { | 5136 | if (err) { |
5148 | printk(KERN_ERR "ixgbe: Cannot enable PCI device from " | 5137 | e_dev_err("Cannot enable PCI device from suspend\n"); |
5149 | "suspend\n"); | ||
5150 | return err; | 5138 | return err; |
5151 | } | 5139 | } |
5152 | pci_set_master(pdev); | 5140 | pci_set_master(pdev); |
@@ -5155,8 +5143,7 @@ static int ixgbe_resume(struct pci_dev *pdev) | |||
5155 | 5143 | ||
5156 | err = ixgbe_init_interrupt_scheme(adapter); | 5144 | err = ixgbe_init_interrupt_scheme(adapter); |
5157 | if (err) { | 5145 | if (err) { |
5158 | printk(KERN_ERR "ixgbe: Cannot initialize interrupts for " | 5146 | e_dev_err("Cannot initialize interrupts for device\n"); |
5159 | "device\n"); | ||
5160 | return err; | 5147 | return err; |
5161 | } | 5148 | } |
5162 | 5149 | ||
@@ -5512,10 +5499,10 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work) | |||
5512 | err = hw->phy.ops.identify_sfp(hw); | 5499 | err = hw->phy.ops.identify_sfp(hw); |
5513 | 5500 | ||
5514 | if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { | 5501 | if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { |
5515 | dev_err(&adapter->pdev->dev, "failed to initialize because " | 5502 | e_dev_err("failed to initialize because an unsupported SFP+ " |
5516 | "an unsupported SFP+ module type was detected.\n" | 5503 | "module type was detected.\n"); |
5517 | "Reload the driver after installing a supported " | 5504 | e_dev_err("Reload the driver after installing a supported " |
5518 | "module.\n"); | 5505 | "module.\n"); |
5519 | unregister_netdev(adapter->netdev); | 5506 | unregister_netdev(adapter->netdev); |
5520 | return; | 5507 | return; |
5521 | } | 5508 | } |
@@ -5544,8 +5531,8 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work) | |||
5544 | set_bit(__IXGBE_FDIR_INIT_DONE, | 5531 | set_bit(__IXGBE_FDIR_INIT_DONE, |
5545 | &(adapter->tx_ring[i]->reinit_state)); | 5532 | &(adapter->tx_ring[i]->reinit_state)); |
5546 | } else { | 5533 | } else { |
5547 | DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, " | 5534 | e_err("failed to finish FDIR re-initialization, " |
5548 | "ignored adding FDIR ATR filters\n"); | 5535 | "ignored adding FDIR ATR filters\n"); |
5549 | } | 5536 | } |
5550 | /* Done FDIR Re-initialization, enable transmits */ | 5537 | /* Done FDIR Re-initialization, enable transmits */ |
5551 | netif_tx_start_all_queues(adapter->netdev); | 5538 | netif_tx_start_all_queues(adapter->netdev); |
@@ -5616,16 +5603,14 @@ static void ixgbe_watchdog_task(struct work_struct *work) | |||
5616 | flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); | 5603 | flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); |
5617 | } | 5604 | } |
5618 | 5605 | ||
5619 | printk(KERN_INFO "ixgbe: %s NIC Link is Up %s, " | 5606 | e_info("NIC Link is Up %s, Flow Control: %s\n", |
5620 | "Flow Control: %s\n", | ||
5621 | netdev->name, | ||
5622 | (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? | 5607 | (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? |
5623 | "10 Gbps" : | 5608 | "10 Gbps" : |
5624 | (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? | 5609 | (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? |
5625 | "1 Gbps" : "unknown speed")), | 5610 | "1 Gbps" : "unknown speed")), |
5626 | ((flow_rx && flow_tx) ? "RX/TX" : | 5611 | ((flow_rx && flow_tx) ? "RX/TX" : |
5627 | (flow_rx ? "RX" : | 5612 | (flow_rx ? "RX" : |
5628 | (flow_tx ? "TX" : "None")))); | 5613 | (flow_tx ? "TX" : "None")))); |
5629 | 5614 | ||
5630 | netif_carrier_on(netdev); | 5615 | netif_carrier_on(netdev); |
5631 | } else { | 5616 | } else { |
@@ -5636,8 +5621,7 @@ static void ixgbe_watchdog_task(struct work_struct *work) | |||
5636 | adapter->link_up = false; | 5621 | adapter->link_up = false; |
5637 | adapter->link_speed = 0; | 5622 | adapter->link_speed = 0; |
5638 | if (netif_carrier_ok(netdev)) { | 5623 | if (netif_carrier_ok(netdev)) { |
5639 | printk(KERN_INFO "ixgbe: %s NIC Link is Down\n", | 5624 | e_info("NIC Link is Down\n"); |
5640 | netdev->name); | ||
5641 | netif_carrier_off(netdev); | 5625 | netif_carrier_off(netdev); |
5642 | } | 5626 | } |
5643 | } | 5627 | } |
@@ -5813,9 +5797,8 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, | |||
5813 | break; | 5797 | break; |
5814 | default: | 5798 | default: |
5815 | if (unlikely(net_ratelimit())) { | 5799 | if (unlikely(net_ratelimit())) { |
5816 | DPRINTK(PROBE, WARNING, | 5800 | e_warn("partial checksum but " |
5817 | "partial checksum but proto=%x!\n", | 5801 | "proto=%x!\n", skb->protocol); |
5818 | skb->protocol); | ||
5819 | } | 5802 | } |
5820 | break; | 5803 | break; |
5821 | } | 5804 | } |
@@ -5926,7 +5909,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, | |||
5926 | return count; | 5909 | return count; |
5927 | 5910 | ||
5928 | dma_error: | 5911 | dma_error: |
5929 | dev_err(&pdev->dev, "TX DMA map failed\n"); | 5912 | e_dev_err("TX DMA map failed\n"); |
5930 | 5913 | ||
5931 | /* clear timestamp and dma mappings for failed tx_buffer_info map */ | 5914 | /* clear timestamp and dma mappings for failed tx_buffer_info map */ |
5932 | tx_buffer_info->dma = 0; | 5915 | tx_buffer_info->dma = 0; |
@@ -6423,8 +6406,7 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter, | |||
6423 | adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; | 6406 | adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; |
6424 | err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); | 6407 | err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); |
6425 | if (err) { | 6408 | if (err) { |
6426 | DPRINTK(PROBE, ERR, | 6409 | e_err("Failed to enable PCI sriov: %d\n", err); |
6427 | "Failed to enable PCI sriov: %d\n", err); | ||
6428 | goto err_novfs; | 6410 | goto err_novfs; |
6429 | } | 6411 | } |
6430 | /* If call to enable VFs succeeded then allocate memory | 6412 | /* If call to enable VFs succeeded then allocate memory |
@@ -6448,9 +6430,8 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter, | |||
6448 | } | 6430 | } |
6449 | 6431 | ||
6450 | /* Oh oh */ | 6432 | /* Oh oh */ |
6451 | DPRINTK(PROBE, ERR, | 6433 | e_err("Unable to allocate memory for VF Data Storage - SRIOV " |
6452 | "Unable to allocate memory for VF " | 6434 | "disabled\n"); |
6453 | "Data Storage - SRIOV disabled\n"); | ||
6454 | pci_disable_sriov(adapter->pdev); | 6435 | pci_disable_sriov(adapter->pdev); |
6455 | 6436 | ||
6456 | err_novfs: | 6437 | err_novfs: |
@@ -6498,8 +6479,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6498 | err = dma_set_coherent_mask(&pdev->dev, | 6479 | err = dma_set_coherent_mask(&pdev->dev, |
6499 | DMA_BIT_MASK(32)); | 6480 | DMA_BIT_MASK(32)); |
6500 | if (err) { | 6481 | if (err) { |
6501 | dev_err(&pdev->dev, "No usable DMA " | 6482 | e_dev_err("No usable DMA configuration, " |
6502 | "configuration, aborting\n"); | 6483 | "aborting\n"); |
6503 | goto err_dma; | 6484 | goto err_dma; |
6504 | } | 6485 | } |
6505 | } | 6486 | } |
@@ -6509,8 +6490,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6509 | err = pci_request_selected_regions(pdev, pci_select_bars(pdev, | 6490 | err = pci_request_selected_regions(pdev, pci_select_bars(pdev, |
6510 | IORESOURCE_MEM), ixgbe_driver_name); | 6491 | IORESOURCE_MEM), ixgbe_driver_name); |
6511 | if (err) { | 6492 | if (err) { |
6512 | dev_err(&pdev->dev, | 6493 | e_dev_err("pci_request_selected_regions failed 0x%x\n", err); |
6513 | "pci_request_selected_regions failed 0x%x\n", err); | ||
6514 | goto err_pci_reg; | 6494 | goto err_pci_reg; |
6515 | } | 6495 | } |
6516 | 6496 | ||
@@ -6621,8 +6601,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6621 | if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { | 6601 | if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { |
6622 | u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); | 6602 | u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); |
6623 | if (esdp & IXGBE_ESDP_SDP1) | 6603 | if (esdp & IXGBE_ESDP_SDP1) |
6624 | DPRINTK(PROBE, CRIT, | 6604 | e_crit("Fan has stopped, replace the adapter\n"); |
6625 | "Fan has stopped, replace the adapter\n"); | ||
6626 | } | 6605 | } |
6627 | 6606 | ||
6628 | /* reset_hw fills in the perm_addr as well */ | 6607 | /* reset_hw fills in the perm_addr as well */ |
@@ -6641,19 +6620,19 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6641 | round_jiffies(jiffies + (2 * HZ))); | 6620 | round_jiffies(jiffies + (2 * HZ))); |
6642 | err = 0; | 6621 | err = 0; |
6643 | } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { | 6622 | } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { |
6644 | dev_err(&adapter->pdev->dev, "failed to initialize because " | 6623 | e_dev_err("failed to initialize because an unsupported SFP+ " |
6645 | "an unsupported SFP+ module type was detected.\n" | 6624 | "module type was detected.\n"); |
6646 | "Reload the driver after installing a supported " | 6625 | e_dev_err("Reload the driver after installing a supported " |
6647 | "module.\n"); | 6626 | "module.\n"); |
6648 | goto err_sw_init; | 6627 | goto err_sw_init; |
6649 | } else if (err) { | 6628 | } else if (err) { |
6650 | dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err); | 6629 | e_dev_err("HW Init failed: %d\n", err); |
6651 | goto err_sw_init; | 6630 | goto err_sw_init; |
6652 | } | 6631 | } |
6653 | 6632 | ||
6654 | ixgbe_probe_vf(adapter, ii); | 6633 | ixgbe_probe_vf(adapter, ii); |
6655 | 6634 | ||
6656 | netdev->features = NETIF_F_SG | | 6635 | netdev->features = NETIF_F_SG | |
6657 | NETIF_F_IP_CSUM | | 6636 | NETIF_F_IP_CSUM | |
6658 | NETIF_F_HW_VLAN_TX | | 6637 | NETIF_F_HW_VLAN_TX | |
6659 | NETIF_F_HW_VLAN_RX | | 6638 | NETIF_F_HW_VLAN_RX | |
@@ -6700,7 +6679,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6700 | 6679 | ||
6701 | /* make sure the EEPROM is good */ | 6680 | /* make sure the EEPROM is good */ |
6702 | if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { | 6681 | if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { |
6703 | dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n"); | 6682 | e_dev_err("The EEPROM Checksum Is Not Valid\n"); |
6704 | err = -EIO; | 6683 | err = -EIO; |
6705 | goto err_eeprom; | 6684 | goto err_eeprom; |
6706 | } | 6685 | } |
@@ -6709,7 +6688,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6709 | memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); | 6688 | memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); |
6710 | 6689 | ||
6711 | if (ixgbe_validate_mac_addr(netdev->perm_addr)) { | 6690 | if (ixgbe_validate_mac_addr(netdev->perm_addr)) { |
6712 | dev_err(&pdev->dev, "invalid MAC address\n"); | 6691 | e_dev_err("invalid MAC address\n"); |
6713 | err = -EIO; | 6692 | err = -EIO; |
6714 | goto err_eeprom; | 6693 | goto err_eeprom; |
6715 | } | 6694 | } |
@@ -6744,7 +6723,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6744 | hw->mac.ops.get_bus_info(hw); | 6723 | hw->mac.ops.get_bus_info(hw); |
6745 | 6724 | ||
6746 | /* print bus type/speed/width info */ | 6725 | /* print bus type/speed/width info */ |
6747 | dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n", | 6726 | e_dev_info("(PCI Express:%s:%s) %pM\n", |
6748 | ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s": | 6727 | ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s": |
6749 | (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"), | 6728 | (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"), |
6750 | ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" : | 6729 | ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" : |
@@ -6754,20 +6733,20 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6754 | netdev->dev_addr); | 6733 | netdev->dev_addr); |
6755 | ixgbe_read_pba_num_generic(hw, &part_num); | 6734 | ixgbe_read_pba_num_generic(hw, &part_num); |
6756 | if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) | 6735 | if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) |
6757 | dev_info(&pdev->dev, "MAC: %d, PHY: %d, SFP+: %d, PBA No: %06x-%03x\n", | 6736 | e_dev_info("MAC: %d, PHY: %d, SFP+: %d, " |
6758 | hw->mac.type, hw->phy.type, hw->phy.sfp_type, | 6737 | "PBA No: %06x-%03x\n", |
6759 | (part_num >> 8), (part_num & 0xff)); | 6738 | hw->mac.type, hw->phy.type, hw->phy.sfp_type, |
6739 | (part_num >> 8), (part_num & 0xff)); | ||
6760 | else | 6740 | else |
6761 | dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", | 6741 | e_dev_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n", |
6762 | hw->mac.type, hw->phy.type, | 6742 | hw->mac.type, hw->phy.type, |
6763 | (part_num >> 8), (part_num & 0xff)); | 6743 | (part_num >> 8), (part_num & 0xff)); |
6764 | 6744 | ||
6765 | if (hw->bus.width <= ixgbe_bus_width_pcie_x4) { | 6745 | if (hw->bus.width <= ixgbe_bus_width_pcie_x4) { |
6766 | dev_warn(&pdev->dev, "PCI-Express bandwidth available for " | 6746 | e_dev_warn("PCI-Express bandwidth available for this card is " |
6767 | "this card is not sufficient for optimal " | 6747 | "not sufficient for optimal performance.\n"); |
6768 | "performance.\n"); | 6748 | e_dev_warn("For optimal performance a x8 PCI-Express slot " |
6769 | dev_warn(&pdev->dev, "For optimal performance a x8 " | 6749 | "is required.\n"); |
6770 | "PCI-Express slot is required.\n"); | ||
6771 | } | 6750 | } |
6772 | 6751 | ||
6773 | /* save off EEPROM version number */ | 6752 | /* save off EEPROM version number */ |
@@ -6778,12 +6757,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6778 | 6757 | ||
6779 | if (err == IXGBE_ERR_EEPROM_VERSION) { | 6758 | if (err == IXGBE_ERR_EEPROM_VERSION) { |
6780 | /* We are running on a pre-production device, log a warning */ | 6759 | /* We are running on a pre-production device, log a warning */ |
6781 | dev_warn(&pdev->dev, "This device is a pre-production " | 6760 | e_dev_warn("This device is a pre-production adapter/LOM. " |
6782 | "adapter/LOM. Please be aware there may be issues " | 6761 | "Please be aware there may be issues associated " |
6783 | "associated with your hardware. If you are " | 6762 | "with your hardware. If you are experiencing " |
6784 | "experiencing problems please contact your Intel or " | 6763 | "problems please contact your Intel or hardware " |
6785 | "hardware representative who provided you with this " | 6764 | "representative who provided you with this " |
6786 | "hardware.\n"); | 6765 | "hardware.\n"); |
6787 | } | 6766 | } |
6788 | strcpy(netdev->name, "eth%d"); | 6767 | strcpy(netdev->name, "eth%d"); |
6789 | err = register_netdev(netdev); | 6768 | err = register_netdev(netdev); |
@@ -6806,8 +6785,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6806 | } | 6785 | } |
6807 | #endif | 6786 | #endif |
6808 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { | 6787 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { |
6809 | DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n", | 6788 | e_info("IOV is enabled with %d VFs\n", adapter->num_vfs); |
6810 | adapter->num_vfs); | ||
6811 | for (i = 0; i < adapter->num_vfs; i++) | 6789 | for (i = 0; i < adapter->num_vfs; i++) |
6812 | ixgbe_vf_configuration(pdev, (i | 0x10000000)); | 6790 | ixgbe_vf_configuration(pdev, (i | 0x10000000)); |
6813 | } | 6791 | } |
@@ -6815,7 +6793,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6815 | /* add san mac addr to netdev */ | 6793 | /* add san mac addr to netdev */ |
6816 | ixgbe_add_sanmac_netdev(netdev); | 6794 | ixgbe_add_sanmac_netdev(netdev); |
6817 | 6795 | ||
6818 | dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n"); | 6796 | e_dev_info("Intel(R) 10 Gigabit Network Connection\n"); |
6819 | cards_found++; | 6797 | cards_found++; |
6820 | return 0; | 6798 | return 0; |
6821 | 6799 | ||
@@ -6905,7 +6883,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) | |||
6905 | pci_release_selected_regions(pdev, pci_select_bars(pdev, | 6883 | pci_release_selected_regions(pdev, pci_select_bars(pdev, |
6906 | IORESOURCE_MEM)); | 6884 | IORESOURCE_MEM)); |
6907 | 6885 | ||
6908 | DPRINTK(PROBE, INFO, "complete\n"); | 6886 | e_dev_info("complete\n"); |
6909 | 6887 | ||
6910 | free_netdev(netdev); | 6888 | free_netdev(netdev); |
6911 | 6889 | ||
@@ -6955,8 +6933,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) | |||
6955 | int err; | 6933 | int err; |
6956 | 6934 | ||
6957 | if (pci_enable_device_mem(pdev)) { | 6935 | if (pci_enable_device_mem(pdev)) { |
6958 | DPRINTK(PROBE, ERR, | 6936 | e_err("Cannot re-enable PCI device after reset.\n"); |
6959 | "Cannot re-enable PCI device after reset.\n"); | ||
6960 | result = PCI_ERS_RESULT_DISCONNECT; | 6937 | result = PCI_ERS_RESULT_DISCONNECT; |
6961 | } else { | 6938 | } else { |
6962 | pci_set_master(pdev); | 6939 | pci_set_master(pdev); |
@@ -6972,8 +6949,8 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) | |||
6972 | 6949 | ||
6973 | err = pci_cleanup_aer_uncorrect_error_status(pdev); | 6950 | err = pci_cleanup_aer_uncorrect_error_status(pdev); |
6974 | if (err) { | 6951 | if (err) { |
6975 | dev_err(&pdev->dev, | 6952 | e_dev_err("pci_cleanup_aer_uncorrect_error_status " |
6976 | "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", err); | 6953 | "failed 0x%0x\n", err); |
6977 | /* non-fatal, continue */ | 6954 | /* non-fatal, continue */ |
6978 | } | 6955 | } |
6979 | 6956 | ||
@@ -6994,7 +6971,7 @@ static void ixgbe_io_resume(struct pci_dev *pdev) | |||
6994 | 6971 | ||
6995 | if (netif_running(netdev)) { | 6972 | if (netif_running(netdev)) { |
6996 | if (ixgbe_up(adapter)) { | 6973 | if (ixgbe_up(adapter)) { |
6997 | DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n"); | 6974 | e_info("ixgbe_up failed after reset\n"); |
6998 | return; | 6975 | return; |
6999 | } | 6976 | } |
7000 | } | 6977 | } |
@@ -7030,10 +7007,9 @@ static struct pci_driver ixgbe_driver = { | |||
7030 | static int __init ixgbe_init_module(void) | 7007 | static int __init ixgbe_init_module(void) |
7031 | { | 7008 | { |
7032 | int ret; | 7009 | int ret; |
7033 | printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name, | 7010 | pr_info("%s - version %s\n", ixgbe_driver_string, |
7034 | ixgbe_driver_string, ixgbe_driver_version); | 7011 | ixgbe_driver_version); |
7035 | 7012 | pr_info("%s\n", ixgbe_copyright); | |
7036 | printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright); | ||
7037 | 7013 | ||
7038 | #ifdef CONFIG_IXGBE_DCA | 7014 | #ifdef CONFIG_IXGBE_DCA |
7039 | dca_register_notify(&dca_notifier); | 7015 | dca_register_notify(&dca_notifier); |
@@ -7072,18 +7048,17 @@ static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, | |||
7072 | } | 7048 | } |
7073 | 7049 | ||
7074 | #endif /* CONFIG_IXGBE_DCA */ | 7050 | #endif /* CONFIG_IXGBE_DCA */ |
7075 | #ifdef DEBUG | 7051 | |
7076 | /** | 7052 | /** |
7077 | * ixgbe_get_hw_dev_name - return device name string | 7053 | * ixgbe_get_hw_dev - return device |
7078 | * used by hardware layer to print debugging information | 7054 | * used by hardware layer to print debugging information |
7079 | **/ | 7055 | **/ |
7080 | char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw) | 7056 | struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw) |
7081 | { | 7057 | { |
7082 | struct ixgbe_adapter *adapter = hw->back; | 7058 | struct ixgbe_adapter *adapter = hw->back; |
7083 | return adapter->netdev->name; | 7059 | return adapter->netdev; |
7084 | } | 7060 | } |
7085 | 7061 | ||
7086 | #endif | ||
7087 | module_exit(ixgbe_exit_module); | 7062 | module_exit(ixgbe_exit_module); |
7088 | 7063 | ||
7089 | /* ixgbe_main.c */ | 7064 | /* ixgbe_main.c */ |
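The ixgbe hunks above retire the driver-private DPRINTK() macro in favour of e_info/e_warn/e_err/e_crit (keyed off adapter->netdev) and e_dev_info/e_dev_warn/e_dev_err (keyed off the PCI device). The real definitions live in the ixgbe headers and are not part of this diff; the sketch below only illustrates the assumed wrapper pattern, with an adapter pointer expected to be in scope in the calling function.

    #include <linux/device.h>
    #include <linux/netdevice.h>

    /* Assumed shape of the e_* helpers used in the hunks above: thin wrappers
     * around the generic netdev_*()/dev_*() printk helpers so every message is
     * automatically prefixed with the interface name or PCI device. */
    #define e_info(format, arg...)     netdev_info(adapter->netdev, format, ## arg)
    #define e_warn(format, arg...)     netdev_warn(adapter->netdev, format, ## arg)
    #define e_err(format, arg...)      netdev_err(adapter->netdev, format, ## arg)
    #define e_crit(format, arg...)     netdev_crit(adapter->netdev, format, ## arg)
    #define e_dev_info(format, arg...) dev_info(&adapter->pdev->dev, format, ## arg)
    #define e_dev_warn(format, arg...) dev_warn(&adapter->pdev->dev, format, ## arg)
    #define e_dev_err(format, arg...)  dev_err(&adapter->pdev->dev, format, ## arg)

This is also why ixgbe_get_hw_dev_name() becomes ixgbe_get_hw_dev() at the end of the file: the helpers want the net_device itself rather than just its name.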
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c index f6cee94ec8e8..66f6e62b8cb0 100644 --- a/drivers/net/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ixgbe/ixgbe_sriov.c | |||
@@ -25,7 +25,6 @@ | |||
25 | 25 | ||
26 | *******************************************************************************/ | 26 | *******************************************************************************/ |
27 | 27 | ||
28 | |||
29 | #include <linux/types.h> | 28 | #include <linux/types.h> |
30 | #include <linux/module.h> | 29 | #include <linux/module.h> |
31 | #include <linux/pci.h> | 30 | #include <linux/pci.h> |
@@ -174,7 +173,7 @@ int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, | |||
174 | adapter->vfinfo[vf].rar = hw->mac.ops.set_rar(hw, vf + 1, mac_addr, | 173 | adapter->vfinfo[vf].rar = hw->mac.ops.set_rar(hw, vf + 1, mac_addr, |
175 | vf, IXGBE_RAH_AV); | 174 | vf, IXGBE_RAH_AV); |
176 | if (adapter->vfinfo[vf].rar < 0) { | 175 | if (adapter->vfinfo[vf].rar < 0) { |
177 | DPRINTK(DRV, ERR, "Could not set MAC Filter for VF %d\n", vf); | 176 | e_err("Could not set MAC Filter for VF %d\n", vf); |
178 | return -1; | 177 | return -1; |
179 | } | 178 | } |
180 | 179 | ||
@@ -194,11 +193,7 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) | |||
194 | 193 | ||
195 | if (enable) { | 194 | if (enable) { |
196 | random_ether_addr(vf_mac_addr); | 195 | random_ether_addr(vf_mac_addr); |
197 | DPRINTK(PROBE, INFO, "IOV: VF %d is enabled " | 196 | e_info("IOV: VF %d is enabled MAC %pM\n", vfn, vf_mac_addr); |
198 | "mac %02X:%02X:%02X:%02X:%02X:%02X\n", | ||
199 | vfn, | ||
200 | vf_mac_addr[0], vf_mac_addr[1], vf_mac_addr[2], | ||
201 | vf_mac_addr[3], vf_mac_addr[4], vf_mac_addr[5]); | ||
202 | /* | 197 | /* |
203 | * Store away the VF "permananet" MAC address, it will ask | 198 | * Store away the VF "permananet" MAC address, it will ask |
204 | * for it later. | 199 | * for it later. |
@@ -243,7 +238,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) | |||
243 | retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); | 238 | retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); |
244 | 239 | ||
245 | if (retval) | 240 | if (retval) |
246 | printk(KERN_ERR "Error receiving message from VF\n"); | 241 | pr_err("Error receiving message from VF\n"); |
247 | 242 | ||
248 | /* this is a message we already processed, do nothing */ | 243 | /* this is a message we already processed, do nothing */ |
249 | if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK)) | 244 | if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK)) |
@@ -257,7 +252,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) | |||
257 | if (msgbuf[0] == IXGBE_VF_RESET) { | 252 | if (msgbuf[0] == IXGBE_VF_RESET) { |
258 | unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; | 253 | unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; |
259 | u8 *addr = (u8 *)(&msgbuf[1]); | 254 | u8 *addr = (u8 *)(&msgbuf[1]); |
260 | DPRINTK(PROBE, INFO, "VF Reset msg received from vf %d\n", vf); | 255 | e_info("VF Reset msg received from vf %d\n", vf); |
261 | adapter->vfinfo[vf].clear_to_send = false; | 256 | adapter->vfinfo[vf].clear_to_send = false; |
262 | ixgbe_vf_reset_msg(adapter, vf); | 257 | ixgbe_vf_reset_msg(adapter, vf); |
263 | adapter->vfinfo[vf].clear_to_send = true; | 258 | adapter->vfinfo[vf].clear_to_send = true; |
@@ -310,7 +305,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) | |||
310 | retval = ixgbe_set_vf_vlan(adapter, add, vid, vf); | 305 | retval = ixgbe_set_vf_vlan(adapter, add, vid, vf); |
311 | break; | 306 | break; |
312 | default: | 307 | default: |
313 | DPRINTK(DRV, ERR, "Unhandled Msg %8.8x\n", msgbuf[0]); | 308 | e_err("Unhandled Msg %8.8x\n", msgbuf[0]); |
314 | retval = IXGBE_ERR_MBX; | 309 | retval = IXGBE_ERR_MBX; |
315 | break; | 310 | break; |
316 | } | 311 | } |
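The ixgbe_vf_configuration() hunk above also drops the hand-rolled "%02X:%02X:..." MAC formatting in favour of the kernel's %pM printk extension, which prints a 6-byte address straight from a pointer. A minimal, hypothetical usage sketch:

    #include <linux/etherdevice.h>
    #include <linux/kernel.h>

    static void example_log_mac(void)
    {
            u8 mac[ETH_ALEN];

            random_ether_addr(mac);         /* same helper the hunk uses */
            pr_info("VF MAC %pM\n", mac);   /* prints as aa:bb:cc:dd:ee:ff */
    }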
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c index a16cff7e54a3..73f1e75f68d4 100644 --- a/drivers/net/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ixgbevf/ixgbevf_main.c | |||
@@ -3411,6 +3411,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev, | |||
3411 | netdev->features |= NETIF_F_IPV6_CSUM; | 3411 | netdev->features |= NETIF_F_IPV6_CSUM; |
3412 | netdev->features |= NETIF_F_TSO; | 3412 | netdev->features |= NETIF_F_TSO; |
3413 | netdev->features |= NETIF_F_TSO6; | 3413 | netdev->features |= NETIF_F_TSO6; |
3414 | netdev->features |= NETIF_F_GRO; | ||
3414 | netdev->vlan_features |= NETIF_F_TSO; | 3415 | netdev->vlan_features |= NETIF_F_TSO; |
3415 | netdev->vlan_features |= NETIF_F_TSO6; | 3416 | netdev->vlan_features |= NETIF_F_TSO6; |
3416 | netdev->vlan_features |= NETIF_F_IP_CSUM; | 3417 | netdev->vlan_features |= NETIF_F_IP_CSUM; |
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c index 1136c9a22b67..3832fa4961dd 100644 --- a/drivers/net/mac8390.c +++ b/drivers/net/mac8390.c | |||
@@ -157,6 +157,8 @@ static void dayna_block_output(struct net_device *dev, int count, | |||
157 | #define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c)) | 157 | #define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c)) |
158 | #define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c)) | 158 | #define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c)) |
159 | 159 | ||
160 | #define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c)) | ||
161 | |||
160 | /* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */ | 162 | /* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */ |
161 | static void slow_sane_get_8390_hdr(struct net_device *dev, | 163 | static void slow_sane_get_8390_hdr(struct net_device *dev, |
162 | struct e8390_pkt_hdr *hdr, int ring_page); | 164 | struct e8390_pkt_hdr *hdr, int ring_page); |
@@ -164,8 +166,8 @@ static void slow_sane_block_input(struct net_device *dev, int count, | |||
164 | struct sk_buff *skb, int ring_offset); | 166 | struct sk_buff *skb, int ring_offset); |
165 | static void slow_sane_block_output(struct net_device *dev, int count, | 167 | static void slow_sane_block_output(struct net_device *dev, int count, |
166 | const unsigned char *buf, int start_page); | 168 | const unsigned char *buf, int start_page); |
167 | static void word_memcpy_tocard(void *tp, const void *fp, int count); | 169 | static void word_memcpy_tocard(unsigned long tp, const void *fp, int count); |
168 | static void word_memcpy_fromcard(void *tp, const void *fp, int count); | 170 | static void word_memcpy_fromcard(void *tp, unsigned long fp, int count); |
169 | 171 | ||
170 | static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev) | 172 | static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev) |
171 | { | 173 | { |
@@ -245,9 +247,9 @@ static enum mac8390_access __init mac8390_testio(volatile unsigned long membase) | |||
245 | unsigned long outdata = 0xA5A0B5B0; | 247 | unsigned long outdata = 0xA5A0B5B0; |
246 | unsigned long indata = 0x00000000; | 248 | unsigned long indata = 0x00000000; |
247 | /* Try writing 32 bits */ | 249 | /* Try writing 32 bits */ |
248 | memcpy(membase, &outdata, 4); | 250 | memcpy_toio(membase, &outdata, 4); |
249 | /* Now compare them */ | 251 | /* Now compare them */ |
250 | if (memcmp((char *)&outdata, (char *)membase, 4) == 0) | 252 | if (memcmp_withio(&outdata, membase, 4) == 0) |
251 | return ACCESS_32; | 253 | return ACCESS_32; |
252 | /* Write 16 bit output */ | 254 | /* Write 16 bit output */ |
253 | word_memcpy_tocard(membase, &outdata, 4); | 255 | word_memcpy_tocard(membase, &outdata, 4); |
@@ -554,7 +556,7 @@ static int __init mac8390_initdev(struct net_device *dev, | |||
554 | case MAC8390_APPLE: | 556 | case MAC8390_APPLE: |
555 | switch (mac8390_testio(dev->mem_start)) { | 557 | switch (mac8390_testio(dev->mem_start)) { |
556 | case ACCESS_UNKNOWN: | 558 | case ACCESS_UNKNOWN: |
557 | pr_info("Don't know how to access card memory!\n"); | 559 | pr_err("Don't know how to access card memory!\n"); |
558 | return -ENODEV; | 560 | return -ENODEV; |
559 | break; | 561 | break; |
560 | 562 | ||
@@ -641,12 +643,13 @@ static int __init mac8390_initdev(struct net_device *dev, | |||
641 | 643 | ||
642 | static int mac8390_open(struct net_device *dev) | 644 | static int mac8390_open(struct net_device *dev) |
643 | { | 645 | { |
646 | int err; | ||
647 | |||
644 | __ei_open(dev); | 648 | __ei_open(dev); |
645 | if (request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev)) { | 649 | err = request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev); |
646 | pr_info("%s: unable to get IRQ %d.\n", dev->name, dev->irq); | 650 | if (err) |
647 | return -EAGAIN; | 651 | pr_err("%s: unable to get IRQ %d\n", dev->name, dev->irq); |
648 | } | 652 | return err; |
649 | return 0; | ||
650 | } | 653 | } |
651 | 654 | ||
652 | static int mac8390_close(struct net_device *dev) | 655 | static int mac8390_close(struct net_device *dev) |
@@ -731,7 +734,7 @@ static void sane_get_8390_hdr(struct net_device *dev, | |||
731 | struct e8390_pkt_hdr *hdr, int ring_page) | 734 | struct e8390_pkt_hdr *hdr, int ring_page) |
732 | { | 735 | { |
733 | unsigned long hdr_start = (ring_page - WD_START_PG)<<8; | 736 | unsigned long hdr_start = (ring_page - WD_START_PG)<<8; |
734 | memcpy_fromio((void *)hdr, (char *)dev->mem_start + hdr_start, 4); | 737 | memcpy_fromio(hdr, dev->mem_start + hdr_start, 4); |
735 | /* Fix endianness */ | 738 | /* Fix endianness */ |
736 | hdr->count = swab16(hdr->count); | 739 | hdr->count = swab16(hdr->count); |
737 | } | 740 | } |
@@ -745,14 +748,13 @@ static void sane_block_input(struct net_device *dev, int count, | |||
745 | if (xfer_start + count > ei_status.rmem_end) { | 748 | if (xfer_start + count > ei_status.rmem_end) { |
746 | /* We must wrap the input move. */ | 749 | /* We must wrap the input move. */ |
747 | int semi_count = ei_status.rmem_end - xfer_start; | 750 | int semi_count = ei_status.rmem_end - xfer_start; |
748 | memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base, | 751 | memcpy_fromio(skb->data, dev->mem_start + xfer_base, |
749 | semi_count); | 752 | semi_count); |
750 | count -= semi_count; | 753 | count -= semi_count; |
751 | memcpy_toio(skb->data + semi_count, | 754 | memcpy_fromio(skb->data + semi_count, ei_status.rmem_start, |
752 | (char *)ei_status.rmem_start, count); | ||
753 | } else { | ||
754 | memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base, | ||
755 | count); | 755 | count); |
756 | } else { | ||
757 | memcpy_fromio(skb->data, dev->mem_start + xfer_base, count); | ||
756 | } | 758 | } |
757 | } | 759 | } |
758 | 760 | ||
@@ -761,7 +763,7 @@ static void sane_block_output(struct net_device *dev, int count, | |||
761 | { | 763 | { |
762 | long shmem = (start_page - WD_START_PG)<<8; | 764 | long shmem = (start_page - WD_START_PG)<<8; |
763 | 765 | ||
764 | memcpy_toio((char *)dev->mem_start + shmem, buf, count); | 766 | memcpy_toio(dev->mem_start + shmem, buf, count); |
765 | } | 767 | } |
766 | 768 | ||
767 | /* dayna block input/output */ | 769 | /* dayna block input/output */ |
@@ -812,7 +814,7 @@ static void slow_sane_get_8390_hdr(struct net_device *dev, | |||
812 | int ring_page) | 814 | int ring_page) |
813 | { | 815 | { |
814 | unsigned long hdr_start = (ring_page - WD_START_PG)<<8; | 816 | unsigned long hdr_start = (ring_page - WD_START_PG)<<8; |
815 | word_memcpy_fromcard(hdr, (char *)dev->mem_start + hdr_start, 4); | 817 | word_memcpy_fromcard(hdr, dev->mem_start + hdr_start, 4); |
816 | /* Register endianism - fix here rather than 8390.c */ | 818 | /* Register endianism - fix here rather than 8390.c */ |
817 | hdr->count = (hdr->count&0xFF)<<8|(hdr->count>>8); | 819 | hdr->count = (hdr->count&0xFF)<<8|(hdr->count>>8); |
818 | } | 820 | } |
@@ -826,15 +828,14 @@ static void slow_sane_block_input(struct net_device *dev, int count, | |||
826 | if (xfer_start + count > ei_status.rmem_end) { | 828 | if (xfer_start + count > ei_status.rmem_end) { |
827 | /* We must wrap the input move. */ | 829 | /* We must wrap the input move. */ |
828 | int semi_count = ei_status.rmem_end - xfer_start; | 830 | int semi_count = ei_status.rmem_end - xfer_start; |
829 | word_memcpy_fromcard(skb->data, | 831 | word_memcpy_fromcard(skb->data, dev->mem_start + xfer_base, |
830 | (char *)dev->mem_start + xfer_base, | ||
831 | semi_count); | 832 | semi_count); |
832 | count -= semi_count; | 833 | count -= semi_count; |
833 | word_memcpy_fromcard(skb->data + semi_count, | 834 | word_memcpy_fromcard(skb->data + semi_count, |
834 | (char *)ei_status.rmem_start, count); | 835 | ei_status.rmem_start, count); |
835 | } else { | 836 | } else { |
836 | word_memcpy_fromcard(skb->data, | 837 | word_memcpy_fromcard(skb->data, dev->mem_start + xfer_base, |
837 | (char *)dev->mem_start + xfer_base, count); | 838 | count); |
838 | } | 839 | } |
839 | } | 840 | } |
840 | 841 | ||
@@ -843,12 +844,12 @@ static void slow_sane_block_output(struct net_device *dev, int count, | |||
843 | { | 844 | { |
844 | long shmem = (start_page - WD_START_PG)<<8; | 845 | long shmem = (start_page - WD_START_PG)<<8; |
845 | 846 | ||
846 | word_memcpy_tocard((char *)dev->mem_start + shmem, buf, count); | 847 | word_memcpy_tocard(dev->mem_start + shmem, buf, count); |
847 | } | 848 | } |
848 | 849 | ||
849 | static void word_memcpy_tocard(void *tp, const void *fp, int count) | 850 | static void word_memcpy_tocard(unsigned long tp, const void *fp, int count) |
850 | { | 851 | { |
851 | volatile unsigned short *to = tp; | 852 | volatile unsigned short *to = (void *)tp; |
852 | const unsigned short *from = fp; | 853 | const unsigned short *from = fp; |
853 | 854 | ||
854 | count++; | 855 | count++; |
@@ -858,10 +859,10 @@ static void word_memcpy_tocard(void *tp, const void *fp, int count) | |||
858 | *to++ = *from++; | 859 | *to++ = *from++; |
859 | } | 860 | } |
860 | 861 | ||
861 | static void word_memcpy_fromcard(void *tp, const void *fp, int count) | 862 | static void word_memcpy_fromcard(void *tp, unsigned long fp, int count) |
862 | { | 863 | { |
863 | unsigned short *to = tp; | 864 | unsigned short *to = tp; |
864 | const volatile unsigned short *from = fp; | 865 | const volatile unsigned short *from = (const void *)fp; |
865 | 866 | ||
866 | count++; | 867 | count++; |
867 | count /= 2; | 868 | count /= 2; |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 87e8d4cb4057..53422ce26f7f 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -145,15 +145,16 @@ static void macvlan_broadcast(struct sk_buff *skb, | |||
145 | } | 145 | } |
146 | 146 | ||
147 | /* called under rcu_read_lock() from netif_receive_skb */ | 147 | /* called under rcu_read_lock() from netif_receive_skb */ |
148 | static struct sk_buff *macvlan_handle_frame(struct macvlan_port *port, | 148 | static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb) |
149 | struct sk_buff *skb) | ||
150 | { | 149 | { |
150 | struct macvlan_port *port; | ||
151 | const struct ethhdr *eth = eth_hdr(skb); | 151 | const struct ethhdr *eth = eth_hdr(skb); |
152 | const struct macvlan_dev *vlan; | 152 | const struct macvlan_dev *vlan; |
153 | const struct macvlan_dev *src; | 153 | const struct macvlan_dev *src; |
154 | struct net_device *dev; | 154 | struct net_device *dev; |
155 | unsigned int len; | 155 | unsigned int len; |
156 | 156 | ||
157 | port = rcu_dereference(skb->dev->macvlan_port); | ||
157 | if (is_multicast_ether_addr(eth->h_dest)) { | 158 | if (is_multicast_ether_addr(eth->h_dest)) { |
158 | src = macvlan_hash_lookup(port, eth->h_source); | 159 | src = macvlan_hash_lookup(port, eth->h_source); |
159 | if (!src) | 160 | if (!src) |
@@ -515,6 +516,7 @@ static int macvlan_port_create(struct net_device *dev) | |||
515 | { | 516 | { |
516 | struct macvlan_port *port; | 517 | struct macvlan_port *port; |
517 | unsigned int i; | 518 | unsigned int i; |
519 | int err; | ||
518 | 520 | ||
519 | if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK) | 521 | if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK) |
520 | return -EINVAL; | 522 | return -EINVAL; |
@@ -528,13 +530,21 @@ static int macvlan_port_create(struct net_device *dev) | |||
528 | for (i = 0; i < MACVLAN_HASH_SIZE; i++) | 530 | for (i = 0; i < MACVLAN_HASH_SIZE; i++) |
529 | INIT_HLIST_HEAD(&port->vlan_hash[i]); | 531 | INIT_HLIST_HEAD(&port->vlan_hash[i]); |
530 | rcu_assign_pointer(dev->macvlan_port, port); | 532 | rcu_assign_pointer(dev->macvlan_port, port); |
531 | return 0; | 533 | |
534 | err = netdev_rx_handler_register(dev, macvlan_handle_frame); | ||
535 | if (err) { | ||
536 | rcu_assign_pointer(dev->macvlan_port, NULL); | ||
537 | kfree(port); | ||
538 | } | ||
539 | |||
540 | return err; | ||
532 | } | 541 | } |
533 | 542 | ||
534 | static void macvlan_port_destroy(struct net_device *dev) | 543 | static void macvlan_port_destroy(struct net_device *dev) |
535 | { | 544 | { |
536 | struct macvlan_port *port = dev->macvlan_port; | 545 | struct macvlan_port *port = dev->macvlan_port; |
537 | 546 | ||
547 | netdev_rx_handler_unregister(dev); | ||
538 | rcu_assign_pointer(dev->macvlan_port, NULL); | 548 | rcu_assign_pointer(dev->macvlan_port, NULL); |
539 | synchronize_rcu(); | 549 | synchronize_rcu(); |
540 | kfree(port); | 550 | kfree(port); |
@@ -767,14 +777,12 @@ static int __init macvlan_init_module(void) | |||
767 | int err; | 777 | int err; |
768 | 778 | ||
769 | register_netdevice_notifier(&macvlan_notifier_block); | 779 | register_netdevice_notifier(&macvlan_notifier_block); |
770 | macvlan_handle_frame_hook = macvlan_handle_frame; | ||
771 | 780 | ||
772 | err = macvlan_link_register(&macvlan_link_ops); | 781 | err = macvlan_link_register(&macvlan_link_ops); |
773 | if (err < 0) | 782 | if (err < 0) |
774 | goto err1; | 783 | goto err1; |
775 | return 0; | 784 | return 0; |
776 | err1: | 785 | err1: |
777 | macvlan_handle_frame_hook = NULL; | ||
778 | unregister_netdevice_notifier(&macvlan_notifier_block); | 786 | unregister_netdevice_notifier(&macvlan_notifier_block); |
779 | return err; | 787 | return err; |
780 | } | 788 | } |
@@ -782,7 +790,6 @@ err1: | |||
782 | static void __exit macvlan_cleanup_module(void) | 790 | static void __exit macvlan_cleanup_module(void) |
783 | { | 791 | { |
784 | rtnl_link_unregister(&macvlan_link_ops); | 792 | rtnl_link_unregister(&macvlan_link_ops); |
785 | macvlan_handle_frame_hook = NULL; | ||
786 | unregister_netdevice_notifier(&macvlan_notifier_block); | 793 | unregister_netdevice_notifier(&macvlan_notifier_block); |
787 | } | 794 | } |
788 | 795 | ||
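The macvlan changes above move from the global macvlan_handle_frame_hook to the per-device rx_handler API: the handler is attached in macvlan_port_create() and detached in macvlan_port_destroy(). Below is an isolated sketch of that register/unregister pattern; the example_* names are hypothetical, and the two-argument register call and skb-returning handler prototype mirror the usage in these hunks (later kernels changed both).

    #include <linux/netdevice.h>
    #include <linux/rcupdate.h>
    #include <linux/skbuff.h>

    /* Called under rcu_read_lock() from the RX path; return the skb to let
     * normal delivery continue, or consume it and return NULL. */
    static struct sk_buff *example_rx_handler(struct sk_buff *skb)
    {
            /* per-device state would be looked up via skb->dev here */
            return skb;
    }

    static int example_attach(struct net_device *dev)
    {
            /* fails (e.g. -EBUSY) if another handler already owns the device */
            return netdev_rx_handler_register(dev, example_rx_handler);
    }

    static void example_detach(struct net_device *dev)
    {
            netdev_rx_handler_unregister(dev);
            synchronize_rcu();      /* wait out in-flight handlers, as the hunk does */
    }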
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c index 96180c0ec206..a0d8a26f5a02 100644 --- a/drivers/net/mlx4/en_netdev.c +++ b/drivers/net/mlx4/en_netdev.c | |||
@@ -961,6 +961,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
961 | } | 961 | } |
962 | 962 | ||
963 | SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev); | 963 | SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev); |
964 | dev->dev_id = port - 1; | ||
964 | 965 | ||
965 | /* | 966 | /* |
966 | * Initialize driver private data | 967 | * Initialize driver private data |
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c index 423053482ed5..22d0b3b796b4 100644 --- a/drivers/net/mlx4/eq.c +++ b/drivers/net/mlx4/eq.c | |||
@@ -110,7 +110,7 @@ struct mlx4_eqe { | |||
110 | u32 raw[6]; | 110 | u32 raw[6]; |
111 | struct { | 111 | struct { |
112 | __be32 cqn; | 112 | __be32 cqn; |
113 | } __attribute__((packed)) comp; | 113 | } __packed comp; |
114 | struct { | 114 | struct { |
115 | u16 reserved1; | 115 | u16 reserved1; |
116 | __be16 token; | 116 | __be16 token; |
@@ -118,27 +118,27 @@ struct mlx4_eqe { | |||
118 | u8 reserved3[3]; | 118 | u8 reserved3[3]; |
119 | u8 status; | 119 | u8 status; |
120 | __be64 out_param; | 120 | __be64 out_param; |
121 | } __attribute__((packed)) cmd; | 121 | } __packed cmd; |
122 | struct { | 122 | struct { |
123 | __be32 qpn; | 123 | __be32 qpn; |
124 | } __attribute__((packed)) qp; | 124 | } __packed qp; |
125 | struct { | 125 | struct { |
126 | __be32 srqn; | 126 | __be32 srqn; |
127 | } __attribute__((packed)) srq; | 127 | } __packed srq; |
128 | struct { | 128 | struct { |
129 | __be32 cqn; | 129 | __be32 cqn; |
130 | u32 reserved1; | 130 | u32 reserved1; |
131 | u8 reserved2[3]; | 131 | u8 reserved2[3]; |
132 | u8 syndrome; | 132 | u8 syndrome; |
133 | } __attribute__((packed)) cq_err; | 133 | } __packed cq_err; |
134 | struct { | 134 | struct { |
135 | u32 reserved1[2]; | 135 | u32 reserved1[2]; |
136 | __be32 port; | 136 | __be32 port; |
137 | } __attribute__((packed)) port_change; | 137 | } __packed port_change; |
138 | } event; | 138 | } event; |
139 | u8 reserved3[3]; | 139 | u8 reserved3[3]; |
140 | u8 owner; | 140 | u8 owner; |
141 | } __attribute__((packed)); | 141 | } __packed; |
142 | 142 | ||
143 | static void eq_set_ci(struct mlx4_eq *eq, int req_not) | 143 | static void eq_set_ci(struct mlx4_eq *eq, int req_not) |
144 | { | 144 | { |
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c index 3dc69be4949f..9c188bdd7f4f 100644 --- a/drivers/net/mlx4/mr.c +++ b/drivers/net/mlx4/mr.c | |||
@@ -58,7 +58,7 @@ struct mlx4_mpt_entry { | |||
58 | __be32 mtt_sz; | 58 | __be32 mtt_sz; |
59 | __be32 entity_size; | 59 | __be32 entity_size; |
60 | __be32 first_byte_offset; | 60 | __be32 first_byte_offset; |
61 | } __attribute__((packed)); | 61 | } __packed; |
62 | 62 | ||
63 | #define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28) | 63 | #define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28) |
64 | #define MLX4_MPT_FLAG_FREE (0x3UL << 28) | 64 | #define MLX4_MPT_FLAG_FREE (0x3UL << 28) |
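The mlx4 hunks here (and the ps3_gelic_wireless ones later in this diff) replace open-coded __attribute__((packed)) annotations with the kernel's __packed shorthand, which is provided via <linux/compiler.h> and expands to the same attribute. A minimal before/after illustration, using made-up field names:

    #include <linux/compiler.h>
    #include <linux/types.h>

    struct example_old {            /* spelling being removed */
            __be32 token;
            u8 status;
    } __attribute__((packed));

    struct example_new {            /* preferred spelling */
            __be32 token;
            u8 status;
    } __packed;

    /* Both structs have identical 5-byte layouts; the change is stylistic. */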
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 1b2c29150202..e7b4187da057 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c | |||
@@ -69,7 +69,6 @@ | |||
69 | 69 | ||
70 | #define MPHDRLEN 6 /* multilink protocol header length */ | 70 | #define MPHDRLEN 6 /* multilink protocol header length */ |
71 | #define MPHDRLEN_SSN 4 /* ditto with short sequence numbers */ | 71 | #define MPHDRLEN_SSN 4 /* ditto with short sequence numbers */ |
72 | #define MIN_FRAG_SIZE 64 | ||
73 | 72 | ||
74 | /* | 73 | /* |
75 | * An instance of /dev/ppp can be associated with either a ppp | 74 | * An instance of /dev/ppp can be associated with either a ppp |
@@ -539,14 +538,9 @@ static int get_filter(void __user *arg, struct sock_filter **p) | |||
539 | } | 538 | } |
540 | 539 | ||
541 | len = uprog.len * sizeof(struct sock_filter); | 540 | len = uprog.len * sizeof(struct sock_filter); |
542 | code = kmalloc(len, GFP_KERNEL); | 541 | code = memdup_user(uprog.filter, len); |
543 | if (code == NULL) | 542 | if (IS_ERR(code)) |
544 | return -ENOMEM; | 543 | return PTR_ERR(code); |
545 | |||
546 | if (copy_from_user(code, uprog.filter, len)) { | ||
547 | kfree(code); | ||
548 | return -EFAULT; | ||
549 | } | ||
550 | 544 | ||
551 | err = sk_chk_filter(code, uprog.len); | 545 | err = sk_chk_filter(code, uprog.len); |
552 | if (err) { | 546 | if (err) { |
@@ -1933,9 +1927,9 @@ ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) | |||
1933 | /* If the queue is getting long, don't wait any longer for packets | 1927 | /* If the queue is getting long, don't wait any longer for packets |
1934 | before the start of the queue. */ | 1928 | before the start of the queue. */ |
1935 | if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) { | 1929 | if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) { |
1936 | struct sk_buff *skb = skb_peek(&ppp->mrq); | 1930 | struct sk_buff *mskb = skb_peek(&ppp->mrq); |
1937 | if (seq_before(ppp->minseq, skb->sequence)) | 1931 | if (seq_before(ppp->minseq, mskb->sequence)) |
1938 | ppp->minseq = skb->sequence; | 1932 | ppp->minseq = mskb->sequence; |
1939 | } | 1933 | } |
1940 | 1934 | ||
1941 | /* Pull completed packets off the queue and receive them. */ | 1935 | /* Pull completed packets off the queue and receive them. */ |
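The get_filter() change above replaces the kmalloc() plus copy_from_user() pair with memdup_user(), which allocates and copies in one step and reports failure through ERR_PTR() rather than a separate NULL/-EFAULT split. A minimal sketch of the resulting call pattern; the helper name and surrounding code are illustrative, not taken from the driver:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* Illustrative helper (not in the patch): duplicate 'len' bytes from user
 * space into kernel memory. memdup_user() returns ERR_PTR(-ENOMEM) or
 * ERR_PTR(-EFAULT) on failure, so a single IS_ERR() check replaces the
 * separate NULL and copy_from_user() error paths.
 */
static int demo_load_user_buf(const void __user *uptr, size_t len, void **out)
{
	void *buf;

	buf = memdup_user(uptr, len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	*out = buf;		/* caller kfree()s the buffer when done */
	return 0;
}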
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index 805b64d1e893..7ebb8e87efa4 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c | |||
@@ -949,7 +949,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb) | |||
949 | 949 | ||
950 | abort: | 950 | abort: |
951 | kfree_skb(skb); | 951 | kfree_skb(skb); |
952 | return 1; | 952 | return 0; |
953 | } | 953 | } |
954 | 954 | ||
955 | /************************************************************************ | 955 | /************************************************************************ |
diff --git a/drivers/net/ps3_gelic_wireless.h b/drivers/net/ps3_gelic_wireless.h index 0a88b535197a..f7e51b7d7049 100644 --- a/drivers/net/ps3_gelic_wireless.h +++ b/drivers/net/ps3_gelic_wireless.h | |||
@@ -74,7 +74,7 @@ struct gelic_eurus_common_cfg { | |||
74 | u16 bss_type; /* infra or adhoc */ | 74 | u16 bss_type; /* infra or adhoc */ |
75 | u16 auth_method; /* shared key or open */ | 75 | u16 auth_method; /* shared key or open */ |
76 | u16 op_mode; /* B/G */ | 76 | u16 op_mode; /* B/G */ |
77 | } __attribute__((packed)); | 77 | } __packed; |
78 | 78 | ||
79 | 79 | ||
80 | /* for GELIC_EURUS_CMD_WEP_CFG */ | 80 | /* for GELIC_EURUS_CMD_WEP_CFG */ |
@@ -88,7 +88,7 @@ struct gelic_eurus_wep_cfg { | |||
88 | /* all fields are big endian */ | 88 | /* all fields are big endian */ |
89 | u16 security; | 89 | u16 security; |
90 | u8 key[4][16]; | 90 | u8 key[4][16]; |
91 | } __attribute__((packed)); | 91 | } __packed; |
92 | 92 | ||
93 | /* for GELIC_EURUS_CMD_WPA_CFG */ | 93 | /* for GELIC_EURUS_CMD_WPA_CFG */ |
94 | enum gelic_eurus_wpa_security { | 94 | enum gelic_eurus_wpa_security { |
@@ -120,7 +120,7 @@ struct gelic_eurus_wpa_cfg { | |||
120 | u16 security; | 120 | u16 security; |
121 | u16 psk_type; /* psk key encoding type */ | 121 | u16 psk_type; /* psk key encoding type */ |
122 | u8 psk[GELIC_WL_EURUS_PSK_MAX_LEN]; /* psk key; hex or passphrase */ | 122 | u8 psk[GELIC_WL_EURUS_PSK_MAX_LEN]; /* psk key; hex or passphrase */ |
123 | } __attribute__((packed)); | 123 | } __packed; |
124 | 124 | ||
125 | /* for GELIC_EURUS_CMD_{START,GET}_SCAN */ | 125 | /* for GELIC_EURUS_CMD_{START,GET}_SCAN */ |
126 | enum gelic_eurus_scan_capability { | 126 | enum gelic_eurus_scan_capability { |
@@ -171,7 +171,7 @@ struct gelic_eurus_scan_info { | |||
171 | __be32 reserved3; | 171 | __be32 reserved3; |
172 | __be32 reserved4; | 172 | __be32 reserved4; |
173 | u8 elements[0]; /* ie */ | 173 | u8 elements[0]; /* ie */ |
174 | } __attribute__ ((packed)); | 174 | } __packed; |
175 | 175 | ||
176 | /* the hypervisor returns bbs up to 16 */ | 176 | /* the hypervisor returns bbs up to 16 */ |
177 | #define GELIC_EURUS_MAX_SCAN (16) | 177 | #define GELIC_EURUS_MAX_SCAN (16) |
@@ -193,7 +193,7 @@ struct gelic_wl_scan_info { | |||
193 | struct gelic_eurus_rssi_info { | 193 | struct gelic_eurus_rssi_info { |
194 | /* big endian */ | 194 | /* big endian */ |
195 | __be16 rssi; | 195 | __be16 rssi; |
196 | } __attribute__ ((packed)); | 196 | } __packed; |
197 | 197 | ||
198 | 198 | ||
199 | /* for 'stat' member of gelic_wl_info */ | 199 | /* for 'stat' member of gelic_wl_info */ |
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h index 896d40df9a13..02db363f20cd 100644 --- a/drivers/net/qlcnic/qlcnic.h +++ b/drivers/net/qlcnic/qlcnic.h | |||
@@ -51,8 +51,8 @@ | |||
51 | 51 | ||
52 | #define _QLCNIC_LINUX_MAJOR 5 | 52 | #define _QLCNIC_LINUX_MAJOR 5 |
53 | #define _QLCNIC_LINUX_MINOR 0 | 53 | #define _QLCNIC_LINUX_MINOR 0 |
54 | #define _QLCNIC_LINUX_SUBVERSION 2 | 54 | #define _QLCNIC_LINUX_SUBVERSION 3 |
55 | #define QLCNIC_LINUX_VERSIONID "5.0.2" | 55 | #define QLCNIC_LINUX_VERSIONID "5.0.3" |
56 | #define QLCNIC_DRV_IDC_VER 0x01 | 56 | #define QLCNIC_DRV_IDC_VER 0x01 |
57 | 57 | ||
58 | #define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) | 58 | #define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) |
@@ -197,8 +197,7 @@ struct cmd_desc_type0 { | |||
197 | 197 | ||
198 | __le64 addr_buffer4; | 198 | __le64 addr_buffer4; |
199 | 199 | ||
200 | __le32 reserved2; | 200 | u8 eth_addr[ETH_ALEN]; |
201 | __le16 reserved; | ||
202 | __le16 vlan_TCI; | 201 | __le16 vlan_TCI; |
203 | 202 | ||
204 | } __attribute__ ((aligned(64))); | 203 | } __attribute__ ((aligned(64))); |
@@ -315,6 +314,8 @@ struct uni_data_desc{ | |||
315 | #define QLCNIC_BRDTYPE_P3_10G_XFP 0x0032 | 314 | #define QLCNIC_BRDTYPE_P3_10G_XFP 0x0032 |
316 | #define QLCNIC_BRDTYPE_P3_10G_TP 0x0080 | 315 | #define QLCNIC_BRDTYPE_P3_10G_TP 0x0080 |
317 | 316 | ||
317 | #define QLCNIC_MSIX_TABLE_OFFSET 0x44 | ||
318 | |||
318 | /* Flash memory map */ | 319 | /* Flash memory map */ |
319 | #define QLCNIC_BRDCFG_START 0x4000 /* board config */ | 320 | #define QLCNIC_BRDCFG_START 0x4000 /* board config */ |
320 | #define QLCNIC_BOOTLD_START 0x10000 /* bootld */ | 321 | #define QLCNIC_BOOTLD_START 0x10000 /* bootld */ |
@@ -542,7 +543,17 @@ struct qlcnic_recv_context { | |||
542 | #define QLCNIC_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c | 543 | #define QLCNIC_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c |
543 | #define QLCNIC_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d | 544 | #define QLCNIC_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d |
544 | #define QLCNIC_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e | 545 | #define QLCNIC_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e |
545 | #define QLCNIC_CDRP_CMD_MAX 0x0000001f | 546 | #define QLCNIC_CDRP_CMD_MAC_ADDRESS 0x0000001f |
547 | |||
548 | #define QLCNIC_CDRP_CMD_GET_PCI_INFO 0x00000020 | ||
549 | #define QLCNIC_CDRP_CMD_GET_NIC_INFO 0x00000021 | ||
550 | #define QLCNIC_CDRP_CMD_SET_NIC_INFO 0x00000022 | ||
551 | #define QLCNIC_CDRP_CMD_RESET_NPAR 0x00000023 | ||
552 | #define QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY 0x00000024 | ||
553 | #define QLCNIC_CDRP_CMD_TOGGLE_ESWITCH 0x00000025 | ||
554 | #define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS 0x00000026 | ||
555 | #define QLCNIC_CDRP_CMD_SET_PORTMIRRORING 0x00000027 | ||
556 | #define QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH 0x00000028 | ||
546 | 557 | ||
547 | #define QLCNIC_RCODE_SUCCESS 0 | 558 | #define QLCNIC_RCODE_SUCCESS 0 |
548 | #define QLCNIC_RCODE_TIMEOUT 17 | 559 | #define QLCNIC_RCODE_TIMEOUT 17 |
@@ -560,7 +571,6 @@ struct qlcnic_recv_context { | |||
560 | /* | 571 | /* |
561 | * Context state | 572 | * Context state |
562 | */ | 573 | */ |
563 | #define QLCHAL_VERSION 1 | ||
564 | 574 | ||
565 | #define QLCNIC_HOST_CTX_STATE_ACTIVE 2 | 575 | #define QLCNIC_HOST_CTX_STATE_ACTIVE 2 |
566 | 576 | ||
@@ -881,12 +891,14 @@ struct qlcnic_mac_req { | |||
881 | #define QLCNIC_LRO_ENABLED 0x08 | 891 | #define QLCNIC_LRO_ENABLED 0x08 |
882 | #define QLCNIC_BRIDGE_ENABLED 0X10 | 892 | #define QLCNIC_BRIDGE_ENABLED 0X10 |
883 | #define QLCNIC_DIAG_ENABLED 0x20 | 893 | #define QLCNIC_DIAG_ENABLED 0x20 |
894 | #define QLCNIC_NPAR_ENABLED 0x40 | ||
884 | #define QLCNIC_IS_MSI_FAMILY(adapter) \ | 895 | #define QLCNIC_IS_MSI_FAMILY(adapter) \ |
885 | ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) | 896 | ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) |
886 | 897 | ||
887 | #define MSIX_ENTRIES_PER_ADAPTER NUM_STS_DESC_RINGS | 898 | #define MSIX_ENTRIES_PER_ADAPTER NUM_STS_DESC_RINGS |
888 | #define QLCNIC_MSIX_TBL_SPACE 8192 | 899 | #define QLCNIC_MSIX_TBL_SPACE 8192 |
889 | #define QLCNIC_PCI_REG_MSIX_TBL 0x44 | 900 | #define QLCNIC_PCI_REG_MSIX_TBL 0x44 |
901 | #define QLCNIC_MSIX_TBL_PGSIZE 4096 | ||
890 | 902 | ||
891 | #define QLCNIC_NETDEV_WEIGHT 128 | 903 | #define QLCNIC_NETDEV_WEIGHT 128 |
892 | #define QLCNIC_ADAPTER_UP_MAGIC 777 | 904 | #define QLCNIC_ADAPTER_UP_MAGIC 777 |
@@ -923,7 +935,6 @@ struct qlcnic_adapter { | |||
923 | u8 mc_enabled; | 935 | u8 mc_enabled; |
924 | u8 max_mc_count; | 936 | u8 max_mc_count; |
925 | u8 rss_supported; | 937 | u8 rss_supported; |
926 | u8 rsrvd1; | ||
927 | u8 fw_wait_cnt; | 938 | u8 fw_wait_cnt; |
928 | u8 fw_fail_cnt; | 939 | u8 fw_fail_cnt; |
929 | u8 tx_timeo_cnt; | 940 | u8 tx_timeo_cnt; |
@@ -940,6 +951,15 @@ struct qlcnic_adapter { | |||
940 | u16 link_autoneg; | 951 | u16 link_autoneg; |
941 | u16 module_type; | 952 | u16 module_type; |
942 | 953 | ||
954 | u16 op_mode; | ||
955 | u16 switch_mode; | ||
956 | u16 max_tx_ques; | ||
957 | u16 max_rx_ques; | ||
958 | u16 min_tx_bw; | ||
959 | u16 max_tx_bw; | ||
960 | u16 max_mtu; | ||
961 | |||
962 | u32 fw_hal_version; | ||
943 | u32 capabilities; | 963 | u32 capabilities; |
944 | u32 flags; | 964 | u32 flags; |
945 | u32 irq; | 965 | u32 irq; |
@@ -948,18 +968,22 @@ struct qlcnic_adapter { | |||
948 | u32 int_vec_bit; | 968 | u32 int_vec_bit; |
949 | u32 heartbit; | 969 | u32 heartbit; |
950 | 970 | ||
971 | u8 max_mac_filters; | ||
951 | u8 dev_state; | 972 | u8 dev_state; |
952 | u8 diag_test; | 973 | u8 diag_test; |
953 | u8 diag_cnt; | 974 | u8 diag_cnt; |
954 | u8 reset_ack_timeo; | 975 | u8 reset_ack_timeo; |
955 | u8 dev_init_timeo; | 976 | u8 dev_init_timeo; |
956 | u8 rsrd1; | ||
957 | u16 msg_enable; | 977 | u16 msg_enable; |
958 | 978 | ||
959 | u8 mac_addr[ETH_ALEN]; | 979 | u8 mac_addr[ETH_ALEN]; |
960 | 980 | ||
961 | u64 dev_rst_time; | 981 | u64 dev_rst_time; |
962 | 982 | ||
983 | struct qlcnic_pci_info *npars; | ||
984 | struct qlcnic_eswitch *eswitch; | ||
985 | struct qlcnic_nic_template *nic_ops; | ||
986 | |||
963 | struct qlcnic_adapter_stats stats; | 987 | struct qlcnic_adapter_stats stats; |
964 | 988 | ||
965 | struct qlcnic_recv_context recv_ctx; | 989 | struct qlcnic_recv_context recv_ctx; |
@@ -984,6 +1008,53 @@ struct qlcnic_adapter { | |||
984 | const struct firmware *fw; | 1008 | const struct firmware *fw; |
985 | }; | 1009 | }; |
986 | 1010 | ||
1011 | struct qlcnic_info { | ||
1012 | __le16 pci_func; | ||
1013 | __le16 op_mode; /* 1 = Priv, 2 = NP, 3 = NP passthru */ | ||
1014 | __le16 phys_port; | ||
1015 | __le16 switch_mode; /* 0 = disabled, 1 = int, 2 = ext */ | ||
1016 | |||
1017 | __le32 capabilities; | ||
1018 | u8 max_mac_filters; | ||
1019 | u8 reserved1; | ||
1020 | __le16 max_mtu; | ||
1021 | |||
1022 | __le16 max_tx_ques; | ||
1023 | __le16 max_rx_ques; | ||
1024 | __le16 min_tx_bw; | ||
1025 | __le16 max_tx_bw; | ||
1026 | u8 reserved2[104]; | ||
1027 | }; | ||
1028 | |||
1029 | struct qlcnic_pci_info { | ||
1030 | __le16 id; /* pci function id */ | ||
1031 | __le16 active; /* 1 = Enabled */ | ||
1032 | __le16 type; /* 1 = NIC, 2 = FCoE, 3 = iSCSI */ | ||
1033 | __le16 default_port; /* default port number */ | ||
1034 | |||
1035 | __le16 tx_min_bw; /* Multiple of 100mbpc */ | ||
1036 | __le16 tx_max_bw; | ||
1037 | __le16 reserved1[2]; | ||
1038 | |||
1039 | u8 mac[ETH_ALEN]; | ||
1040 | u8 reserved2[106]; | ||
1041 | }; | ||
1042 | |||
1043 | struct qlcnic_eswitch { | ||
1044 | u8 port; | ||
1045 | u8 active_vports; | ||
1046 | u8 active_vlans; | ||
1047 | u8 active_ucast_filters; | ||
1048 | u8 max_ucast_filters; | ||
1049 | u8 max_active_vlans; | ||
1050 | |||
1051 | u32 flags; | ||
1052 | #define QLCNIC_SWITCH_ENABLE BIT_1 | ||
1053 | #define QLCNIC_SWITCH_VLAN_FILTERING BIT_2 | ||
1054 | #define QLCNIC_SWITCH_PROMISC_MODE BIT_3 | ||
1055 | #define QLCNIC_SWITCH_PORT_MIRRORING BIT_4 | ||
1056 | }; | ||
1057 | |||
987 | int qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val); | 1058 | int qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val); |
988 | int qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val); | 1059 | int qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val); |
989 | 1060 | ||
@@ -1070,13 +1141,14 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup); | |||
1070 | int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu); | 1141 | int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu); |
1071 | int qlcnic_change_mtu(struct net_device *netdev, int new_mtu); | 1142 | int qlcnic_change_mtu(struct net_device *netdev, int new_mtu); |
1072 | int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable); | 1143 | int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable); |
1073 | int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable); | 1144 | int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable); |
1074 | int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter); | 1145 | int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter); |
1075 | void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter, | 1146 | void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter, |
1076 | struct qlcnic_host_tx_ring *tx_ring); | 1147 | struct qlcnic_host_tx_ring *tx_ring); |
1077 | int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac); | 1148 | int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u8 *mac); |
1078 | void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter); | 1149 | void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter); |
1079 | int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter); | 1150 | int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter); |
1151 | void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *); | ||
1080 | 1152 | ||
1081 | /* Functions from qlcnic_main.c */ | 1153 | /* Functions from qlcnic_main.c */ |
1082 | int qlcnic_reset_context(struct qlcnic_adapter *); | 1154 | int qlcnic_reset_context(struct qlcnic_adapter *); |
@@ -1088,6 +1160,25 @@ int qlcnic_check_loopback_buff(unsigned char *data); | |||
1088 | netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); | 1160 | netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); |
1089 | void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring); | 1161 | void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring); |
1090 | 1162 | ||
1163 | /* Management functions */ | ||
1164 | int qlcnic_set_mac_address(struct qlcnic_adapter *, u8*); | ||
1165 | int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*); | ||
1166 | int qlcnic_get_nic_info(struct qlcnic_adapter *, u8); | ||
1167 | int qlcnic_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *); | ||
1168 | int qlcnic_get_pci_info(struct qlcnic_adapter *); | ||
1169 | int qlcnic_reset_partition(struct qlcnic_adapter *, u8); | ||
1170 | |||
1171 | /* eSwitch management functions */ | ||
1172 | int qlcnic_get_eswitch_capabilities(struct qlcnic_adapter *, u8, | ||
1173 | struct qlcnic_eswitch *); | ||
1174 | int qlcnic_get_eswitch_status(struct qlcnic_adapter *, u8, | ||
1175 | struct qlcnic_eswitch *); | ||
1176 | int qlcnic_toggle_eswitch(struct qlcnic_adapter *, u8, u8); | ||
1177 | int qlcnic_config_switch_port(struct qlcnic_adapter *, u8, int, u8, u8, | ||
1178 | u8, u8, u16); | ||
1179 | int qlcnic_config_port_mirroring(struct qlcnic_adapter *, u8, u8, u8); | ||
1180 | extern int qlcnic_config_tso; | ||
1181 | |||
1091 | /* | 1182 | /* |
1092 | * QLOGIC Board information | 1183 | * QLOGIC Board information |
1093 | */ | 1184 | */ |
@@ -1131,6 +1222,15 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring) | |||
1131 | 1222 | ||
1132 | extern const struct ethtool_ops qlcnic_ethtool_ops; | 1223 | extern const struct ethtool_ops qlcnic_ethtool_ops; |
1133 | 1224 | ||
1225 | struct qlcnic_nic_template { | ||
1226 | int (*get_mac_addr) (struct qlcnic_adapter *, u8*); | ||
1227 | int (*config_bridged_mode) (struct qlcnic_adapter *, u32); | ||
1228 | int (*config_led) (struct qlcnic_adapter *, u32, u32); | ||
1229 | int (*set_ilb_mode) (struct qlcnic_adapter *); | ||
1230 | void (*clear_ilb_mode) (struct qlcnic_adapter *); | ||
1231 | int (*start_firmware) (struct qlcnic_adapter *); | ||
1232 | }; | ||
1233 | |||
1134 | #define QLCDB(adapter, lvl, _fmt, _args...) do { \ | 1234 | #define QLCDB(adapter, lvl, _fmt, _args...) do { \ |
1135 | if (NETIF_MSG_##lvl & adapter->msg_enable) \ | 1235 | if (NETIF_MSG_##lvl & adapter->msg_enable) \ |
1136 | printk(KERN_INFO "%s: %s: " _fmt, \ | 1236 | printk(KERN_INFO "%s: %s: " _fmt, \ |
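The struct qlcnic_nic_template added above turns several entry points (MAC address lookup, bridged mode, LED control, loopback setup, firmware start) into per-mode function pointers, so later hunks can dispatch through adapter->nic_ops instead of calling one fixed implementation. A simplified, stand-alone sketch of that dispatch pattern with hypothetical names:

/* Hypothetical, heavily reduced version of the ops-table idea: one template
 * per operating mode, selected once and used by mode-agnostic callers.
 */
struct demo_adapter;

struct demo_nic_template {
	int (*get_mac_addr)(struct demo_adapter *, unsigned char *);
};

struct demo_adapter {
	struct demo_nic_template *nic_ops;
};

static int demo_pf_get_mac(struct demo_adapter *adapter, unsigned char *mac)
{
	(void)adapter;
	mac[0] = 0x00;		/* a real driver would query firmware here */
	return 0;
}

static struct demo_nic_template demo_pf_ops = {
	.get_mac_addr = demo_pf_get_mac,
};

int main(void)
{
	struct demo_adapter adapter = { .nic_ops = &demo_pf_ops };
	unsigned char mac[6] = { 0 };

	/* callers stay identical regardless of which template is installed */
	return adapter.nic_ops->get_mac_addr(&adapter, mac);
}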
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c index c2c1f5cc16c6..1e1dc58cddca 100644 --- a/drivers/net/qlcnic/qlcnic_ctx.c +++ b/drivers/net/qlcnic/qlcnic_ctx.c | |||
@@ -88,12 +88,12 @@ qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu) | |||
88 | 88 | ||
89 | if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) { | 89 | if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) { |
90 | if (qlcnic_issue_cmd(adapter, | 90 | if (qlcnic_issue_cmd(adapter, |
91 | adapter->ahw.pci_func, | 91 | adapter->ahw.pci_func, |
92 | QLCHAL_VERSION, | 92 | adapter->fw_hal_version, |
93 | recv_ctx->context_id, | 93 | recv_ctx->context_id, |
94 | mtu, | 94 | mtu, |
95 | 0, | 95 | 0, |
96 | QLCNIC_CDRP_CMD_SET_MTU)) { | 96 | QLCNIC_CDRP_CMD_SET_MTU)) { |
97 | 97 | ||
98 | dev_err(&adapter->pdev->dev, "Failed to set mtu\n"); | 98 | dev_err(&adapter->pdev->dev, "Failed to set mtu\n"); |
99 | return -EIO; | 99 | return -EIO; |
@@ -121,7 +121,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) | |||
121 | 121 | ||
122 | int i, nrds_rings, nsds_rings; | 122 | int i, nrds_rings, nsds_rings; |
123 | size_t rq_size, rsp_size; | 123 | size_t rq_size, rsp_size; |
124 | u32 cap, reg, val; | 124 | u32 cap, reg, val, reg2; |
125 | int err; | 125 | int err; |
126 | 126 | ||
127 | struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; | 127 | struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; |
@@ -197,7 +197,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) | |||
197 | phys_addr = hostrq_phys_addr; | 197 | phys_addr = hostrq_phys_addr; |
198 | err = qlcnic_issue_cmd(adapter, | 198 | err = qlcnic_issue_cmd(adapter, |
199 | adapter->ahw.pci_func, | 199 | adapter->ahw.pci_func, |
200 | QLCHAL_VERSION, | 200 | adapter->fw_hal_version, |
201 | (u32)(phys_addr >> 32), | 201 | (u32)(phys_addr >> 32), |
202 | (u32)(phys_addr & 0xffffffff), | 202 | (u32)(phys_addr & 0xffffffff), |
203 | rq_size, | 203 | rq_size, |
@@ -216,8 +216,12 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) | |||
216 | rds_ring = &recv_ctx->rds_rings[i]; | 216 | rds_ring = &recv_ctx->rds_rings[i]; |
217 | 217 | ||
218 | reg = le32_to_cpu(prsp_rds[i].host_producer_crb); | 218 | reg = le32_to_cpu(prsp_rds[i].host_producer_crb); |
219 | rds_ring->crb_rcv_producer = qlcnic_get_ioaddr(adapter, | 219 | if (adapter->fw_hal_version == QLCNIC_FW_BASE) |
220 | rds_ring->crb_rcv_producer = qlcnic_get_ioaddr(adapter, | ||
220 | QLCNIC_REG(reg - 0x200)); | 221 | QLCNIC_REG(reg - 0x200)); |
222 | else | ||
223 | rds_ring->crb_rcv_producer = adapter->ahw.pci_base0 + | ||
224 | reg; | ||
221 | } | 225 | } |
222 | 226 | ||
223 | prsp_sds = ((struct qlcnic_cardrsp_sds_ring *) | 227 | prsp_sds = ((struct qlcnic_cardrsp_sds_ring *) |
@@ -227,12 +231,18 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) | |||
227 | sds_ring = &recv_ctx->sds_rings[i]; | 231 | sds_ring = &recv_ctx->sds_rings[i]; |
228 | 232 | ||
229 | reg = le32_to_cpu(prsp_sds[i].host_consumer_crb); | 233 | reg = le32_to_cpu(prsp_sds[i].host_consumer_crb); |
230 | sds_ring->crb_sts_consumer = qlcnic_get_ioaddr(adapter, | 234 | reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb); |
231 | QLCNIC_REG(reg - 0x200)); | ||
232 | 235 | ||
233 | reg = le32_to_cpu(prsp_sds[i].interrupt_crb); | 236 | if (adapter->fw_hal_version == QLCNIC_FW_BASE) { |
234 | sds_ring->crb_intr_mask = qlcnic_get_ioaddr(adapter, | 237 | sds_ring->crb_sts_consumer = qlcnic_get_ioaddr(adapter, |
235 | QLCNIC_REG(reg - 0x200)); | 238 | QLCNIC_REG(reg - 0x200)); |
239 | sds_ring->crb_intr_mask = qlcnic_get_ioaddr(adapter, | ||
240 | QLCNIC_REG(reg2 - 0x200)); | ||
241 | } else { | ||
242 | sds_ring->crb_sts_consumer = adapter->ahw.pci_base0 + | ||
243 | reg; | ||
244 | sds_ring->crb_intr_mask = adapter->ahw.pci_base0 + reg2; | ||
245 | } | ||
236 | } | 246 | } |
237 | 247 | ||
238 | recv_ctx->state = le32_to_cpu(prsp->host_ctx_state); | 248 | recv_ctx->state = le32_to_cpu(prsp->host_ctx_state); |
@@ -253,7 +263,7 @@ qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter) | |||
253 | 263 | ||
254 | if (qlcnic_issue_cmd(adapter, | 264 | if (qlcnic_issue_cmd(adapter, |
255 | adapter->ahw.pci_func, | 265 | adapter->ahw.pci_func, |
256 | QLCHAL_VERSION, | 266 | adapter->fw_hal_version, |
257 | recv_ctx->context_id, | 267 | recv_ctx->context_id, |
258 | QLCNIC_DESTROY_CTX_RESET, | 268 | QLCNIC_DESTROY_CTX_RESET, |
259 | 0, | 269 | 0, |
@@ -319,7 +329,7 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter) | |||
319 | phys_addr = rq_phys_addr; | 329 | phys_addr = rq_phys_addr; |
320 | err = qlcnic_issue_cmd(adapter, | 330 | err = qlcnic_issue_cmd(adapter, |
321 | adapter->ahw.pci_func, | 331 | adapter->ahw.pci_func, |
322 | QLCHAL_VERSION, | 332 | adapter->fw_hal_version, |
323 | (u32)(phys_addr >> 32), | 333 | (u32)(phys_addr >> 32), |
324 | ((u32)phys_addr & 0xffffffff), | 334 | ((u32)phys_addr & 0xffffffff), |
325 | rq_size, | 335 | rq_size, |
@@ -327,8 +337,12 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter) | |||
327 | 337 | ||
328 | if (err == QLCNIC_RCODE_SUCCESS) { | 338 | if (err == QLCNIC_RCODE_SUCCESS) { |
329 | temp = le32_to_cpu(prsp->cds_ring.host_producer_crb); | 339 | temp = le32_to_cpu(prsp->cds_ring.host_producer_crb); |
330 | tx_ring->crb_cmd_producer = qlcnic_get_ioaddr(adapter, | 340 | if (adapter->fw_hal_version == QLCNIC_FW_BASE) |
341 | tx_ring->crb_cmd_producer = qlcnic_get_ioaddr(adapter, | ||
331 | QLCNIC_REG(temp - 0x200)); | 342 | QLCNIC_REG(temp - 0x200)); |
343 | else | ||
344 | tx_ring->crb_cmd_producer = adapter->ahw.pci_base0 + | ||
345 | temp; | ||
332 | 346 | ||
333 | adapter->tx_context_id = | 347 | adapter->tx_context_id = |
334 | le16_to_cpu(prsp->context_id); | 348 | le16_to_cpu(prsp->context_id); |
@@ -351,7 +365,7 @@ qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter) | |||
351 | { | 365 | { |
352 | if (qlcnic_issue_cmd(adapter, | 366 | if (qlcnic_issue_cmd(adapter, |
353 | adapter->ahw.pci_func, | 367 | adapter->ahw.pci_func, |
354 | QLCHAL_VERSION, | 368 | adapter->fw_hal_version, |
355 | adapter->tx_context_id, | 369 | adapter->tx_context_id, |
356 | QLCNIC_DESTROY_CTX_RESET, | 370 | QLCNIC_DESTROY_CTX_RESET, |
357 | 0, | 371 | 0, |
@@ -368,7 +382,7 @@ qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val) | |||
368 | 382 | ||
369 | if (qlcnic_issue_cmd(adapter, | 383 | if (qlcnic_issue_cmd(adapter, |
370 | adapter->ahw.pci_func, | 384 | adapter->ahw.pci_func, |
371 | QLCHAL_VERSION, | 385 | adapter->fw_hal_version, |
372 | reg, | 386 | reg, |
373 | 0, | 387 | 0, |
374 | 0, | 388 | 0, |
@@ -385,7 +399,7 @@ qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val) | |||
385 | { | 399 | { |
386 | return qlcnic_issue_cmd(adapter, | 400 | return qlcnic_issue_cmd(adapter, |
387 | adapter->ahw.pci_func, | 401 | adapter->ahw.pci_func, |
388 | QLCHAL_VERSION, | 402 | adapter->fw_hal_version, |
389 | reg, | 403 | reg, |
390 | val, | 404 | val, |
391 | 0, | 405 | 0, |
@@ -533,3 +547,464 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter) | |||
533 | } | 547 | } |
534 | } | 548 | } |
535 | 549 | ||
550 | /* Set MAC address of a NIC partition */ | ||
551 | int qlcnic_set_mac_address(struct qlcnic_adapter *adapter, u8* mac) | ||
552 | { | ||
553 | int err = 0; | ||
554 | u32 arg1, arg2, arg3; | ||
555 | |||
556 | arg1 = adapter->ahw.pci_func | BIT_9; | ||
557 | arg2 = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24); | ||
558 | arg3 = mac[4] | (mac[5] << 16); | ||
559 | |||
560 | err = qlcnic_issue_cmd(adapter, | ||
561 | adapter->ahw.pci_func, | ||
562 | adapter->fw_hal_version, | ||
563 | arg1, | ||
564 | arg2, | ||
565 | arg3, | ||
566 | QLCNIC_CDRP_CMD_MAC_ADDRESS); | ||
567 | |||
568 | if (err != QLCNIC_RCODE_SUCCESS) { | ||
569 | dev_err(&adapter->pdev->dev, | ||
570 | "Failed to set mac address%d\n", err); | ||
571 | err = -EIO; | ||
572 | } | ||
573 | |||
574 | return err; | ||
575 | } | ||
576 | |||
577 | /* Get MAC address of a NIC partition */ | ||
578 | int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac) | ||
579 | { | ||
580 | int err; | ||
581 | u32 arg1; | ||
582 | |||
583 | arg1 = adapter->ahw.pci_func | BIT_8; | ||
584 | err = qlcnic_issue_cmd(adapter, | ||
585 | adapter->ahw.pci_func, | ||
586 | adapter->fw_hal_version, | ||
587 | arg1, | ||
588 | 0, | ||
589 | 0, | ||
590 | QLCNIC_CDRP_CMD_MAC_ADDRESS); | ||
591 | |||
592 | if (err == QLCNIC_RCODE_SUCCESS) { | ||
593 | qlcnic_fetch_mac(adapter, QLCNIC_ARG1_CRB_OFFSET, | ||
594 | QLCNIC_ARG2_CRB_OFFSET, 0, mac); | ||
595 | dev_info(&adapter->pdev->dev, "MAC address: %pM\n", mac); | ||
596 | } else { | ||
597 | dev_err(&adapter->pdev->dev, | ||
598 | "Failed to get mac address%d\n", err); | ||
599 | err = -EIO; | ||
600 | } | ||
601 | |||
602 | return err; | ||
603 | } | ||
604 | |||
605 | /* Get info of a NIC partition */ | ||
606 | int qlcnic_get_nic_info(struct qlcnic_adapter *adapter, u8 func_id) | ||
607 | { | ||
608 | int err; | ||
609 | dma_addr_t nic_dma_t; | ||
610 | struct qlcnic_info *nic_info; | ||
611 | void *nic_info_addr; | ||
612 | size_t nic_size = sizeof(struct qlcnic_info); | ||
613 | |||
614 | nic_info_addr = pci_alloc_consistent(adapter->pdev, | ||
615 | nic_size, &nic_dma_t); | ||
616 | if (!nic_info_addr) | ||
617 | return -ENOMEM; | ||
618 | memset(nic_info_addr, 0, nic_size); | ||
619 | |||
620 | nic_info = (struct qlcnic_info *) nic_info_addr; | ||
621 | err = qlcnic_issue_cmd(adapter, | ||
622 | adapter->ahw.pci_func, | ||
623 | adapter->fw_hal_version, | ||
624 | MSD(nic_dma_t), | ||
625 | LSD(nic_dma_t), | ||
626 | (func_id << 16 | nic_size), | ||
627 | QLCNIC_CDRP_CMD_GET_NIC_INFO); | ||
628 | |||
629 | if (err == QLCNIC_RCODE_SUCCESS) { | ||
630 | adapter->physical_port = le16_to_cpu(nic_info->phys_port); | ||
631 | adapter->switch_mode = le16_to_cpu(nic_info->switch_mode); | ||
632 | adapter->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques); | ||
633 | adapter->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques); | ||
634 | adapter->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw); | ||
635 | adapter->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw); | ||
636 | adapter->max_mtu = le16_to_cpu(nic_info->max_mtu); | ||
637 | adapter->capabilities = le32_to_cpu(nic_info->capabilities); | ||
638 | adapter->max_mac_filters = nic_info->max_mac_filters; | ||
639 | |||
640 | dev_info(&adapter->pdev->dev, | ||
641 | "phy port: %d switch_mode: %d,\n" | ||
642 | "\tmax_tx_q: %d max_rx_q: %d min_tx_bw: 0x%x,\n" | ||
643 | "\tmax_tx_bw: 0x%x max_mtu:0x%x, capabilities: 0x%x\n", | ||
644 | adapter->physical_port, adapter->switch_mode, | ||
645 | adapter->max_tx_ques, adapter->max_rx_ques, | ||
646 | adapter->min_tx_bw, adapter->max_tx_bw, | ||
647 | adapter->max_mtu, adapter->capabilities); | ||
648 | } else { | ||
649 | dev_err(&adapter->pdev->dev, | ||
650 | "Failed to get nic info%d\n", err); | ||
651 | err = -EIO; | ||
652 | } | ||
653 | |||
654 | pci_free_consistent(adapter->pdev, nic_size, nic_info_addr, nic_dma_t); | ||
655 | return err; | ||
656 | } | ||
657 | |||
658 | /* Configure a NIC partition */ | ||
659 | int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic) | ||
660 | { | ||
661 | int err = -EIO; | ||
662 | u32 func_state; | ||
663 | dma_addr_t nic_dma_t; | ||
664 | void *nic_info_addr; | ||
665 | struct qlcnic_info *nic_info; | ||
666 | size_t nic_size = sizeof(struct qlcnic_info); | ||
667 | |||
668 | if (adapter->op_mode != QLCNIC_MGMT_FUNC) | ||
669 | return err; | ||
670 | |||
671 | if (qlcnic_api_lock(adapter)) | ||
672 | return err; | ||
673 | |||
674 | func_state = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT); | ||
675 | if (QLC_DEV_CHECK_ACTIVE(func_state, nic->pci_func)) { | ||
676 | qlcnic_api_unlock(adapter); | ||
677 | return err; | ||
678 | } | ||
679 | |||
680 | qlcnic_api_unlock(adapter); | ||
681 | |||
682 | nic_info_addr = pci_alloc_consistent(adapter->pdev, nic_size, | ||
683 | &nic_dma_t); | ||
684 | if (!nic_info_addr) | ||
685 | return -ENOMEM; | ||
686 | |||
687 | memset(nic_info_addr, 0, nic_size); | ||
688 | nic_info = (struct qlcnic_info *)nic_info_addr; | ||
689 | |||
690 | nic_info->pci_func = cpu_to_le16(nic->pci_func); | ||
691 | nic_info->op_mode = cpu_to_le16(nic->op_mode); | ||
692 | nic_info->phys_port = cpu_to_le16(nic->phys_port); | ||
693 | nic_info->switch_mode = cpu_to_le16(nic->switch_mode); | ||
694 | nic_info->capabilities = cpu_to_le32(nic->capabilities); | ||
695 | nic_info->max_mac_filters = nic->max_mac_filters; | ||
696 | nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques); | ||
697 | nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques); | ||
698 | nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw); | ||
699 | nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw); | ||
700 | |||
701 | err = qlcnic_issue_cmd(adapter, | ||
702 | adapter->ahw.pci_func, | ||
703 | adapter->fw_hal_version, | ||
704 | MSD(nic_dma_t), | ||
705 | LSD(nic_dma_t), | ||
706 | nic_size, | ||
707 | QLCNIC_CDRP_CMD_SET_NIC_INFO); | ||
708 | |||
709 | if (err != QLCNIC_RCODE_SUCCESS) { | ||
710 | dev_err(&adapter->pdev->dev, | ||
711 | "Failed to set nic info%d\n", err); | ||
712 | err = -EIO; | ||
713 | } | ||
714 | |||
715 | pci_free_consistent(adapter->pdev, nic_size, nic_info_addr, nic_dma_t); | ||
716 | return err; | ||
717 | } | ||
718 | |||
719 | /* Get PCI Info of a partition */ | ||
720 | int qlcnic_get_pci_info(struct qlcnic_adapter *adapter) | ||
721 | { | ||
722 | int err = 0, i; | ||
723 | dma_addr_t pci_info_dma_t; | ||
724 | struct qlcnic_pci_info *npar; | ||
725 | void *pci_info_addr; | ||
726 | size_t npar_size = sizeof(struct qlcnic_pci_info); | ||
727 | size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC; | ||
728 | |||
729 | pci_info_addr = pci_alloc_consistent(adapter->pdev, pci_size, | ||
730 | &pci_info_dma_t); | ||
731 | if (!pci_info_addr) | ||
732 | return -ENOMEM; | ||
733 | memset(pci_info_addr, 0, pci_size); | ||
734 | |||
735 | if (!adapter->npars) | ||
736 | adapter->npars = kzalloc(pci_size, GFP_KERNEL); | ||
737 | if (!adapter->npars) { | ||
738 | err = -ENOMEM; | ||
739 | goto err_npar; | ||
740 | } | ||
741 | |||
742 | if (!adapter->eswitch) | ||
743 | adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) * | ||
744 | QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL); | ||
745 | if (!adapter->eswitch) { | ||
746 | err = -ENOMEM; | ||
747 | goto err_eswitch; | ||
748 | } | ||
749 | |||
750 | npar = (struct qlcnic_pci_info *) pci_info_addr; | ||
751 | err = qlcnic_issue_cmd(adapter, | ||
752 | adapter->ahw.pci_func, | ||
753 | adapter->fw_hal_version, | ||
754 | MSD(pci_info_dma_t), | ||
755 | LSD(pci_info_dma_t), | ||
756 | pci_size, | ||
757 | QLCNIC_CDRP_CMD_GET_PCI_INFO); | ||
758 | |||
759 | if (err == QLCNIC_RCODE_SUCCESS) { | ||
760 | for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++) { | ||
761 | adapter->npars[i].id = le32_to_cpu(npar->id); | ||
762 | adapter->npars[i].active = le32_to_cpu(npar->active); | ||
763 | adapter->npars[i].type = le32_to_cpu(npar->type); | ||
764 | adapter->npars[i].default_port = | ||
765 | le32_to_cpu(npar->default_port); | ||
766 | adapter->npars[i].tx_min_bw = | ||
767 | le32_to_cpu(npar->tx_min_bw); | ||
768 | adapter->npars[i].tx_max_bw = | ||
769 | le32_to_cpu(npar->tx_max_bw); | ||
770 | memcpy(adapter->npars[i].mac, npar->mac, ETH_ALEN); | ||
771 | } | ||
772 | } else { | ||
773 | dev_err(&adapter->pdev->dev, | ||
774 | "Failed to get PCI Info%d\n", err); | ||
775 | kfree(adapter->npars); | ||
776 | err = -EIO; | ||
777 | } | ||
778 | goto err_npar; | ||
779 | |||
780 | err_eswitch: | ||
781 | kfree(adapter->npars); | ||
782 | adapter->npars = NULL; | ||
783 | |||
784 | err_npar: | ||
785 | pci_free_consistent(adapter->pdev, pci_size, pci_info_addr, | ||
786 | pci_info_dma_t); | ||
787 | return err; | ||
788 | } | ||
789 | |||
790 | /* Reset a NIC partition */ | ||
791 | |||
792 | int qlcnic_reset_partition(struct qlcnic_adapter *adapter, u8 func_no) | ||
793 | { | ||
794 | int err = -EIO; | ||
795 | |||
796 | if (adapter->op_mode != QLCNIC_MGMT_FUNC) | ||
797 | return err; | ||
798 | |||
799 | err = qlcnic_issue_cmd(adapter, | ||
800 | adapter->ahw.pci_func, | ||
801 | adapter->fw_hal_version, | ||
802 | func_no, | ||
803 | 0, | ||
804 | 0, | ||
805 | QLCNIC_CDRP_CMD_RESET_NPAR); | ||
806 | |||
807 | if (err != QLCNIC_RCODE_SUCCESS) { | ||
808 | dev_err(&adapter->pdev->dev, | ||
809 | "Failed to issue reset partition%d\n", err); | ||
810 | err = -EIO; | ||
811 | } | ||
812 | |||
813 | return err; | ||
814 | } | ||
815 | |||
816 | /* Get eSwitch Capabilities */ | ||
817 | int qlcnic_get_eswitch_capabilities(struct qlcnic_adapter *adapter, u8 port, | ||
818 | struct qlcnic_eswitch *eswitch) | ||
819 | { | ||
820 | int err = -EIO; | ||
821 | u32 arg1, arg2; | ||
822 | |||
823 | if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) | ||
824 | return err; | ||
825 | |||
826 | err = qlcnic_issue_cmd(adapter, | ||
827 | adapter->ahw.pci_func, | ||
828 | adapter->fw_hal_version, | ||
829 | port, | ||
830 | 0, | ||
831 | 0, | ||
832 | QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY); | ||
833 | |||
834 | if (err == QLCNIC_RCODE_SUCCESS) { | ||
835 | arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET); | ||
836 | arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET); | ||
837 | |||
838 | eswitch->port = arg1 & 0xf; | ||
839 | eswitch->active_vports = LSB(arg2); | ||
840 | eswitch->max_ucast_filters = MSB(arg2); | ||
841 | eswitch->max_active_vlans = LSB(MSW(arg2)); | ||
842 | if (arg1 & BIT_6) | ||
843 | eswitch->flags |= QLCNIC_SWITCH_VLAN_FILTERING; | ||
844 | if (arg1 & BIT_7) | ||
845 | eswitch->flags |= QLCNIC_SWITCH_PROMISC_MODE; | ||
846 | if (arg1 & BIT_8) | ||
847 | eswitch->flags |= QLCNIC_SWITCH_PORT_MIRRORING; | ||
848 | } else { | ||
849 | dev_err(&adapter->pdev->dev, | ||
850 | "Failed to get eswitch capabilities%d\n", err); | ||
851 | } | ||
852 | |||
853 | return err; | ||
854 | } | ||
855 | |||
856 | /* Get current status of eswitch */ | ||
857 | int qlcnic_get_eswitch_status(struct qlcnic_adapter *adapter, u8 port, | ||
858 | struct qlcnic_eswitch *eswitch) | ||
859 | { | ||
860 | int err = -EIO; | ||
861 | u32 arg1, arg2; | ||
862 | |||
863 | if (adapter->op_mode != QLCNIC_MGMT_FUNC) | ||
864 | return err; | ||
865 | |||
866 | err = qlcnic_issue_cmd(adapter, | ||
867 | adapter->ahw.pci_func, | ||
868 | adapter->fw_hal_version, | ||
869 | port, | ||
870 | 0, | ||
871 | 0, | ||
872 | QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS); | ||
873 | |||
874 | if (err == QLCNIC_RCODE_SUCCESS) { | ||
875 | arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET); | ||
876 | arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET); | ||
877 | |||
878 | eswitch->port = arg1 & 0xf; | ||
879 | eswitch->active_vports = LSB(arg2); | ||
880 | eswitch->active_ucast_filters = MSB(arg2); | ||
881 | eswitch->active_vlans = LSB(MSW(arg2)); | ||
882 | if (arg1 & BIT_6) | ||
883 | eswitch->flags |= QLCNIC_SWITCH_VLAN_FILTERING; | ||
884 | if (arg1 & BIT_8) | ||
885 | eswitch->flags |= QLCNIC_SWITCH_PORT_MIRRORING; | ||
886 | |||
887 | } else { | ||
888 | dev_err(&adapter->pdev->dev, | ||
889 | "Failed to get eswitch status%d\n", err); | ||
890 | } | ||
891 | |||
892 | return err; | ||
893 | } | ||
894 | |||
895 | /* Enable/Disable eSwitch */ | ||
896 | int qlcnic_toggle_eswitch(struct qlcnic_adapter *adapter, u8 id, u8 enable) | ||
897 | { | ||
898 | int err = -EIO; | ||
899 | u32 arg1, arg2; | ||
900 | struct qlcnic_eswitch *eswitch; | ||
901 | |||
902 | if (adapter->op_mode != QLCNIC_MGMT_FUNC) | ||
903 | return err; | ||
904 | |||
905 | eswitch = &adapter->eswitch[id]; | ||
906 | if (!eswitch) | ||
907 | return err; | ||
908 | |||
909 | arg1 = eswitch->port | (enable ? BIT_4 : 0); | ||
910 | arg2 = eswitch->active_vports | (eswitch->max_ucast_filters << 8) | | ||
911 | (eswitch->max_active_vlans << 16); | ||
912 | err = qlcnic_issue_cmd(adapter, | ||
913 | adapter->ahw.pci_func, | ||
914 | adapter->fw_hal_version, | ||
915 | arg1, | ||
916 | arg2, | ||
917 | 0, | ||
918 | QLCNIC_CDRP_CMD_TOGGLE_ESWITCH); | ||
919 | |||
920 | if (err != QLCNIC_RCODE_SUCCESS) { | ||
921 | dev_err(&adapter->pdev->dev, | ||
922 | "Failed to enable eswitch%d\n", eswitch->port); | ||
923 | eswitch->flags &= ~QLCNIC_SWITCH_ENABLE; | ||
924 | err = -EIO; | ||
925 | } else { | ||
926 | eswitch->flags |= QLCNIC_SWITCH_ENABLE; | ||
927 | dev_info(&adapter->pdev->dev, | ||
928 | "Enabled eSwitch for port %d\n", eswitch->port); | ||
929 | } | ||
930 | |||
931 | return err; | ||
932 | } | ||
933 | |||
934 | /* Configure eSwitch for port mirroring */ | ||
935 | int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id, | ||
936 | u8 enable_mirroring, u8 pci_func) | ||
937 | { | ||
938 | int err = -EIO; | ||
939 | u32 arg1; | ||
940 | |||
941 | if (adapter->op_mode != QLCNIC_MGMT_FUNC || | ||
942 | !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) | ||
943 | return err; | ||
944 | |||
945 | arg1 = id | (enable_mirroring ? BIT_4 : 0); | ||
946 | arg1 |= pci_func << 8; | ||
947 | |||
948 | err = qlcnic_issue_cmd(adapter, | ||
949 | adapter->ahw.pci_func, | ||
950 | adapter->fw_hal_version, | ||
951 | arg1, | ||
952 | 0, | ||
953 | 0, | ||
954 | QLCNIC_CDRP_CMD_SET_PORTMIRRORING); | ||
955 | |||
956 | if (err != QLCNIC_RCODE_SUCCESS) { | ||
957 | dev_err(&adapter->pdev->dev, | ||
958 | "Failed to configure port mirroring%d on eswitch:%d\n", | ||
959 | pci_func, id); | ||
960 | } else { | ||
961 | dev_info(&adapter->pdev->dev, | ||
962 | "Configured eSwitch %d for port mirroring:%d\n", | ||
963 | id, pci_func); | ||
964 | } | ||
965 | |||
966 | return err; | ||
967 | } | ||
968 | |||
969 | /* Configure eSwitch port */ | ||
970 | int qlcnic_config_switch_port(struct qlcnic_adapter *adapter, u8 id, | ||
971 | int vlan_tagging, u8 discard_tagged, u8 promsc_mode, | ||
972 | u8 mac_learn, u8 pci_func, u16 vlan_id) | ||
973 | { | ||
974 | int err = -EIO; | ||
975 | u32 arg1; | ||
976 | struct qlcnic_eswitch *eswitch; | ||
977 | |||
978 | if (adapter->op_mode != QLCNIC_MGMT_FUNC) | ||
979 | return err; | ||
980 | |||
981 | eswitch = &adapter->eswitch[id]; | ||
982 | if (!(eswitch->flags & QLCNIC_SWITCH_ENABLE)) | ||
983 | return err; | ||
984 | |||
985 | arg1 = eswitch->port | (discard_tagged ? BIT_4 : 0); | ||
986 | arg1 |= (promsc_mode ? BIT_6 : 0) | (mac_learn ? BIT_7 : 0); | ||
987 | arg1 |= pci_func << 8; | ||
988 | if (vlan_tagging) | ||
989 | arg1 |= BIT_5 | (vlan_id << 16); | ||
990 | |||
991 | err = qlcnic_issue_cmd(adapter, | ||
992 | adapter->ahw.pci_func, | ||
993 | adapter->fw_hal_version, | ||
994 | arg1, | ||
995 | 0, | ||
996 | 0, | ||
997 | QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH); | ||
998 | |||
999 | if (err != QLCNIC_RCODE_SUCCESS) { | ||
1000 | dev_err(&adapter->pdev->dev, | ||
1001 | "Failed to configure eswitch port%d\n", eswitch->port); | ||
1002 | eswitch->flags |= QLCNIC_SWITCH_ENABLE; | ||
1003 | } else { | ||
1004 | eswitch->flags &= ~QLCNIC_SWITCH_ENABLE; | ||
1005 | dev_info(&adapter->pdev->dev, | ||
1006 | "Configured eSwitch for port %d\n", eswitch->port); | ||
1007 | } | ||
1008 | |||
1009 | return err; | ||
1010 | } | ||
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c index 3bd514ec7e8f..3e4822ad5a80 100644 --- a/drivers/net/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/qlcnic/qlcnic_ethtool.c | |||
@@ -683,13 +683,13 @@ static int qlcnic_loopback_test(struct net_device *netdev) | |||
683 | if (ret) | 683 | if (ret) |
684 | goto clear_it; | 684 | goto clear_it; |
685 | 685 | ||
686 | ret = qlcnic_set_ilb_mode(adapter); | 686 | ret = adapter->nic_ops->set_ilb_mode(adapter); |
687 | if (ret) | 687 | if (ret) |
688 | goto done; | 688 | goto done; |
689 | 689 | ||
690 | ret = qlcnic_do_ilb_test(adapter); | 690 | ret = qlcnic_do_ilb_test(adapter); |
691 | 691 | ||
692 | qlcnic_clear_ilb_mode(adapter); | 692 | adapter->nic_ops->clear_ilb_mode(adapter); |
693 | 693 | ||
694 | done: | 694 | done: |
695 | qlcnic_diag_free_res(netdev, max_sds_rings); | 695 | qlcnic_diag_free_res(netdev, max_sds_rings); |
@@ -715,7 +715,8 @@ static int qlcnic_irq_test(struct net_device *netdev) | |||
715 | 715 | ||
716 | adapter->diag_cnt = 0; | 716 | adapter->diag_cnt = 0; |
717 | ret = qlcnic_issue_cmd(adapter, adapter->ahw.pci_func, | 717 | ret = qlcnic_issue_cmd(adapter, adapter->ahw.pci_func, |
718 | QLCHAL_VERSION, adapter->portnum, 0, 0, 0x00000011); | 718 | adapter->fw_hal_version, adapter->portnum, |
719 | 0, 0, 0x00000011); | ||
719 | if (ret) | 720 | if (ret) |
720 | goto done; | 721 | goto done; |
721 | 722 | ||
@@ -834,7 +835,7 @@ static int qlcnic_blink_led(struct net_device *dev, u32 val) | |||
834 | struct qlcnic_adapter *adapter = netdev_priv(dev); | 835 | struct qlcnic_adapter *adapter = netdev_priv(dev); |
835 | int ret; | 836 | int ret; |
836 | 837 | ||
837 | ret = qlcnic_config_led(adapter, 1, 0xf); | 838 | ret = adapter->nic_ops->config_led(adapter, 1, 0xf); |
838 | if (ret) { | 839 | if (ret) { |
839 | dev_err(&adapter->pdev->dev, | 840 | dev_err(&adapter->pdev->dev, |
840 | "Failed to set LED blink state.\n"); | 841 | "Failed to set LED blink state.\n"); |
@@ -843,7 +844,7 @@ static int qlcnic_blink_led(struct net_device *dev, u32 val) | |||
843 | 844 | ||
844 | msleep_interruptible(val * 1000); | 845 | msleep_interruptible(val * 1000); |
845 | 846 | ||
846 | ret = qlcnic_config_led(adapter, 0, 0xf); | 847 | ret = adapter->nic_ops->config_led(adapter, 0, 0xf); |
847 | if (ret) { | 848 | if (ret) { |
848 | dev_err(&adapter->pdev->dev, | 849 | dev_err(&adapter->pdev->dev, |
849 | "Failed to reset LED blink state.\n"); | 850 | "Failed to reset LED blink state.\n"); |
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h index ad9d167723c4..7b81cab27002 100644 --- a/drivers/net/qlcnic/qlcnic_hdr.h +++ b/drivers/net/qlcnic/qlcnic_hdr.h | |||
@@ -208,6 +208,39 @@ enum { | |||
208 | QLCNIC_HW_PX_MAP_CRB_PGR0 | 208 | QLCNIC_HW_PX_MAP_CRB_PGR0 |
209 | }; | 209 | }; |
210 | 210 | ||
211 | #define BIT_0 0x1 | ||
212 | #define BIT_1 0x2 | ||
213 | #define BIT_2 0x4 | ||
214 | #define BIT_3 0x8 | ||
215 | #define BIT_4 0x10 | ||
216 | #define BIT_5 0x20 | ||
217 | #define BIT_6 0x40 | ||
218 | #define BIT_7 0x80 | ||
219 | #define BIT_8 0x100 | ||
220 | #define BIT_9 0x200 | ||
221 | #define BIT_10 0x400 | ||
222 | #define BIT_11 0x800 | ||
223 | #define BIT_12 0x1000 | ||
224 | #define BIT_13 0x2000 | ||
225 | #define BIT_14 0x4000 | ||
226 | #define BIT_15 0x8000 | ||
227 | #define BIT_16 0x10000 | ||
228 | #define BIT_17 0x20000 | ||
229 | #define BIT_18 0x40000 | ||
230 | #define BIT_19 0x80000 | ||
231 | #define BIT_20 0x100000 | ||
232 | #define BIT_21 0x200000 | ||
233 | #define BIT_22 0x400000 | ||
234 | #define BIT_23 0x800000 | ||
235 | #define BIT_24 0x1000000 | ||
236 | #define BIT_25 0x2000000 | ||
237 | #define BIT_26 0x4000000 | ||
238 | #define BIT_27 0x8000000 | ||
239 | #define BIT_28 0x10000000 | ||
240 | #define BIT_29 0x20000000 | ||
241 | #define BIT_30 0x40000000 | ||
242 | #define BIT_31 0x80000000 | ||
243 | |||
211 | /* This field defines CRB adr [31:20] of the agents */ | 244 | /* This field defines CRB adr [31:20] of the agents */ |
212 | 245 | ||
213 | #define QLCNIC_HW_CRB_HUB_AGT_ADR_MN \ | 246 | #define QLCNIC_HW_CRB_HUB_AGT_ADR_MN \ |
@@ -668,10 +701,11 @@ enum { | |||
668 | #define QLCNIC_CRB_DEV_REF_COUNT (QLCNIC_CAM_RAM(0x138)) | 701 | #define QLCNIC_CRB_DEV_REF_COUNT (QLCNIC_CAM_RAM(0x138)) |
669 | #define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140)) | 702 | #define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140)) |
670 | 703 | ||
671 | #define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144)) | 704 | #define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144)) |
672 | #define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148)) | 705 | #define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148)) |
673 | #define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c)) | 706 | #define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c)) |
674 | #define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x174)) | 707 | #define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x174)) |
708 | #define QLCNIC_CRB_DEV_NPAR_STATE (QLCNIC_CAM_RAM(0x19c)) | ||
675 | #define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c) | 709 | #define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c) |
676 | #define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860) | 710 | #define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860) |
677 | 711 | ||
@@ -684,15 +718,26 @@ enum { | |||
684 | #define QLCNIC_DEV_FAILED 0x6 | 718 | #define QLCNIC_DEV_FAILED 0x6 |
685 | #define QLCNIC_DEV_QUISCENT 0x7 | 719 | #define QLCNIC_DEV_QUISCENT 0x7 |
686 | 720 | ||
721 | #define QLCNIC_DEV_NPAR_NOT_RDY 0 | ||
722 | #define QLCNIC_DEV_NPAR_RDY 1 | ||
723 | |||
724 | #define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) &= (1 << (FN * 4))) | ||
687 | #define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4))) | 725 | #define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4))) |
688 | #define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4))) | 726 | #define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4))) |
689 | #define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4))) | 727 | #define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4))) |
690 | #define QLC_DEV_SET_QSCNT_RDY(VAL, FN) ((VAL) |= (2 << (FN * 4))) | 728 | #define QLC_DEV_SET_QSCNT_RDY(VAL, FN) ((VAL) |= (2 << (FN * 4))) |
691 | #define QLC_DEV_CLR_RST_QSCNT(VAL, FN) ((VAL) &= ~(3 << (FN * 4))) | 729 | #define QLC_DEV_CLR_RST_QSCNT(VAL, FN) ((VAL) &= ~(3 << (FN * 4))) |
692 | 730 | ||
731 | #define QLC_DEV_GET_DRV(VAL, FN) (0xf & ((VAL) >> (FN * 4))) | ||
732 | #define QLC_DEV_SET_DRV(VAL, FN) ((VAL) << (FN * 4)) | ||
733 | |||
734 | #define QLCNIC_TYPE_NIC 1 | ||
735 | #define QLCNIC_TYPE_FCOE 2 | ||
736 | #define QLCNIC_TYPE_ISCSI 3 | ||
737 | |||
693 | #define QLCNIC_RCODE_DRIVER_INFO 0x20000000 | 738 | #define QLCNIC_RCODE_DRIVER_INFO 0x20000000 |
694 | #define QLCNIC_RCODE_DRIVER_CAN_RELOAD 0x40000000 | 739 | #define QLCNIC_RCODE_DRIVER_CAN_RELOAD BIT_30 |
695 | #define QLCNIC_RCODE_FATAL_ERROR 0x80000000 | 740 | #define QLCNIC_RCODE_FATAL_ERROR BIT_31 |
696 | #define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff) | 741 | #define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff) |
697 | #define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0xfffff) | 742 | #define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0xfffff) |
698 | 743 | ||
@@ -721,6 +766,35 @@ struct qlcnic_legacy_intr_set { | |||
721 | u32 pci_int_reg; | 766 | u32 pci_int_reg; |
722 | }; | 767 | }; |
723 | 768 | ||
769 | #define QLCNIC_FW_API 0x1b216c | ||
770 | #define QLCNIC_DRV_OP_MODE 0x1b2170 | ||
771 | #define QLCNIC_MSIX_BASE 0x132110 | ||
772 | #define QLCNIC_MAX_PCI_FUNC 8 | ||
773 | |||
774 | /* PCI function operational mode */ | ||
775 | enum { | ||
776 | QLCNIC_MGMT_FUNC = 0, | ||
777 | QLCNIC_PRIV_FUNC = 1, | ||
778 | QLCNIC_NON_PRIV_FUNC = 2 | ||
779 | }; | ||
780 | |||
781 | /* FW HAL api version */ | ||
782 | enum { | ||
783 | QLCNIC_FW_BASE = 1, | ||
784 | QLCNIC_FW_NPAR = 2 | ||
785 | }; | ||
786 | |||
787 | #define QLC_DEV_DRV_DEFAULT 0x11111111 | ||
788 | |||
789 | #define LSB(x) ((uint8_t)(x)) | ||
790 | #define MSB(x) ((uint8_t)((uint16_t)(x) >> 8)) | ||
791 | |||
792 | #define LSW(x) ((uint16_t)((uint32_t)(x))) | ||
793 | #define MSW(x) ((uint16_t)((uint32_t)(x) >> 16)) | ||
794 | |||
795 | #define LSD(x) ((uint32_t)((uint64_t)(x))) | ||
796 | #define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16)) | ||
797 | |||
724 | #define QLCNIC_LEGACY_INTR_CONFIG \ | 798 | #define QLCNIC_LEGACY_INTR_CONFIG \ |
725 | { \ | 799 | { \ |
726 | { \ | 800 | { \ |
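Among the additions above, LSD() and MSD() split a 64-bit value into its low and high 32-bit words (MSD() is written as two 16-bit shifts), which is how the CDRP commands pass a dma_addr_t to firmware as two mailbox arguments. A tiny stand-alone demonstration with a made-up address:

#include <stdint.h>
#include <stdio.h>

/* The macros below mirror the definitions added in qlcnic_hdr.h; the
 * address value is hypothetical.
 */
#define LSD(x) ((uint32_t)((uint64_t)(x)))
#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))

int main(void)
{
	uint64_t dma = 0x0000001234abcd00ULL;	/* pretend bus address */

	printf("high word = 0x%08x\n", (unsigned int)MSD(dma));	/* 0x00000012 */
	printf("low word  = 0x%08x\n", (unsigned int)LSD(dma));	/* 0x34abcd00 */
	return 0;
}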
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c index 0c2e1f08f459..f776956d2d6c 100644 --- a/drivers/net/qlcnic/qlcnic_hw.c +++ b/drivers/net/qlcnic/qlcnic_hw.c | |||
@@ -538,7 +538,7 @@ int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable) | |||
538 | return rv; | 538 | return rv; |
539 | } | 539 | } |
540 | 540 | ||
541 | int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable) | 541 | int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable) |
542 | { | 542 | { |
543 | struct qlcnic_nic_req req; | 543 | struct qlcnic_nic_req req; |
544 | u64 word; | 544 | u64 word; |
@@ -704,21 +704,15 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu) | |||
704 | return rc; | 704 | return rc; |
705 | } | 705 | } |
706 | 706 | ||
707 | int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac) | 707 | int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u8 *mac) |
708 | { | 708 | { |
709 | u32 crbaddr, mac_hi, mac_lo; | 709 | u32 crbaddr; |
710 | int pci_func = adapter->ahw.pci_func; | 710 | int pci_func = adapter->ahw.pci_func; |
711 | 711 | ||
712 | crbaddr = CRB_MAC_BLOCK_START + | 712 | crbaddr = CRB_MAC_BLOCK_START + |
713 | (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1)); | 713 | (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1)); |
714 | 714 | ||
715 | mac_lo = QLCRD32(adapter, crbaddr); | 715 | qlcnic_fetch_mac(adapter, crbaddr, crbaddr+4, pci_func & 1, mac); |
716 | mac_hi = QLCRD32(adapter, crbaddr+4); | ||
717 | |||
718 | if (pci_func & 1) | ||
719 | *mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16)); | ||
720 | else | ||
721 | *mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32)); | ||
722 | 716 | ||
723 | return 0; | 717 | return 0; |
724 | } | 718 | } |
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c index 71a4e664ad76..635c99022f06 100644 --- a/drivers/net/qlcnic/qlcnic_init.c +++ b/drivers/net/qlcnic/qlcnic_init.c | |||
@@ -520,17 +520,16 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) { | |||
520 | int timeo; | 520 | int timeo; |
521 | u32 val; | 521 | u32 val; |
522 | 522 | ||
523 | val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO); | 523 | if (adapter->fw_hal_version == QLCNIC_FW_BASE) { |
524 | val = (val >> (adapter->portnum * 4)) & 0xf; | 524 | val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO); |
525 | 525 | val = QLC_DEV_GET_DRV(val, adapter->portnum); | |
526 | if ((val & 0x3) != 1) { | 526 | if ((val & 0x3) != QLCNIC_TYPE_NIC) { |
527 | dev_err(&adapter->pdev->dev, "Not an Ethernet NIC func=%u\n", | 527 | dev_err(&adapter->pdev->dev, |
528 | val); | 528 | "Not an Ethernet NIC func=%u\n", val); |
529 | return -EIO; | 529 | return -EIO; |
530 | } | ||
531 | adapter->physical_port = (val >> 2); | ||
530 | } | 532 | } |
531 | |||
532 | adapter->physical_port = (val >> 2); | ||
533 | |||
534 | if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo)) | 533 | if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo)) |
535 | timeo = 30; | 534 | timeo = 30; |
536 | 535 | ||
@@ -1701,3 +1700,24 @@ qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring) | |||
1701 | sds_ring->consumer = consumer; | 1700 | sds_ring->consumer = consumer; |
1702 | writel(consumer, sds_ring->crb_sts_consumer); | 1701 | writel(consumer, sds_ring->crb_sts_consumer); |
1703 | } | 1702 | } |
1703 | |||
1704 | void | ||
1705 | qlcnic_fetch_mac(struct qlcnic_adapter *adapter, u32 off1, u32 off2, | ||
1706 | u8 alt_mac, u8 *mac) | ||
1707 | { | ||
1708 | u32 mac_low, mac_high; | ||
1709 | int i; | ||
1710 | |||
1711 | mac_low = QLCRD32(adapter, off1); | ||
1712 | mac_high = QLCRD32(adapter, off2); | ||
1713 | |||
1714 | if (alt_mac) { | ||
1715 | mac_low |= (mac_low >> 16) | (mac_high << 16); | ||
1716 | mac_high >>= 16; | ||
1717 | } | ||
1718 | |||
1719 | for (i = 0; i < 2; i++) | ||
1720 | mac[i] = (u8)(mac_high >> ((1 - i) * 8)); | ||
1721 | for (i = 2; i < 6; i++) | ||
1722 | mac[i] = (u8)(mac_low >> ((5 - i) * 8)); | ||
1723 | } | ||
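The new qlcnic_fetch_mac() above reads two 32-bit CRB words and unpacks them most-significant byte first: the high word provides mac[0..1] and the low word provides mac[2..5] (with an extra shuffle for the alternate-MAC case). A stand-alone sketch of the basic unpacking using made-up register values:

#include <stdint.h>
#include <stdio.h>

/* Same byte extraction as the non-alt_mac path of qlcnic_fetch_mac(),
 * applied to hypothetical register contents.
 */
int main(void)
{
	uint32_t mac_high = 0x00000102;	/* pretend CRB read at off2 */
	uint32_t mac_low  = 0x03040506;	/* pretend CRB read at off1 */
	uint8_t mac[6];
	int i;

	for (i = 0; i < 2; i++)
		mac[i] = (uint8_t)(mac_high >> ((1 - i) * 8));
	for (i = 2; i < 6; i++)
		mac[i] = (uint8_t)(mac_low >> ((5 - i) * 8));

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",	/* 01:02:03:04:05:06 */
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}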
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c index 23ea9caa5261..99371bcaa547 100644 --- a/drivers/net/qlcnic/qlcnic_main.c +++ b/drivers/net/qlcnic/qlcnic_main.c | |||
@@ -65,6 +65,10 @@ static int load_fw_file; | |||
65 | module_param(load_fw_file, int, 0644); | 65 | module_param(load_fw_file, int, 0644); |
66 | MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file"); | 66 | MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file"); |
67 | 67 | ||
68 | static int qlcnic_config_npars; | ||
69 | module_param(qlcnic_config_npars, int, 0644); | ||
70 | MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled"); | ||
71 | |||
68 | static int __devinit qlcnic_probe(struct pci_dev *pdev, | 72 | static int __devinit qlcnic_probe(struct pci_dev *pdev, |
69 | const struct pci_device_id *ent); | 73 | const struct pci_device_id *ent); |
70 | static void __devexit qlcnic_remove(struct pci_dev *pdev); | 74 | static void __devexit qlcnic_remove(struct pci_dev *pdev); |
@@ -99,7 +103,14 @@ static irqreturn_t qlcnic_msix_intr(int irq, void *data); | |||
99 | 103 | ||
100 | static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev); | 104 | static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev); |
101 | static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long); | 105 | static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long); |
102 | 106 | static int qlcnic_start_firmware(struct qlcnic_adapter *); | |
107 | |||
108 | static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *); | ||
109 | static void qlcnicvf_clear_ilb_mode(struct qlcnic_adapter *); | ||
110 | static int qlcnicvf_set_ilb_mode(struct qlcnic_adapter *); | ||
111 | static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32); | ||
112 | static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32); | ||
113 | static int qlcnicvf_start_firmware(struct qlcnic_adapter *); | ||
103 | /* PCI Device ID Table */ | 114 | /* PCI Device ID Table */ |
104 | #define ENTRY(device) \ | 115 | #define ENTRY(device) \ |
105 | {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \ | 116 | {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \ |
@@ -307,19 +318,14 @@ static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count) | |||
307 | static int | 318 | static int |
308 | qlcnic_read_mac_addr(struct qlcnic_adapter *adapter) | 319 | qlcnic_read_mac_addr(struct qlcnic_adapter *adapter) |
309 | { | 320 | { |
310 | int i; | 321 | u8 mac_addr[ETH_ALEN]; |
311 | unsigned char *p; | ||
312 | u64 mac_addr; | ||
313 | struct net_device *netdev = adapter->netdev; | 322 | struct net_device *netdev = adapter->netdev; |
314 | struct pci_dev *pdev = adapter->pdev; | 323 | struct pci_dev *pdev = adapter->pdev; |
315 | 324 | ||
316 | if (qlcnic_get_mac_addr(adapter, &mac_addr) != 0) | 325 | if (adapter->nic_ops->get_mac_addr(adapter, mac_addr) != 0) |
317 | return -EIO; | 326 | return -EIO; |
318 | 327 | ||
319 | p = (unsigned char *)&mac_addr; | 328 | memcpy(netdev->dev_addr, mac_addr, ETH_ALEN); |
320 | for (i = 0; i < 6; i++) | ||
321 | netdev->dev_addr[i] = *(p + 5 - i); | ||
322 | |||
323 | memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); | 329 | memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); |
324 | memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len); | 330 | memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len); |
325 | 331 | ||
@@ -371,6 +377,33 @@ static const struct net_device_ops qlcnic_netdev_ops = { | |||
371 | #endif | 377 | #endif |
372 | }; | 378 | }; |
373 | 379 | ||
380 | static struct qlcnic_nic_template qlcnic_ops = { | ||
381 | .get_mac_addr = qlcnic_get_mac_addr, | ||
382 | .config_bridged_mode = qlcnic_config_bridged_mode, | ||
383 | .config_led = qlcnic_config_led, | ||
384 | .set_ilb_mode = qlcnic_set_ilb_mode, | ||
385 | .clear_ilb_mode = qlcnic_clear_ilb_mode, | ||
386 | .start_firmware = qlcnic_start_firmware | ||
387 | }; | ||
388 | |||
389 | static struct qlcnic_nic_template qlcnic_pf_ops = { | ||
390 | .get_mac_addr = qlcnic_get_mac_address, | ||
391 | .config_bridged_mode = qlcnic_config_bridged_mode, | ||
392 | .config_led = qlcnic_config_led, | ||
393 | .set_ilb_mode = qlcnic_set_ilb_mode, | ||
394 | .clear_ilb_mode = qlcnic_clear_ilb_mode, | ||
395 | .start_firmware = qlcnic_start_firmware | ||
396 | }; | ||
397 | |||
398 | static struct qlcnic_nic_template qlcnic_vf_ops = { | ||
399 | .get_mac_addr = qlcnic_get_mac_address, | ||
400 | .config_bridged_mode = qlcnicvf_config_bridged_mode, | ||
401 | .config_led = qlcnicvf_config_led, | ||
402 | .set_ilb_mode = qlcnicvf_set_ilb_mode, | ||
403 | .clear_ilb_mode = qlcnicvf_clear_ilb_mode, | ||
404 | .start_firmware = qlcnicvf_start_firmware | ||
405 | }; | ||
406 | |||
374 | static void | 407 | static void |
375 | qlcnic_setup_intr(struct qlcnic_adapter *adapter) | 408 | qlcnic_setup_intr(struct qlcnic_adapter *adapter) |
376 | { | 409 | { |
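The block above introduces an ops table, struct qlcnic_nic_template, so the same driver can serve management, privileged and virtual (non-privileged) functions while dispatching firmware start, LED, loopback and bridging operations through adapter->nic_ops. Below is a minimal sketch of that dispatch pattern; struct my_adapter, my_pf_ops/my_vf_ops and my_probe() are made-up stand-ins, not qlcnic symbols.

	struct my_adapter;

	struct my_nic_template {
		int (*start_firmware)(struct my_adapter *);
		int (*config_led)(struct my_adapter *, unsigned int state,
				  unsigned int rate);
	};

	struct my_adapter {
		const struct my_nic_template *nic_ops;
	};

	static int my_pf_start_firmware(struct my_adapter *adapter)
	{
		/* full firmware bring-up, as the privileged/management path does */
		return 0;
	}

	static int my_vf_start_firmware(struct my_adapter *adapter)
	{
		/* wait for the privileged function, then just pick up the options */
		return 0;
	}

	static const struct my_nic_template my_pf_ops = {
		.start_firmware = my_pf_start_firmware,
	};

	static const struct my_nic_template my_vf_ops = {
		.start_firmware = my_vf_start_firmware,
	};

	static int my_probe(struct my_adapter *adapter, int privileged)
	{
		adapter->nic_ops = privileged ? &my_pf_ops : &my_vf_ops;

		/* callers never branch on the function type again */
		return adapter->nic_ops->start_firmware(adapter);
	}

Once the template pointer is chosen at probe time, call sites such as start_firmware and config_bridged_mode need no per-function-type branching, which is exactly how the later hunks replace direct qlcnic_start_firmware() calls with adapter->nic_ops->start_firmware().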
@@ -453,6 +486,132 @@ qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter) | |||
453 | } | 486 | } |
454 | 487 | ||
455 | static int | 488 | static int |
489 | qlcnic_set_function_modes(struct qlcnic_adapter *adapter) | ||
490 | { | ||
491 | u8 id; | ||
492 | u32 ref_count; | ||
493 | int i, ret = 1; | ||
494 | u32 data = QLCNIC_MGMT_FUNC; | ||
495 | void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE; | ||
496 | |||
497 | /* If other drivers are not in use, set their privilege level */ | ||
498 | ref_count = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT); | ||
499 | ret = qlcnic_api_lock(adapter); | ||
500 | if (ret) | ||
501 | goto err_lock; | ||
502 | if (QLC_DEV_CLR_REF_CNT(ref_count, adapter->ahw.pci_func)) | ||
503 | goto err_npar; | ||
504 | |||
505 | for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { | ||
506 | id = adapter->npars[i].id; | ||
507 | if (adapter->npars[i].type != QLCNIC_TYPE_NIC || | ||
508 | id == adapter->ahw.pci_func) | ||
509 | continue; | ||
510 | data |= (qlcnic_config_npars & QLC_DEV_SET_DRV(0xf, id)); | ||
511 | } | ||
512 | writel(data, priv_op); | ||
513 | |||
514 | err_npar: | ||
515 | qlcnic_api_unlock(adapter); | ||
516 | err_lock: | ||
517 | return ret; | ||
518 | } | ||
519 | |||
520 | static u8 | ||
521 | qlcnic_set_mgmt_driver(struct qlcnic_adapter *adapter) | ||
522 | { | ||
523 | u8 i, ret = 0; | ||
524 | |||
525 | if (qlcnic_get_pci_info(adapter)) | ||
526 | return ret; | ||
527 | /* Set the eswitch */ | ||
528 | for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++) { | ||
529 | if (!qlcnic_get_eswitch_capabilities(adapter, i, | ||
530 | &adapter->eswitch[i])) { | ||
531 | ret++; | ||
532 | qlcnic_toggle_eswitch(adapter, i, ret); | ||
533 | } | ||
534 | } | ||
535 | return ret; | ||
536 | } | ||
537 | |||
538 | static u32 | ||
539 | qlcnic_get_driver_mode(struct qlcnic_adapter *adapter) | ||
540 | { | ||
541 | void __iomem *msix_base_addr; | ||
542 | void __iomem *priv_op; | ||
543 | u32 func; | ||
544 | u32 msix_base; | ||
545 | u32 op_mode, priv_level; | ||
546 | |||
547 | /* Determine FW API version */ | ||
548 | adapter->fw_hal_version = readl(adapter->ahw.pci_base0 + QLCNIC_FW_API); | ||
549 | if (adapter->fw_hal_version == ~0) { | ||
550 | adapter->nic_ops = &qlcnic_ops; | ||
551 | adapter->fw_hal_version = QLCNIC_FW_BASE; | ||
552 | adapter->ahw.pci_func = PCI_FUNC(adapter->pdev->devfn); | ||
553 | dev_info(&adapter->pdev->dev, | ||
554 | "FW does not support nic partion\n"); | ||
555 | return adapter->fw_hal_version; | ||
556 | } | ||
557 | |||
558 | /* Find PCI function number */ | ||
559 | pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func); | ||
560 | msix_base_addr = adapter->ahw.pci_base0 + QLCNIC_MSIX_BASE; | ||
561 | msix_base = readl(msix_base_addr); | ||
562 | func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE; | ||
563 | adapter->ahw.pci_func = func; | ||
564 | |||
565 | /* Determine function privilege level */ | ||
566 | priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE; | ||
567 | op_mode = readl(priv_op); | ||
568 | if (op_mode == QLC_DEV_DRV_DEFAULT) { | ||
569 | priv_level = QLCNIC_MGMT_FUNC; | ||
570 | if (qlcnic_api_lock(adapter)) | ||
571 | return 0; | ||
572 | op_mode = (op_mode & ~QLC_DEV_SET_DRV(0xf, func)) | | ||
573 | (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC, func)); | ||
574 | writel(op_mode, priv_op); | ||
575 | qlcnic_api_unlock(adapter); | ||
576 | |||
577 | } else | ||
578 | priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func); | ||
579 | |||
580 | switch (priv_level) { | ||
581 | case QLCNIC_MGMT_FUNC: | ||
582 | adapter->op_mode = QLCNIC_MGMT_FUNC; | ||
583 | adapter->nic_ops = &qlcnic_pf_ops; | ||
584 | /* Set privilege level for other functions */ | ||
585 | if (qlcnic_config_npars) | ||
586 | qlcnic_set_function_modes(adapter); | ||
587 | qlcnic_dev_set_npar_ready(adapter); | ||
588 | dev_info(&adapter->pdev->dev, | ||
589 | "HAL Version: %d, Management function\n", | ||
590 | adapter->fw_hal_version); | ||
591 | break; | ||
592 | case QLCNIC_PRIV_FUNC: | ||
593 | adapter->op_mode = QLCNIC_PRIV_FUNC; | ||
594 | dev_info(&adapter->pdev->dev, | ||
595 | "HAL Version: %d, Privileged function\n", | ||
596 | adapter->fw_hal_version); | ||
597 | adapter->nic_ops = &qlcnic_pf_ops; | ||
598 | break; | ||
599 | case QLCNIC_NON_PRIV_FUNC: | ||
600 | adapter->op_mode = QLCNIC_NON_PRIV_FUNC; | ||
601 | dev_info(&adapter->pdev->dev, | ||
602 | "HAL Version: %d Non Privileged function\n", | ||
603 | adapter->fw_hal_version); | ||
604 | adapter->nic_ops = &qlcnic_vf_ops; | ||
605 | break; | ||
606 | default: | ||
607 | dev_info(&adapter->pdev->dev, "Unknown function mode: %d\n", | ||
608 | priv_level); | ||
609 | return 0; | ||
610 | } | ||
611 | return adapter->fw_hal_version; | ||
612 | } | ||
613 | |||
614 | static int | ||
456 | qlcnic_setup_pci_map(struct qlcnic_adapter *adapter) | 615 | qlcnic_setup_pci_map(struct qlcnic_adapter *adapter) |
457 | { | 616 | { |
458 | void __iomem *mem_ptr0 = NULL; | 617 | void __iomem *mem_ptr0 = NULL; |
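qlcnic_get_driver_mode() reads the per-function driver mode out of the QLCNIC_DRV_OP_MODE register, and qlcnic_set_function_modes() writes it for the other NIC functions. The 0xf mask in QLC_DEV_SET_DRV() suggests one 4-bit mode field per PCI function packed into the 32-bit register; the sketch below shows that read-modify-write pattern under exactly that assumption, with MY_-prefixed macros standing in for the qlcnic ones.

	#define MY_DRV_MODE_BITS	4
	#define MY_DEV_SET_DRV(mode, func)	((mode) << ((func) * MY_DRV_MODE_BITS))
	#define MY_DEV_GET_DRV(reg, func)	(((reg) >> ((func) * MY_DRV_MODE_BITS)) & 0xf)

	/* Clear this function's mode field, then write the new mode into it. */
	static unsigned int my_update_op_mode(unsigned int reg, unsigned int func,
					      unsigned int mode)
	{
		reg &= ~MY_DEV_SET_DRV(0xfU, func);
		reg |= MY_DEV_SET_DRV(mode, func);
		return reg;
	}

The privileged function uses the same clear-then-set step when it claims QLCNIC_MGMT_FUNC for itself in qlcnic_get_driver_mode() above, taking the API lock around the register update.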
@@ -460,7 +619,6 @@ qlcnic_setup_pci_map(struct qlcnic_adapter *adapter) | |||
460 | unsigned long mem_len, pci_len0 = 0; | 619 | unsigned long mem_len, pci_len0 = 0; |
461 | 620 | ||
462 | struct pci_dev *pdev = adapter->pdev; | 621 | struct pci_dev *pdev = adapter->pdev; |
463 | int pci_func = adapter->ahw.pci_func; | ||
464 | 622 | ||
465 | /* remap phys address */ | 623 | /* remap phys address */ |
466 | mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ | 624 | mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ |
@@ -483,8 +641,13 @@ qlcnic_setup_pci_map(struct qlcnic_adapter *adapter) | |||
483 | adapter->ahw.pci_base0 = mem_ptr0; | 641 | adapter->ahw.pci_base0 = mem_ptr0; |
484 | adapter->ahw.pci_len0 = pci_len0; | 642 | adapter->ahw.pci_len0 = pci_len0; |
485 | 643 | ||
644 | if (!qlcnic_get_driver_mode(adapter)) { | ||
645 | iounmap(adapter->ahw.pci_base0); | ||
646 | return -EIO; | ||
647 | } | ||
648 | |||
486 | adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter, | 649 | adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter, |
487 | QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func))); | 650 | QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func))); |
488 | 651 | ||
489 | return 0; | 652 | return 0; |
490 | } | 653 | } |
@@ -553,7 +716,10 @@ qlcnic_check_options(struct qlcnic_adapter *adapter) | |||
553 | dev_info(&pdev->dev, "firmware v%d.%d.%d\n", | 716 | dev_info(&pdev->dev, "firmware v%d.%d.%d\n", |
554 | fw_major, fw_minor, fw_build); | 717 | fw_major, fw_minor, fw_build); |
555 | 718 | ||
556 | adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1); | 719 | if (adapter->fw_hal_version == QLCNIC_FW_NPAR) |
720 | qlcnic_get_nic_info(adapter, adapter->ahw.pci_func); | ||
721 | else | ||
722 | adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1); | ||
557 | 723 | ||
558 | adapter->flags &= ~QLCNIC_LRO_ENABLED; | 724 | adapter->flags &= ~QLCNIC_LRO_ENABLED; |
559 | 725 | ||
@@ -631,8 +797,14 @@ wait_init: | |||
631 | QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY); | 797 | QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY); |
632 | qlcnic_idc_debug_info(adapter, 1); | 798 | qlcnic_idc_debug_info(adapter, 1); |
633 | 799 | ||
800 | qlcnic_dev_set_npar_ready(adapter); | ||
801 | |||
634 | qlcnic_check_options(adapter); | 802 | qlcnic_check_options(adapter); |
635 | 803 | ||
804 | if (adapter->fw_hal_version != QLCNIC_FW_BASE && | ||
805 | adapter->op_mode == QLCNIC_MGMT_FUNC) | ||
806 | qlcnic_set_mgmt_driver(adapter); | ||
807 | |||
636 | adapter->need_fw_reset = 0; | 808 | adapter->need_fw_reset = 0; |
637 | 809 | ||
638 | qlcnic_release_firmware(adapter); | 810 | qlcnic_release_firmware(adapter); |
@@ -977,12 +1149,11 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, | |||
977 | 1149 | ||
978 | SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops); | 1150 | SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops); |
979 | 1151 | ||
980 | netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO); | 1152 | netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | |
981 | netdev->features |= (NETIF_F_GRO); | 1153 | NETIF_F_IPV6_CSUM | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6); |
982 | netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO); | ||
983 | 1154 | ||
984 | netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6); | 1155 | netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | |
985 | netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6); | 1156 | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6); |
986 | 1157 | ||
987 | if (pci_using_dac) { | 1158 | if (pci_using_dac) { |
988 | netdev->features |= NETIF_F_HIGHDMA; | 1159 | netdev->features |= NETIF_F_HIGHDMA; |
@@ -1036,7 +1207,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1036 | struct net_device *netdev = NULL; | 1207 | struct net_device *netdev = NULL; |
1037 | struct qlcnic_adapter *adapter = NULL; | 1208 | struct qlcnic_adapter *adapter = NULL; |
1038 | int err; | 1209 | int err; |
1039 | int pci_func_id = PCI_FUNC(pdev->devfn); | ||
1040 | uint8_t revision_id; | 1210 | uint8_t revision_id; |
1041 | uint8_t pci_using_dac; | 1211 | uint8_t pci_using_dac; |
1042 | 1212 | ||
@@ -1072,7 +1242,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1072 | adapter->netdev = netdev; | 1242 | adapter->netdev = netdev; |
1073 | adapter->pdev = pdev; | 1243 | adapter->pdev = pdev; |
1074 | adapter->dev_rst_time = jiffies; | 1244 | adapter->dev_rst_time = jiffies; |
1075 | adapter->ahw.pci_func = pci_func_id; | ||
1076 | 1245 | ||
1077 | revision_id = pdev->revision; | 1246 | revision_id = pdev->revision; |
1078 | adapter->ahw.revision_id = revision_id; | 1247 | adapter->ahw.revision_id = revision_id; |
@@ -1088,7 +1257,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1088 | goto err_out_free_netdev; | 1257 | goto err_out_free_netdev; |
1089 | 1258 | ||
1090 | /* This will be reset for mezz cards */ | 1259 | /* This will be reset for mezz cards */ |
1091 | adapter->portnum = pci_func_id; | 1260 | adapter->portnum = adapter->ahw.pci_func; |
1092 | 1261 | ||
1093 | err = qlcnic_get_board_info(adapter); | 1262 | err = qlcnic_get_board_info(adapter); |
1094 | if (err) { | 1263 | if (err) { |
@@ -1102,7 +1271,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1102 | if (qlcnic_setup_idc_param(adapter)) | 1271 | if (qlcnic_setup_idc_param(adapter)) |
1103 | goto err_out_iounmap; | 1272 | goto err_out_iounmap; |
1104 | 1273 | ||
1105 | err = qlcnic_start_firmware(adapter); | 1274 | err = adapter->nic_ops->start_firmware(adapter); |
1106 | if (err) { | 1275 | if (err) { |
1107 | dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"); | 1276 | dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"); |
1108 | goto err_out_decr_ref; | 1277 | goto err_out_decr_ref; |
@@ -1175,6 +1344,11 @@ static void __devexit qlcnic_remove(struct pci_dev *pdev) | |||
1175 | 1344 | ||
1176 | qlcnic_detach(adapter); | 1345 | qlcnic_detach(adapter); |
1177 | 1346 | ||
1347 | if (adapter->npars != NULL) | ||
1348 | kfree(adapter->npars); | ||
1349 | if (adapter->eswitch != NULL) | ||
1350 | kfree(adapter->eswitch); | ||
1351 | |||
1178 | qlcnic_clr_all_drv_state(adapter); | 1352 | qlcnic_clr_all_drv_state(adapter); |
1179 | 1353 | ||
1180 | clear_bit(__QLCNIC_RESETTING, &adapter->state); | 1354 | clear_bit(__QLCNIC_RESETTING, &adapter->state); |
@@ -1263,7 +1437,7 @@ qlcnic_resume(struct pci_dev *pdev) | |||
1263 | pci_set_master(pdev); | 1437 | pci_set_master(pdev); |
1264 | pci_restore_state(pdev); | 1438 | pci_restore_state(pdev); |
1265 | 1439 | ||
1266 | err = qlcnic_start_firmware(adapter); | 1440 | err = adapter->nic_ops->start_firmware(adapter); |
1267 | if (err) { | 1441 | if (err) { |
1268 | dev_err(&pdev->dev, "failed to start firmware\n"); | 1442 | dev_err(&pdev->dev, "failed to start firmware\n"); |
1269 | return err; | 1443 | return err; |
@@ -1340,11 +1514,11 @@ qlcnic_tso_check(struct net_device *netdev, | |||
1340 | u8 opcode = TX_ETHER_PKT; | 1514 | u8 opcode = TX_ETHER_PKT; |
1341 | __be16 protocol = skb->protocol; | 1515 | __be16 protocol = skb->protocol; |
1342 | u16 flags = 0, vid = 0; | 1516 | u16 flags = 0, vid = 0; |
1343 | u32 producer; | ||
1344 | int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0; | 1517 | int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0; |
1345 | struct cmd_desc_type0 *hwdesc; | 1518 | struct cmd_desc_type0 *hwdesc; |
1346 | struct vlan_ethhdr *vh; | 1519 | struct vlan_ethhdr *vh; |
1347 | struct qlcnic_adapter *adapter = netdev_priv(netdev); | 1520 | struct qlcnic_adapter *adapter = netdev_priv(netdev); |
1521 | u32 producer = tx_ring->producer; | ||
1348 | 1522 | ||
1349 | if (protocol == cpu_to_be16(ETH_P_8021Q)) { | 1523 | if (protocol == cpu_to_be16(ETH_P_8021Q)) { |
1350 | 1524 | ||
@@ -1360,6 +1534,11 @@ qlcnic_tso_check(struct net_device *netdev, | |||
1360 | vlan_oob = 1; | 1534 | vlan_oob = 1; |
1361 | } | 1535 | } |
1362 | 1536 | ||
1537 | if (*(skb->data) & BIT_0) { | ||
1538 | flags |= BIT_0; | ||
1539 | memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN); | ||
1540 | } | ||
1541 | |||
1363 | if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && | 1542 | if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && |
1364 | skb_shinfo(skb)->gso_size > 0) { | 1543 | skb_shinfo(skb)->gso_size > 0) { |
1365 | 1544 | ||
@@ -1409,7 +1588,6 @@ qlcnic_tso_check(struct net_device *netdev, | |||
1409 | /* For LSO, we need to copy the MAC/IP/TCP headers into | 1588 | /* For LSO, we need to copy the MAC/IP/TCP headers into |
1410 | * the descriptor ring | 1589 | * the descriptor ring |
1411 | */ | 1590 | */ |
1412 | producer = tx_ring->producer; | ||
1413 | copied = 0; | 1591 | copied = 0; |
1414 | offset = 2; | 1592 | offset = 2; |
1415 | 1593 | ||
@@ -2109,7 +2287,7 @@ qlcnic_fwinit_work(struct work_struct *work) | |||
2109 | { | 2287 | { |
2110 | struct qlcnic_adapter *adapter = container_of(work, | 2288 | struct qlcnic_adapter *adapter = container_of(work, |
2111 | struct qlcnic_adapter, fw_work.work); | 2289 | struct qlcnic_adapter, fw_work.work); |
2112 | u32 dev_state = 0xf; | 2290 | u32 dev_state = 0xf, npar_state; |
2113 | 2291 | ||
2114 | if (qlcnic_api_lock(adapter)) | 2292 | if (qlcnic_api_lock(adapter)) |
2115 | goto err_ret; | 2293 | goto err_ret; |
@@ -2122,6 +2300,19 @@ qlcnic_fwinit_work(struct work_struct *work) | |||
2122 | return; | 2300 | return; |
2123 | } | 2301 | } |
2124 | 2302 | ||
2303 | if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) { | ||
2304 | npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); | ||
2305 | if (npar_state == QLCNIC_DEV_NPAR_RDY) { | ||
2306 | qlcnic_api_unlock(adapter); | ||
2307 | goto wait_npar; | ||
2308 | } else { | ||
2309 | qlcnic_schedule_work(adapter, qlcnic_fwinit_work, | ||
2310 | FW_POLL_DELAY); | ||
2311 | qlcnic_api_unlock(adapter); | ||
2312 | return; | ||
2313 | } | ||
2314 | } | ||
2315 | |||
2125 | if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) { | 2316 | if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) { |
2126 | dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n", | 2317 | dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n", |
2127 | adapter->reset_ack_timeo); | 2318 | adapter->reset_ack_timeo); |
@@ -2154,7 +2345,7 @@ skip_ack_check: | |||
2154 | 2345 | ||
2155 | qlcnic_api_unlock(adapter); | 2346 | qlcnic_api_unlock(adapter); |
2156 | 2347 | ||
2157 | if (!qlcnic_start_firmware(adapter)) { | 2348 | if (!adapter->nic_ops->start_firmware(adapter)) { |
2158 | qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); | 2349 | qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); |
2159 | return; | 2350 | return; |
2160 | } | 2351 | } |
@@ -2163,6 +2354,7 @@ skip_ack_check: | |||
2163 | 2354 | ||
2164 | qlcnic_api_unlock(adapter); | 2355 | qlcnic_api_unlock(adapter); |
2165 | 2356 | ||
2357 | wait_npar: | ||
2166 | dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); | 2358 | dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); |
2167 | QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state); | 2359 | QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state); |
2168 | 2360 | ||
@@ -2177,7 +2369,7 @@ skip_ack_check: | |||
2177 | break; | 2369 | break; |
2178 | 2370 | ||
2179 | default: | 2371 | default: |
2180 | if (!qlcnic_start_firmware(adapter)) { | 2372 | if (!adapter->nic_ops->start_firmware(adapter)) { |
2181 | qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); | 2373 | qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); |
2182 | return; | 2374 | return; |
2183 | } | 2375 | } |
@@ -2251,6 +2443,30 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter) | |||
2251 | qlcnic_api_unlock(adapter); | 2443 | qlcnic_api_unlock(adapter); |
2252 | } | 2444 | } |
2253 | 2445 | ||
2446 | /* Transition to NPAR READY state from NPAR NOT READY state */ | ||
2447 | static void | ||
2448 | qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter) | ||
2449 | { | ||
2450 | u32 state; | ||
2451 | |||
2452 | if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC || | ||
2453 | adapter->fw_hal_version == QLCNIC_FW_BASE) | ||
2454 | return; | ||
2455 | |||
2456 | if (qlcnic_api_lock(adapter)) | ||
2457 | return; | ||
2458 | |||
2459 | state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); | ||
2460 | |||
2461 | if (state != QLCNIC_DEV_NPAR_RDY) { | ||
2462 | QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, | ||
2463 | QLCNIC_DEV_NPAR_RDY); | ||
2464 | QLCDB(adapter, DRV, "NPAR READY state set\n"); | ||
2465 | } | ||
2466 | |||
2467 | qlcnic_api_unlock(adapter); | ||
2468 | } | ||
2469 | |||
2254 | static void | 2470 | static void |
2255 | qlcnic_schedule_work(struct qlcnic_adapter *adapter, | 2471 | qlcnic_schedule_work(struct qlcnic_adapter *adapter, |
2256 | work_func_t func, int delay) | 2472 | work_func_t func, int delay) |
@@ -2365,6 +2581,46 @@ reschedule: | |||
2365 | qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); | 2581 | qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); |
2366 | } | 2582 | } |
2367 | 2583 | ||
2584 | static int | ||
2585 | qlcnicvf_start_firmware(struct qlcnic_adapter *adapter) | ||
2586 | { | ||
2587 | int err; | ||
2588 | |||
2589 | err = qlcnic_can_start_firmware(adapter); | ||
2590 | if (err) | ||
2591 | return err; | ||
2592 | |||
2593 | qlcnic_check_options(adapter); | ||
2594 | |||
2595 | adapter->need_fw_reset = 0; | ||
2596 | |||
2597 | return err; | ||
2598 | } | ||
2599 | |||
2600 | static int | ||
2601 | qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable) | ||
2602 | { | ||
2603 | return -EOPNOTSUPP; | ||
2604 | } | ||
2605 | |||
2606 | static int | ||
2607 | qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate) | ||
2608 | { | ||
2609 | return -EOPNOTSUPP; | ||
2610 | } | ||
2611 | |||
2612 | static int | ||
2613 | qlcnicvf_set_ilb_mode(struct qlcnic_adapter *adapter) | ||
2614 | { | ||
2615 | return -EOPNOTSUPP; | ||
2616 | } | ||
2617 | |||
2618 | static void | ||
2619 | qlcnicvf_clear_ilb_mode(struct qlcnic_adapter *adapter) | ||
2620 | { | ||
2621 | return; | ||
2622 | } | ||
2623 | |||
2368 | static ssize_t | 2624 | static ssize_t |
2369 | qlcnic_store_bridged_mode(struct device *dev, | 2625 | qlcnic_store_bridged_mode(struct device *dev, |
2370 | struct device_attribute *attr, const char *buf, size_t len) | 2626 | struct device_attribute *attr, const char *buf, size_t len) |
@@ -2382,7 +2638,7 @@ qlcnic_store_bridged_mode(struct device *dev, | |||
2382 | if (strict_strtoul(buf, 2, &new)) | 2638 | if (strict_strtoul(buf, 2, &new)) |
2383 | goto err_out; | 2639 | goto err_out; |
2384 | 2640 | ||
2385 | if (!qlcnic_config_bridged_mode(adapter, !!new)) | 2641 | if (!adapter->nic_ops->config_bridged_mode(adapter, !!new)) |
2386 | ret = len; | 2642 | ret = len; |
2387 | 2643 | ||
2388 | err_out: | 2644 | err_out: |
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h index 20624ba44a37..bfb8b327f2fd 100644 --- a/drivers/net/qlge/qlge.h +++ b/drivers/net/qlge/qlge.h | |||
@@ -1062,7 +1062,7 @@ struct tx_buf_desc { | |||
1062 | #define TX_DESC_LEN_MASK 0x000fffff | 1062 | #define TX_DESC_LEN_MASK 0x000fffff |
1063 | #define TX_DESC_C 0x40000000 | 1063 | #define TX_DESC_C 0x40000000 |
1064 | #define TX_DESC_E 0x80000000 | 1064 | #define TX_DESC_E 0x80000000 |
1065 | } __attribute((packed)); | 1065 | } __packed; |
1066 | 1066 | ||
1067 | /* | 1067 | /* |
1068 | * IOCB Definitions... | 1068 | * IOCB Definitions... |
@@ -1095,7 +1095,7 @@ struct ob_mac_iocb_req { | |||
1095 | __le16 vlan_tci; | 1095 | __le16 vlan_tci; |
1096 | __le16 reserved4; | 1096 | __le16 reserved4; |
1097 | struct tx_buf_desc tbd[TX_DESC_PER_IOCB]; | 1097 | struct tx_buf_desc tbd[TX_DESC_PER_IOCB]; |
1098 | } __attribute((packed)); | 1098 | } __packed; |
1099 | 1099 | ||
1100 | struct ob_mac_iocb_rsp { | 1100 | struct ob_mac_iocb_rsp { |
1101 | u8 opcode; /* */ | 1101 | u8 opcode; /* */ |
@@ -1112,7 +1112,7 @@ struct ob_mac_iocb_rsp { | |||
1112 | u32 tid; | 1112 | u32 tid; |
1113 | u32 txq_idx; | 1113 | u32 txq_idx; |
1114 | __le32 reserved[13]; | 1114 | __le32 reserved[13]; |
1115 | } __attribute((packed)); | 1115 | } __packed; |
1116 | 1116 | ||
1117 | struct ob_mac_tso_iocb_req { | 1117 | struct ob_mac_tso_iocb_req { |
1118 | u8 opcode; | 1118 | u8 opcode; |
@@ -1140,7 +1140,7 @@ struct ob_mac_tso_iocb_req { | |||
1140 | __le16 vlan_tci; | 1140 | __le16 vlan_tci; |
1141 | __le16 mss; | 1141 | __le16 mss; |
1142 | struct tx_buf_desc tbd[TX_DESC_PER_IOCB]; | 1142 | struct tx_buf_desc tbd[TX_DESC_PER_IOCB]; |
1143 | } __attribute((packed)); | 1143 | } __packed; |
1144 | 1144 | ||
1145 | struct ob_mac_tso_iocb_rsp { | 1145 | struct ob_mac_tso_iocb_rsp { |
1146 | u8 opcode; | 1146 | u8 opcode; |
@@ -1157,7 +1157,7 @@ struct ob_mac_tso_iocb_rsp { | |||
1157 | u32 tid; | 1157 | u32 tid; |
1158 | u32 txq_idx; | 1158 | u32 txq_idx; |
1159 | __le32 reserved2[13]; | 1159 | __le32 reserved2[13]; |
1160 | } __attribute((packed)); | 1160 | } __packed; |
1161 | 1161 | ||
1162 | struct ib_mac_iocb_rsp { | 1162 | struct ib_mac_iocb_rsp { |
1163 | u8 opcode; /* 0x20 */ | 1163 | u8 opcode; /* 0x20 */ |
@@ -1216,7 +1216,7 @@ struct ib_mac_iocb_rsp { | |||
1216 | #define IB_MAC_IOCB_RSP_HL 0x80 | 1216 | #define IB_MAC_IOCB_RSP_HL 0x80 |
1217 | __le32 hdr_len; /* */ | 1217 | __le32 hdr_len; /* */ |
1218 | __le64 hdr_addr; /* */ | 1218 | __le64 hdr_addr; /* */ |
1219 | } __attribute((packed)); | 1219 | } __packed; |
1220 | 1220 | ||
1221 | struct ib_ae_iocb_rsp { | 1221 | struct ib_ae_iocb_rsp { |
1222 | u8 opcode; | 1222 | u8 opcode; |
@@ -1237,7 +1237,7 @@ struct ib_ae_iocb_rsp { | |||
1237 | #define PCI_ERR_ANON_BUF_RD 0x40 | 1237 | #define PCI_ERR_ANON_BUF_RD 0x40 |
1238 | u8 q_id; | 1238 | u8 q_id; |
1239 | __le32 reserved[15]; | 1239 | __le32 reserved[15]; |
1240 | } __attribute((packed)); | 1240 | } __packed; |
1241 | 1241 | ||
1242 | /* | 1242 | /* |
1243 | * These three structures are for generic | 1243 | * These three structures are for generic |
@@ -1249,7 +1249,7 @@ struct ql_net_rsp_iocb { | |||
1249 | __le16 length; | 1249 | __le16 length; |
1250 | __le32 tid; | 1250 | __le32 tid; |
1251 | __le32 reserved[14]; | 1251 | __le32 reserved[14]; |
1252 | } __attribute((packed)); | 1252 | } __packed; |
1253 | 1253 | ||
1254 | struct net_req_iocb { | 1254 | struct net_req_iocb { |
1255 | u8 opcode; | 1255 | u8 opcode; |
@@ -1257,7 +1257,7 @@ struct net_req_iocb { | |||
1257 | __le16 flags1; | 1257 | __le16 flags1; |
1258 | __le32 tid; | 1258 | __le32 tid; |
1259 | __le32 reserved1[30]; | 1259 | __le32 reserved1[30]; |
1260 | } __attribute((packed)); | 1260 | } __packed; |
1261 | 1261 | ||
1262 | /* | 1262 | /* |
1263 | * tx ring initialization control block for chip. | 1263 | * tx ring initialization control block for chip. |
@@ -1283,7 +1283,7 @@ struct wqicb { | |||
1283 | __le16 rid; | 1283 | __le16 rid; |
1284 | __le64 addr; | 1284 | __le64 addr; |
1285 | __le64 cnsmr_idx_addr; | 1285 | __le64 cnsmr_idx_addr; |
1286 | } __attribute((packed)); | 1286 | } __packed; |
1287 | 1287 | ||
1288 | /* | 1288 | /* |
1289 | * rx ring initialization control block for chip. | 1289 | * rx ring initialization control block for chip. |
@@ -1317,7 +1317,7 @@ struct cqicb { | |||
1317 | __le64 sbq_addr; | 1317 | __le64 sbq_addr; |
1318 | __le16 sbq_buf_size; | 1318 | __le16 sbq_buf_size; |
1319 | __le16 sbq_len; /* entry count */ | 1319 | __le16 sbq_len; /* entry count */ |
1320 | } __attribute((packed)); | 1320 | } __packed; |
1321 | 1321 | ||
1322 | struct ricb { | 1322 | struct ricb { |
1323 | u8 base_cq; | 1323 | u8 base_cq; |
@@ -1335,7 +1335,7 @@ struct ricb { | |||
1335 | u8 hash_cq_id[1024]; | 1335 | u8 hash_cq_id[1024]; |
1336 | __le32 ipv6_hash_key[10]; | 1336 | __le32 ipv6_hash_key[10]; |
1337 | __le32 ipv4_hash_key[4]; | 1337 | __le32 ipv4_hash_key[4]; |
1338 | } __attribute((packed)); | 1338 | } __packed; |
1339 | 1339 | ||
1340 | /* SOFTWARE/DRIVER DATA STRUCTURES. */ | 1340 | /* SOFTWARE/DRIVER DATA STRUCTURES. */ |
1341 | 1341 | ||
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c index 9a251acf5ab8..7d482a2316ac 100644 --- a/drivers/net/r6040.c +++ b/drivers/net/r6040.c | |||
@@ -44,12 +44,13 @@ | |||
44 | #include <linux/io.h> | 44 | #include <linux/io.h> |
45 | #include <linux/irq.h> | 45 | #include <linux/irq.h> |
46 | #include <linux/uaccess.h> | 46 | #include <linux/uaccess.h> |
47 | #include <linux/phy.h> | ||
47 | 48 | ||
48 | #include <asm/processor.h> | 49 | #include <asm/processor.h> |
49 | 50 | ||
50 | #define DRV_NAME "r6040" | 51 | #define DRV_NAME "r6040" |
51 | #define DRV_VERSION "0.25" | 52 | #define DRV_VERSION "0.26" |
52 | #define DRV_RELDATE "20Aug2009" | 53 | #define DRV_RELDATE "30May2010" |
53 | 54 | ||
54 | /* PHY CHIP Address */ | 55 | /* PHY CHIP Address */ |
55 | #define PHY1_ADDR 1 /* For MAC1 */ | 56 | #define PHY1_ADDR 1 /* For MAC1 */ |
@@ -179,7 +180,6 @@ struct r6040_descriptor { | |||
179 | 180 | ||
180 | struct r6040_private { | 181 | struct r6040_private { |
181 | spinlock_t lock; /* driver lock */ | 182 | spinlock_t lock; /* driver lock */ |
182 | struct timer_list timer; | ||
183 | struct pci_dev *pdev; | 183 | struct pci_dev *pdev; |
184 | struct r6040_descriptor *rx_insert_ptr; | 184 | struct r6040_descriptor *rx_insert_ptr; |
185 | struct r6040_descriptor *rx_remove_ptr; | 185 | struct r6040_descriptor *rx_remove_ptr; |
@@ -189,13 +189,15 @@ struct r6040_private { | |||
189 | struct r6040_descriptor *tx_ring; | 189 | struct r6040_descriptor *tx_ring; |
190 | dma_addr_t rx_ring_dma; | 190 | dma_addr_t rx_ring_dma; |
191 | dma_addr_t tx_ring_dma; | 191 | dma_addr_t tx_ring_dma; |
192 | u16 tx_free_desc, phy_addr, phy_mode; | 192 | u16 tx_free_desc, phy_addr; |
193 | u16 mcr0, mcr1; | 193 | u16 mcr0, mcr1; |
194 | u16 switch_sig; | ||
195 | struct net_device *dev; | 194 | struct net_device *dev; |
196 | struct mii_if_info mii_if; | 195 | struct mii_bus *mii_bus; |
197 | struct napi_struct napi; | 196 | struct napi_struct napi; |
198 | void __iomem *base; | 197 | void __iomem *base; |
198 | struct phy_device *phydev; | ||
199 | int old_link; | ||
200 | int old_duplex; | ||
199 | }; | 201 | }; |
200 | 202 | ||
201 | static char version[] __devinitdata = KERN_INFO DRV_NAME | 203 | static char version[] __devinitdata = KERN_INFO DRV_NAME |
@@ -238,20 +240,30 @@ static void r6040_phy_write(void __iomem *ioaddr, int phy_addr, int reg, u16 val | |||
238 | } | 240 | } |
239 | } | 241 | } |
240 | 242 | ||
241 | static int r6040_mdio_read(struct net_device *dev, int mii_id, int reg) | 243 | static int r6040_mdiobus_read(struct mii_bus *bus, int phy_addr, int reg) |
242 | { | 244 | { |
245 | struct net_device *dev = bus->priv; | ||
243 | struct r6040_private *lp = netdev_priv(dev); | 246 | struct r6040_private *lp = netdev_priv(dev); |
244 | void __iomem *ioaddr = lp->base; | 247 | void __iomem *ioaddr = lp->base; |
245 | 248 | ||
246 | return (r6040_phy_read(ioaddr, lp->phy_addr, reg)); | 249 | return r6040_phy_read(ioaddr, phy_addr, reg); |
247 | } | 250 | } |
248 | 251 | ||
249 | static void r6040_mdio_write(struct net_device *dev, int mii_id, int reg, int val) | 252 | static int r6040_mdiobus_write(struct mii_bus *bus, int phy_addr, |
253 | int reg, u16 value) | ||
250 | { | 254 | { |
255 | struct net_device *dev = bus->priv; | ||
251 | struct r6040_private *lp = netdev_priv(dev); | 256 | struct r6040_private *lp = netdev_priv(dev); |
252 | void __iomem *ioaddr = lp->base; | 257 | void __iomem *ioaddr = lp->base; |
253 | 258 | ||
254 | r6040_phy_write(ioaddr, lp->phy_addr, reg, val); | 259 | r6040_phy_write(ioaddr, phy_addr, reg, value); |
260 | |||
261 | return 0; | ||
262 | } | ||
263 | |||
264 | static int r6040_mdiobus_reset(struct mii_bus *bus) | ||
265 | { | ||
266 | return 0; | ||
255 | } | 267 | } |
256 | 268 | ||
257 | static void r6040_free_txbufs(struct net_device *dev) | 269 | static void r6040_free_txbufs(struct net_device *dev) |
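With the r6040_mdiobus_* callbacks registered on a struct mii_bus, phylib rather than the driver's private mii_if code owns MDIO access, and PHY registers can be reached through the generic mdiobus accessors. A brief usage sketch; the bus pointer and PHY address are assumed to come from the driver's private data, and my_read_link_status() is not an r6040 function.

	#include <linux/mii.h>
	#include <linux/phy.h>

	/* Returns 1 if the link is reported up, 0 if down, negative on MDIO error. */
	static int my_read_link_status(struct mii_bus *bus, int phy_addr)
	{
		int bmsr = mdiobus_read(bus, phy_addr, MII_BMSR);

		if (bmsr < 0)
			return bmsr;

		return !!(bmsr & BMSR_LSTATUS);
	}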
@@ -408,10 +420,9 @@ static void r6040_tx_timeout(struct net_device *dev) | |||
408 | void __iomem *ioaddr = priv->base; | 420 | void __iomem *ioaddr = priv->base; |
409 | 421 | ||
410 | netdev_warn(dev, "transmit timed out, int enable %4.4x " | 422 | netdev_warn(dev, "transmit timed out, int enable %4.4x " |
411 | "status %4.4x, PHY status %4.4x\n", | 423 | "status %4.4x\n", |
412 | ioread16(ioaddr + MIER), | 424 | ioread16(ioaddr + MIER), |
413 | ioread16(ioaddr + MISR), | 425 | ioread16(ioaddr + MISR)); |
414 | r6040_mdio_read(dev, priv->mii_if.phy_id, MII_BMSR)); | ||
415 | 426 | ||
416 | dev->stats.tx_errors++; | 427 | dev->stats.tx_errors++; |
417 | 428 | ||
@@ -463,9 +474,6 @@ static int r6040_close(struct net_device *dev) | |||
463 | struct r6040_private *lp = netdev_priv(dev); | 474 | struct r6040_private *lp = netdev_priv(dev); |
464 | struct pci_dev *pdev = lp->pdev; | 475 | struct pci_dev *pdev = lp->pdev; |
465 | 476 | ||
466 | /* deleted timer */ | ||
467 | del_timer_sync(&lp->timer); | ||
468 | |||
469 | spin_lock_irq(&lp->lock); | 477 | spin_lock_irq(&lp->lock); |
470 | napi_disable(&lp->napi); | 478 | napi_disable(&lp->napi); |
471 | netif_stop_queue(dev); | 479 | netif_stop_queue(dev); |
@@ -495,64 +503,14 @@ static int r6040_close(struct net_device *dev) | |||
495 | return 0; | 503 | return 0; |
496 | } | 504 | } |
497 | 505 | ||
498 | /* Status of PHY CHIP */ | ||
499 | static int r6040_phy_mode_chk(struct net_device *dev) | ||
500 | { | ||
501 | struct r6040_private *lp = netdev_priv(dev); | ||
502 | void __iomem *ioaddr = lp->base; | ||
503 | int phy_dat; | ||
504 | |||
505 | /* PHY Link Status Check */ | ||
506 | phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 1); | ||
507 | if (!(phy_dat & 0x4)) | ||
508 | phy_dat = 0x8000; /* Link Failed, full duplex */ | ||
509 | |||
510 | /* PHY Chip Auto-Negotiation Status */ | ||
511 | phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 1); | ||
512 | if (phy_dat & 0x0020) { | ||
513 | /* Auto Negotiation Mode */ | ||
514 | phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 5); | ||
515 | phy_dat &= r6040_phy_read(ioaddr, lp->phy_addr, 4); | ||
516 | if (phy_dat & 0x140) | ||
517 | /* Force full duplex */ | ||
518 | phy_dat = 0x8000; | ||
519 | else | ||
520 | phy_dat = 0; | ||
521 | } else { | ||
522 | /* Force Mode */ | ||
523 | phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 0); | ||
524 | if (phy_dat & 0x100) | ||
525 | phy_dat = 0x8000; | ||
526 | else | ||
527 | phy_dat = 0x0000; | ||
528 | } | ||
529 | |||
530 | return phy_dat; | ||
531 | }; | ||
532 | |||
533 | static void r6040_set_carrier(struct mii_if_info *mii) | ||
534 | { | ||
535 | if (r6040_phy_mode_chk(mii->dev)) { | ||
536 | /* autoneg is off: Link is always assumed to be up */ | ||
537 | if (!netif_carrier_ok(mii->dev)) | ||
538 | netif_carrier_on(mii->dev); | ||
539 | } else | ||
540 | r6040_phy_mode_chk(mii->dev); | ||
541 | } | ||
542 | |||
543 | static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | 506 | static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
544 | { | 507 | { |
545 | struct r6040_private *lp = netdev_priv(dev); | 508 | struct r6040_private *lp = netdev_priv(dev); |
546 | struct mii_ioctl_data *data = if_mii(rq); | ||
547 | int rc; | ||
548 | 509 | ||
549 | if (!netif_running(dev)) | 510 | if (!lp->phydev) |
550 | return -EINVAL; | 511 | return -EINVAL; |
551 | spin_lock_irq(&lp->lock); | 512 | |
552 | rc = generic_mii_ioctl(&lp->mii_if, data, cmd, NULL); | 513 | return phy_mii_ioctl(lp->phydev, if_mii(rq), cmd); |
553 | spin_unlock_irq(&lp->lock); | ||
554 | r6040_set_carrier(&lp->mii_if); | ||
555 | return rc; | ||
556 | } | 514 | } |
557 | 515 | ||
558 | static int r6040_rx(struct net_device *dev, int limit) | 516 | static int r6040_rx(struct net_device *dev, int limit) |
@@ -751,26 +709,6 @@ static int r6040_up(struct net_device *dev) | |||
751 | if (ret) | 709 | if (ret) |
752 | return ret; | 710 | return ret; |
753 | 711 | ||
754 | /* Read the PHY ID */ | ||
755 | lp->switch_sig = r6040_phy_read(ioaddr, 0, 2); | ||
756 | |||
757 | if (lp->switch_sig == ICPLUS_PHY_ID) { | ||
758 | r6040_phy_write(ioaddr, 29, 31, 0x175C); /* Enable registers */ | ||
759 | lp->phy_mode = 0x8000; | ||
760 | } else { | ||
761 | /* PHY Mode Check */ | ||
762 | r6040_phy_write(ioaddr, lp->phy_addr, 4, PHY_CAP); | ||
763 | r6040_phy_write(ioaddr, lp->phy_addr, 0, PHY_MODE); | ||
764 | |||
765 | if (PHY_MODE == 0x3100) | ||
766 | lp->phy_mode = r6040_phy_mode_chk(dev); | ||
767 | else | ||
768 | lp->phy_mode = (PHY_MODE & 0x0100) ? 0x8000:0x0; | ||
769 | } | ||
770 | |||
771 | /* Set duplex mode */ | ||
772 | lp->mcr0 |= lp->phy_mode; | ||
773 | |||
774 | /* improve performance (by RDC guys) */ | 712 | /* improve performance (by RDC guys) */ |
775 | r6040_phy_write(ioaddr, 30, 17, (r6040_phy_read(ioaddr, 30, 17) | 0x4000)); | 713 | r6040_phy_write(ioaddr, 30, 17, (r6040_phy_read(ioaddr, 30, 17) | 0x4000)); |
776 | r6040_phy_write(ioaddr, 30, 17, ~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000)); | 714 | r6040_phy_write(ioaddr, 30, 17, ~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000)); |
@@ -783,35 +721,6 @@ static int r6040_up(struct net_device *dev) | |||
783 | return 0; | 721 | return 0; |
784 | } | 722 | } |
785 | 723 | ||
786 | /* | ||
787 | A periodic timer routine | ||
788 | Polling PHY Chip Link Status | ||
789 | */ | ||
790 | static void r6040_timer(unsigned long data) | ||
791 | { | ||
792 | struct net_device *dev = (struct net_device *)data; | ||
793 | struct r6040_private *lp = netdev_priv(dev); | ||
794 | void __iomem *ioaddr = lp->base; | ||
795 | u16 phy_mode; | ||
796 | |||
797 | /* Polling PHY Chip Status */ | ||
798 | if (PHY_MODE == 0x3100) | ||
799 | phy_mode = r6040_phy_mode_chk(dev); | ||
800 | else | ||
801 | phy_mode = (PHY_MODE & 0x0100) ? 0x8000:0x0; | ||
802 | |||
803 | if (phy_mode != lp->phy_mode) { | ||
804 | lp->phy_mode = phy_mode; | ||
805 | lp->mcr0 = (lp->mcr0 & 0x7fff) | phy_mode; | ||
806 | iowrite16(lp->mcr0, ioaddr); | ||
807 | } | ||
808 | |||
809 | /* Timer active again */ | ||
810 | mod_timer(&lp->timer, round_jiffies(jiffies + HZ)); | ||
811 | |||
812 | /* Check media */ | ||
813 | mii_check_media(&lp->mii_if, 1, 1); | ||
814 | } | ||
815 | 724 | ||
816 | /* Read/set MAC address routines */ | 725 | /* Read/set MAC address routines */ |
817 | static void r6040_mac_address(struct net_device *dev) | 726 | static void r6040_mac_address(struct net_device *dev) |
@@ -873,10 +782,6 @@ static int r6040_open(struct net_device *dev) | |||
873 | napi_enable(&lp->napi); | 782 | napi_enable(&lp->napi); |
874 | netif_start_queue(dev); | 783 | netif_start_queue(dev); |
875 | 784 | ||
876 | /* set and active a timer process */ | ||
877 | setup_timer(&lp->timer, r6040_timer, (unsigned long) dev); | ||
878 | if (lp->switch_sig != ICPLUS_PHY_ID) | ||
879 | mod_timer(&lp->timer, jiffies + HZ); | ||
880 | return 0; | 785 | return 0; |
881 | } | 786 | } |
882 | 787 | ||
@@ -1015,40 +920,22 @@ static void netdev_get_drvinfo(struct net_device *dev, | |||
1015 | static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 920 | static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
1016 | { | 921 | { |
1017 | struct r6040_private *rp = netdev_priv(dev); | 922 | struct r6040_private *rp = netdev_priv(dev); |
1018 | int rc; | ||
1019 | |||
1020 | spin_lock_irq(&rp->lock); | ||
1021 | rc = mii_ethtool_gset(&rp->mii_if, cmd); | ||
1022 | spin_unlock_irq(&rp->lock); | ||
1023 | 923 | ||
1024 | return rc; | 924 | return phy_ethtool_gset(rp->phydev, cmd); |
1025 | } | 925 | } |
1026 | 926 | ||
1027 | static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 927 | static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
1028 | { | 928 | { |
1029 | struct r6040_private *rp = netdev_priv(dev); | 929 | struct r6040_private *rp = netdev_priv(dev); |
1030 | int rc; | ||
1031 | |||
1032 | spin_lock_irq(&rp->lock); | ||
1033 | rc = mii_ethtool_sset(&rp->mii_if, cmd); | ||
1034 | spin_unlock_irq(&rp->lock); | ||
1035 | r6040_set_carrier(&rp->mii_if); | ||
1036 | |||
1037 | return rc; | ||
1038 | } | ||
1039 | |||
1040 | static u32 netdev_get_link(struct net_device *dev) | ||
1041 | { | ||
1042 | struct r6040_private *rp = netdev_priv(dev); | ||
1043 | 930 | ||
1044 | return mii_link_ok(&rp->mii_if); | 931 | return phy_ethtool_sset(rp->phydev, cmd); |
1045 | } | 932 | } |
1046 | 933 | ||
1047 | static const struct ethtool_ops netdev_ethtool_ops = { | 934 | static const struct ethtool_ops netdev_ethtool_ops = { |
1048 | .get_drvinfo = netdev_get_drvinfo, | 935 | .get_drvinfo = netdev_get_drvinfo, |
1049 | .get_settings = netdev_get_settings, | 936 | .get_settings = netdev_get_settings, |
1050 | .set_settings = netdev_set_settings, | 937 | .set_settings = netdev_set_settings, |
1051 | .get_link = netdev_get_link, | 938 | .get_link = ethtool_op_get_link, |
1052 | }; | 939 | }; |
1053 | 940 | ||
1054 | static const struct net_device_ops r6040_netdev_ops = { | 941 | static const struct net_device_ops r6040_netdev_ops = { |
@@ -1067,6 +954,79 @@ static const struct net_device_ops r6040_netdev_ops = { | |||
1067 | #endif | 954 | #endif |
1068 | }; | 955 | }; |
1069 | 956 | ||
957 | static void r6040_adjust_link(struct net_device *dev) | ||
958 | { | ||
959 | struct r6040_private *lp = netdev_priv(dev); | ||
960 | struct phy_device *phydev = lp->phydev; | ||
961 | int status_changed = 0; | ||
962 | void __iomem *ioaddr = lp->base; | ||
963 | |||
964 | BUG_ON(!phydev); | ||
965 | |||
966 | if (lp->old_link != phydev->link) { | ||
967 | status_changed = 1; | ||
968 | lp->old_link = phydev->link; | ||
969 | } | ||
970 | |||
971 | /* reflect duplex change */ | ||
972 | if (phydev->link && (lp->old_duplex != phydev->duplex)) { | ||
973 | lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? 0x8000 : 0); | ||
974 | iowrite16(lp->mcr0, ioaddr); | ||
975 | |||
976 | status_changed = 1; | ||
977 | lp->old_duplex = phydev->duplex; | ||
978 | } | ||
979 | |||
980 | if (status_changed) { | ||
981 | pr_info("%s: link %s", dev->name, phydev->link ? | ||
982 | "UP" : "DOWN"); | ||
983 | if (phydev->link) | ||
984 | pr_cont(" - %d/%s", phydev->speed, | ||
985 | DUPLEX_FULL == phydev->duplex ? "full" : "half"); | ||
986 | pr_cont("\n"); | ||
987 | } | ||
988 | } | ||
989 | |||
990 | static int r6040_mii_probe(struct net_device *dev) | ||
991 | { | ||
992 | struct r6040_private *lp = netdev_priv(dev); | ||
993 | struct phy_device *phydev = NULL; | ||
994 | |||
995 | phydev = phy_find_first(lp->mii_bus); | ||
996 | if (!phydev) { | ||
997 | dev_err(&lp->pdev->dev, "no PHY found\n"); | ||
998 | return -ENODEV; | ||
999 | } | ||
1000 | |||
1001 | phydev = phy_connect(dev, dev_name(&phydev->dev), &r6040_adjust_link, | ||
1002 | 0, PHY_INTERFACE_MODE_MII); | ||
1003 | |||
1004 | if (IS_ERR(phydev)) { | ||
1005 | dev_err(&lp->pdev->dev, "could not attach to PHY\n"); | ||
1006 | return PTR_ERR(phydev); | ||
1007 | } | ||
1008 | |||
1009 | /* mask with MAC supported features */ | ||
1010 | phydev->supported &= (SUPPORTED_10baseT_Half | ||
1011 | | SUPPORTED_10baseT_Full | ||
1012 | | SUPPORTED_100baseT_Half | ||
1013 | | SUPPORTED_100baseT_Full | ||
1014 | | SUPPORTED_Autoneg | ||
1015 | | SUPPORTED_MII | ||
1016 | | SUPPORTED_TP); | ||
1017 | |||
1018 | phydev->advertising = phydev->supported; | ||
1019 | lp->phydev = phydev; | ||
1020 | lp->old_link = 0; | ||
1021 | lp->old_duplex = -1; | ||
1022 | |||
1023 | dev_info(&lp->pdev->dev, "attached PHY driver [%s] " | ||
1024 | "(mii_bus:phy_addr=%s)\n", | ||
1025 | phydev->drv->name, dev_name(&phydev->dev)); | ||
1026 | |||
1027 | return 0; | ||
1028 | } | ||
1029 | |||
1070 | static int __devinit r6040_init_one(struct pci_dev *pdev, | 1030 | static int __devinit r6040_init_one(struct pci_dev *pdev, |
1071 | const struct pci_device_id *ent) | 1031 | const struct pci_device_id *ent) |
1072 | { | 1032 | { |
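r6040_mii_probe() follows the standard phylib attach sequence: find the first PHY on the newly registered bus, phy_connect() it with r6040_adjust_link() as the link-change callback, and mask the advertised features down to what the MAC supports. The rest of the phylib lifecycle is normally phy_start() from the net_device open path and phy_stop() on close; those hooks are not visible in the hunks here, so the sketch below shows the conventional pattern with hypothetical my_open()/my_close() handlers rather than the driver's actual code.

	static int my_open(struct net_device *dev)
	{
		struct r6040_private *lp = netdev_priv(dev);

		/* start the PHY state machine; adjust_link runs on link changes */
		phy_start(lp->phydev);
		return 0;
	}

	static int my_close(struct net_device *dev)
	{
		struct r6040_private *lp = netdev_priv(dev);

		phy_stop(lp->phydev);
		return 0;
	}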
@@ -1077,6 +1037,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev, | |||
1077 | static int card_idx = -1; | 1037 | static int card_idx = -1; |
1078 | int bar = 0; | 1038 | int bar = 0; |
1079 | u16 *adrp; | 1039 | u16 *adrp; |
1040 | int i; | ||
1080 | 1041 | ||
1081 | printk("%s\n", version); | 1042 | printk("%s\n", version); |
1082 | 1043 | ||
@@ -1163,7 +1124,6 @@ static int __devinit r6040_init_one(struct pci_dev *pdev, | |||
1163 | /* Init RDC private data */ | 1124 | /* Init RDC private data */ |
1164 | lp->mcr0 = 0x1002; | 1125 | lp->mcr0 = 0x1002; |
1165 | lp->phy_addr = phy_table[card_idx]; | 1126 | lp->phy_addr = phy_table[card_idx]; |
1166 | lp->switch_sig = 0; | ||
1167 | 1127 | ||
1168 | /* The RDC-specific entries in the device structure. */ | 1128 | /* The RDC-specific entries in the device structure. */ |
1169 | dev->netdev_ops = &r6040_netdev_ops; | 1129 | dev->netdev_ops = &r6040_netdev_ops; |
@@ -1171,28 +1131,54 @@ static int __devinit r6040_init_one(struct pci_dev *pdev, | |||
1171 | dev->watchdog_timeo = TX_TIMEOUT; | 1131 | dev->watchdog_timeo = TX_TIMEOUT; |
1172 | 1132 | ||
1173 | netif_napi_add(dev, &lp->napi, r6040_poll, 64); | 1133 | netif_napi_add(dev, &lp->napi, r6040_poll, 64); |
1174 | lp->mii_if.dev = dev; | 1134 | |
1175 | lp->mii_if.mdio_read = r6040_mdio_read; | 1135 | lp->mii_bus = mdiobus_alloc(); |
1176 | lp->mii_if.mdio_write = r6040_mdio_write; | 1136 | if (!lp->mii_bus) { |
1177 | lp->mii_if.phy_id = lp->phy_addr; | 1137 | dev_err(&pdev->dev, "mdiobus_alloc() failed\n"); |
1178 | lp->mii_if.phy_id_mask = 0x1f; | ||
1179 | lp->mii_if.reg_num_mask = 0x1f; | ||
1180 | |||
1181 | /* Check the vendor ID on the PHY, if 0xffff assume none attached */ | ||
1182 | if (r6040_phy_read(ioaddr, lp->phy_addr, 2) == 0xffff) { | ||
1183 | dev_err(&pdev->dev, "Failed to detect an attached PHY\n"); | ||
1184 | err = -ENODEV; | ||
1185 | goto err_out_unmap; | 1138 | goto err_out_unmap; |
1186 | } | 1139 | } |
1187 | 1140 | ||
1141 | lp->mii_bus->priv = dev; | ||
1142 | lp->mii_bus->read = r6040_mdiobus_read; | ||
1143 | lp->mii_bus->write = r6040_mdiobus_write; | ||
1144 | lp->mii_bus->reset = r6040_mdiobus_reset; | ||
1145 | lp->mii_bus->name = "r6040_eth_mii"; | ||
1146 | snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x", card_idx); | ||
1147 | lp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); | ||
1148 | if (!lp->mii_bus->irq) { | ||
1149 | dev_err(&pdev->dev, "mii_bus irq allocation failed\n"); | ||
1150 | goto err_out_mdio; | ||
1151 | } | ||
1152 | |||
1153 | for (i = 0; i < PHY_MAX_ADDR; i++) | ||
1154 | lp->mii_bus->irq[i] = PHY_POLL; | ||
1155 | |||
1156 | err = mdiobus_register(lp->mii_bus); | ||
1157 | if (err) { | ||
1158 | dev_err(&pdev->dev, "failed to register MII bus\n"); | ||
1159 | goto err_out_mdio_irq; | ||
1160 | } | ||
1161 | |||
1162 | err = r6040_mii_probe(dev); | ||
1163 | if (err) { | ||
1164 | dev_err(&pdev->dev, "failed to probe MII bus\n"); | ||
1165 | goto err_out_mdio_unregister; | ||
1166 | } | ||
1167 | |||
1188 | /* Register net device. After this dev->name assign */ | 1168 | /* Register net device. After this dev->name assign */ |
1189 | err = register_netdev(dev); | 1169 | err = register_netdev(dev); |
1190 | if (err) { | 1170 | if (err) { |
1191 | dev_err(&pdev->dev, "Failed to register net device\n"); | 1171 | dev_err(&pdev->dev, "Failed to register net device\n"); |
1192 | goto err_out_unmap; | 1172 | goto err_out_mdio_unregister; |
1193 | } | 1173 | } |
1194 | return 0; | 1174 | return 0; |
1195 | 1175 | ||
1176 | err_out_mdio_unregister: | ||
1177 | mdiobus_unregister(lp->mii_bus); | ||
1178 | err_out_mdio_irq: | ||
1179 | kfree(lp->mii_bus->irq); | ||
1180 | err_out_mdio: | ||
1181 | mdiobus_free(lp->mii_bus); | ||
1196 | err_out_unmap: | 1182 | err_out_unmap: |
1197 | pci_iounmap(pdev, ioaddr); | 1183 | pci_iounmap(pdev, ioaddr); |
1198 | err_out_free_res: | 1184 | err_out_free_res: |
@@ -1206,8 +1192,12 @@ err_out: | |||
1206 | static void __devexit r6040_remove_one(struct pci_dev *pdev) | 1192 | static void __devexit r6040_remove_one(struct pci_dev *pdev) |
1207 | { | 1193 | { |
1208 | struct net_device *dev = pci_get_drvdata(pdev); | 1194 | struct net_device *dev = pci_get_drvdata(pdev); |
1195 | struct r6040_private *lp = netdev_priv(dev); | ||
1209 | 1196 | ||
1210 | unregister_netdev(dev); | 1197 | unregister_netdev(dev); |
1198 | mdiobus_unregister(lp->mii_bus); | ||
1199 | kfree(lp->mii_bus->irq); | ||
1200 | mdiobus_free(lp->mii_bus); | ||
1211 | pci_release_regions(pdev); | 1201 | pci_release_regions(pdev); |
1212 | free_netdev(dev); | 1202 | free_netdev(dev); |
1213 | pci_disable_device(pdev); | 1203 | pci_disable_device(pdev); |
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 03a8318d90a2..6949504589db 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -88,7 +88,7 @@ static const int multicast_filter_limit = 32; | |||
88 | #define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg)) | 88 | #define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg)) |
89 | #define RTL_R8(reg) readb (ioaddr + (reg)) | 89 | #define RTL_R8(reg) readb (ioaddr + (reg)) |
90 | #define RTL_R16(reg) readw (ioaddr + (reg)) | 90 | #define RTL_R16(reg) readw (ioaddr + (reg)) |
91 | #define RTL_R32(reg) ((unsigned long) readl (ioaddr + (reg))) | 91 | #define RTL_R32(reg) readl (ioaddr + (reg)) |
92 | 92 | ||
93 | enum mac_version { | 93 | enum mac_version { |
94 | RTL_GIGA_MAC_NONE = 0x00, | 94 | RTL_GIGA_MAC_NONE = 0x00, |
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index 156460527231..26b0cc219204 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include "nic.h" | 27 | #include "nic.h" |
28 | 28 | ||
29 | #include "mcdi.h" | 29 | #include "mcdi.h" |
30 | #include "workarounds.h" | ||
30 | 31 | ||
31 | /************************************************************************** | 32 | /************************************************************************** |
32 | * | 33 | * |
@@ -92,13 +93,6 @@ const char *efx_reset_type_names[] = { | |||
92 | 93 | ||
93 | #define EFX_MAX_MTU (9 * 1024) | 94 | #define EFX_MAX_MTU (9 * 1024) |
94 | 95 | ||
95 | /* RX slow fill workqueue. If memory allocation fails in the fast path, | ||
96 | * a work item is pushed onto this work queue to retry the allocation later, | ||
97 | * to avoid the NIC being starved of RX buffers. Since this is a per cpu | ||
98 | * workqueue, there is nothing to be gained in making it per NIC | ||
99 | */ | ||
100 | static struct workqueue_struct *refill_workqueue; | ||
101 | |||
102 | /* Reset workqueue. If any NIC has a hardware failure then a reset will be | 96 | /* Reset workqueue. If any NIC has a hardware failure then a reset will be |
103 | * queued onto this work queue. This is not a per-nic work queue, because | 97 | * queued onto this work queue. This is not a per-nic work queue, because |
104 | * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised. | 98 | * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised. |
@@ -475,7 +469,8 @@ static void efx_init_channels(struct efx_nic *efx) | |||
475 | efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) + | 469 | efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) + |
476 | EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + | 470 | EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + |
477 | efx->type->rx_buffer_padding); | 471 | efx->type->rx_buffer_padding); |
478 | efx->rx_buffer_order = get_order(efx->rx_buffer_len); | 472 | efx->rx_buffer_order = get_order(efx->rx_buffer_len + |
473 | sizeof(struct efx_rx_page_state)); | ||
479 | 474 | ||
480 | /* Initialise the channels */ | 475 | /* Initialise the channels */ |
481 | efx_for_each_channel(channel, efx) { | 476 | efx_for_each_channel(channel, efx) { |
@@ -515,11 +510,11 @@ static void efx_start_channel(struct efx_channel *channel) | |||
515 | channel->enabled = true; | 510 | channel->enabled = true; |
516 | smp_wmb(); | 511 | smp_wmb(); |
517 | 512 | ||
518 | napi_enable(&channel->napi_str); | 513 | /* Fill the queues before enabling NAPI */ |
519 | |||
520 | /* Load up RX descriptors */ | ||
521 | efx_for_each_channel_rx_queue(rx_queue, channel) | 514 | efx_for_each_channel_rx_queue(rx_queue, channel) |
522 | efx_fast_push_rx_descriptors(rx_queue); | 515 | efx_fast_push_rx_descriptors(rx_queue); |
516 | |||
517 | napi_enable(&channel->napi_str); | ||
523 | } | 518 | } |
524 | 519 | ||
525 | /* This disables event queue processing and packet transmission. | 520 | /* This disables event queue processing and packet transmission. |
@@ -528,8 +523,6 @@ static void efx_start_channel(struct efx_channel *channel) | |||
528 | */ | 523 | */ |
529 | static void efx_stop_channel(struct efx_channel *channel) | 524 | static void efx_stop_channel(struct efx_channel *channel) |
530 | { | 525 | { |
531 | struct efx_rx_queue *rx_queue; | ||
532 | |||
533 | if (!channel->enabled) | 526 | if (!channel->enabled) |
534 | return; | 527 | return; |
535 | 528 | ||
@@ -537,12 +530,6 @@ static void efx_stop_channel(struct efx_channel *channel) | |||
537 | 530 | ||
538 | channel->enabled = false; | 531 | channel->enabled = false; |
539 | napi_disable(&channel->napi_str); | 532 | napi_disable(&channel->napi_str); |
540 | |||
541 | /* Ensure that any worker threads have exited or will be no-ops */ | ||
542 | efx_for_each_channel_rx_queue(rx_queue, channel) { | ||
543 | spin_lock_bh(&rx_queue->add_lock); | ||
544 | spin_unlock_bh(&rx_queue->add_lock); | ||
545 | } | ||
546 | } | 533 | } |
547 | 534 | ||
548 | static void efx_fini_channels(struct efx_nic *efx) | 535 | static void efx_fini_channels(struct efx_nic *efx) |
@@ -556,10 +543,18 @@ static void efx_fini_channels(struct efx_nic *efx) | |||
556 | BUG_ON(efx->port_enabled); | 543 | BUG_ON(efx->port_enabled); |
557 | 544 | ||
558 | rc = efx_nic_flush_queues(efx); | 545 | rc = efx_nic_flush_queues(efx); |
559 | if (rc) | 546 | if (rc && EFX_WORKAROUND_7803(efx)) { |
547 | /* Schedule a reset to recover from the flush failure. The | ||
548 | * descriptor caches reference memory we're about to free, | ||
549 | * but falcon_reconfigure_mac_wrapper() won't reconnect | ||
550 | * the MACs because of the pending reset. */ | ||
551 | EFX_ERR(efx, "Resetting to recover from flush failure\n"); | ||
552 | efx_schedule_reset(efx, RESET_TYPE_ALL); | ||
553 | } else if (rc) { | ||
560 | EFX_ERR(efx, "failed to flush queues\n"); | 554 | EFX_ERR(efx, "failed to flush queues\n"); |
561 | else | 555 | } else { |
562 | EFX_LOG(efx, "successfully flushed all queues\n"); | 556 | EFX_LOG(efx, "successfully flushed all queues\n"); |
557 | } | ||
563 | 558 | ||
564 | efx_for_each_channel(channel, efx) { | 559 | efx_for_each_channel(channel, efx) { |
565 | EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel); | 560 | EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel); |
@@ -586,9 +581,9 @@ static void efx_remove_channel(struct efx_channel *channel) | |||
586 | efx_remove_eventq(channel); | 581 | efx_remove_eventq(channel); |
587 | } | 582 | } |
588 | 583 | ||
589 | void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay) | 584 | void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue) |
590 | { | 585 | { |
591 | queue_delayed_work(refill_workqueue, &rx_queue->work, delay); | 586 | mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100)); |
592 | } | 587 | } |
593 | 588 | ||
594 | /************************************************************************** | 589 | /************************************************************************** |
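efx_schedule_slow_fill() now arms a per-queue timer instead of queueing delayed work on the removed sfc_refill workqueue; the timer is initialised with setup_timer() in efx_init_struct() further down and fires efx_rx_slow_fill() roughly 100 ms later. A minimal sketch of the same retry pattern, using made-up my_* names and the timer API of this kernel generation:

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	struct my_rx_queue {
		struct timer_list slow_fill;
	};

	/* Timer callback: retry the buffer allocation that failed earlier. */
	static void my_slow_fill(unsigned long context)
	{
		struct my_rx_queue *rxq = (struct my_rx_queue *)context;

		/* push descriptors here, re-arming the timer if allocation fails */
		(void)rxq;
	}

	static void my_init_rx_queue(struct my_rx_queue *rxq)
	{
		setup_timer(&rxq->slow_fill, my_slow_fill, (unsigned long)rxq);
	}

	static void my_schedule_slow_fill(struct my_rx_queue *rxq)
	{
		/* defer the refill by ~100 ms instead of queueing work */
		mod_timer(&rxq->slow_fill, jiffies + msecs_to_jiffies(100));
	}

The matching teardown would call del_timer_sync() on slow_fill before the queue memory is freed, just as the old code had to cancel its delayed work.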
@@ -1233,15 +1228,8 @@ static void efx_start_all(struct efx_nic *efx) | |||
1233 | * since we're holding the rtnl_lock at this point. */ | 1228 | * since we're holding the rtnl_lock at this point. */ |
1234 | static void efx_flush_all(struct efx_nic *efx) | 1229 | static void efx_flush_all(struct efx_nic *efx) |
1235 | { | 1230 | { |
1236 | struct efx_rx_queue *rx_queue; | ||
1237 | |||
1238 | /* Make sure the hardware monitor is stopped */ | 1231 | /* Make sure the hardware monitor is stopped */ |
1239 | cancel_delayed_work_sync(&efx->monitor_work); | 1232 | cancel_delayed_work_sync(&efx->monitor_work); |
1240 | |||
1241 | /* Ensure that all RX slow refills are complete. */ | ||
1242 | efx_for_each_rx_queue(rx_queue, efx) | ||
1243 | cancel_delayed_work_sync(&rx_queue->work); | ||
1244 | |||
1245 | /* Stop scheduled port reconfigurations */ | 1233 | /* Stop scheduled port reconfigurations */ |
1246 | cancel_work_sync(&efx->mac_work); | 1234 | cancel_work_sync(&efx->mac_work); |
1247 | } | 1235 | } |
@@ -1530,11 +1518,8 @@ static struct net_device_stats *efx_net_stats(struct net_device *net_dev) | |||
1530 | stats->tx_window_errors = mac_stats->tx_late_collision; | 1518 | stats->tx_window_errors = mac_stats->tx_late_collision; |
1531 | 1519 | ||
1532 | stats->rx_errors = (stats->rx_length_errors + | 1520 | stats->rx_errors = (stats->rx_length_errors + |
1533 | stats->rx_over_errors + | ||
1534 | stats->rx_crc_errors + | 1521 | stats->rx_crc_errors + |
1535 | stats->rx_frame_errors + | 1522 | stats->rx_frame_errors + |
1536 | stats->rx_fifo_errors + | ||
1537 | stats->rx_missed_errors + | ||
1538 | mac_stats->rx_symbol_error); | 1523 | mac_stats->rx_symbol_error); |
1539 | stats->tx_errors = (stats->tx_window_errors + | 1524 | stats->tx_errors = (stats->tx_window_errors + |
1540 | mac_stats->tx_bad); | 1525 | mac_stats->tx_bad); |
@@ -1886,6 +1871,9 @@ static void efx_reset_work(struct work_struct *data) | |||
1886 | { | 1871 | { |
1887 | struct efx_nic *efx = container_of(data, struct efx_nic, reset_work); | 1872 | struct efx_nic *efx = container_of(data, struct efx_nic, reset_work); |
1888 | 1873 | ||
1874 | if (efx->reset_pending == RESET_TYPE_NONE) | ||
1875 | return; | ||
1876 | |||
1889 | /* If we're not RUNNING then don't reset. Leave the reset_pending | 1877 | /* If we're not RUNNING then don't reset. Leave the reset_pending |
1890 | * flag set so that efx_pci_probe_main will be retried */ | 1878 | * flag set so that efx_pci_probe_main will be retried */ |
1891 | if (efx->state != STATE_RUNNING) { | 1879 | if (efx->state != STATE_RUNNING) { |
@@ -2052,8 +2040,8 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type, | |||
2052 | rx_queue->queue = i; | 2040 | rx_queue->queue = i; |
2053 | rx_queue->channel = &efx->channel[0]; /* for safety */ | 2041 | rx_queue->channel = &efx->channel[0]; /* for safety */ |
2054 | rx_queue->buffer = NULL; | 2042 | rx_queue->buffer = NULL; |
2055 | spin_lock_init(&rx_queue->add_lock); | 2043 | setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill, |
2056 | INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work); | 2044 | (unsigned long)rx_queue); |
2057 | } | 2045 | } |
2058 | 2046 | ||
2059 | efx->type = type; | 2047 | efx->type = type; |
@@ -2332,6 +2320,9 @@ static int efx_pm_thaw(struct device *dev) | |||
2332 | 2320 | ||
2333 | efx->type->resume_wol(efx); | 2321 | efx->type->resume_wol(efx); |
2334 | 2322 | ||
2323 | /* Reschedule any quenched resets scheduled during efx_pm_freeze() */ | ||
2324 | queue_work(reset_workqueue, &efx->reset_work); | ||
2325 | |||
2335 | return 0; | 2326 | return 0; |
2336 | } | 2327 | } |
2337 | 2328 | ||
@@ -2421,11 +2412,6 @@ static int __init efx_init_module(void) | |||
2421 | if (rc) | 2412 | if (rc) |
2422 | goto err_notifier; | 2413 | goto err_notifier; |
2423 | 2414 | ||
2424 | refill_workqueue = create_workqueue("sfc_refill"); | ||
2425 | if (!refill_workqueue) { | ||
2426 | rc = -ENOMEM; | ||
2427 | goto err_refill; | ||
2428 | } | ||
2429 | reset_workqueue = create_singlethread_workqueue("sfc_reset"); | 2415 | reset_workqueue = create_singlethread_workqueue("sfc_reset"); |
2430 | if (!reset_workqueue) { | 2416 | if (!reset_workqueue) { |
2431 | rc = -ENOMEM; | 2417 | rc = -ENOMEM; |
@@ -2441,8 +2427,6 @@ static int __init efx_init_module(void) | |||
2441 | err_pci: | 2427 | err_pci: |
2442 | destroy_workqueue(reset_workqueue); | 2428 | destroy_workqueue(reset_workqueue); |
2443 | err_reset: | 2429 | err_reset: |
2444 | destroy_workqueue(refill_workqueue); | ||
2445 | err_refill: | ||
2446 | unregister_netdevice_notifier(&efx_netdev_notifier); | 2430 | unregister_netdevice_notifier(&efx_netdev_notifier); |
2447 | err_notifier: | 2431 | err_notifier: |
2448 | return rc; | 2432 | return rc; |
@@ -2454,7 +2438,6 @@ static void __exit efx_exit_module(void) | |||
2454 | 2438 | ||
2455 | pci_unregister_driver(&efx_pci_driver); | 2439 | pci_unregister_driver(&efx_pci_driver); |
2456 | destroy_workqueue(reset_workqueue); | 2440 | destroy_workqueue(reset_workqueue); |
2457 | destroy_workqueue(refill_workqueue); | ||
2458 | unregister_netdevice_notifier(&efx_netdev_notifier); | 2441 | unregister_netdevice_notifier(&efx_netdev_notifier); |
2459 | 2442 | ||
2460 | } | 2443 | } |
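
The efx.c hunks above retire the per-queue refill work item and the sfc_refill workqueue that serviced it; each RX queue now carries a plain kernel timer whose callback receives the queue pointer through the unsigned long argument of setup_timer(). A minimal sketch of that pattern, assuming a kernel build context; the my_* names and the 100 ms delay are illustrative rather than taken from the driver:

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	struct my_rxq {
		struct timer_list slow_fill;	/* replaces the old delayed work */
	};

	/* Timer callback: the queue pointer travels through the unsigned long arg. */
	static void my_slow_fill(unsigned long context)
	{
		struct my_rxq *q = (struct my_rxq *)context;

		/* The driver only posts a "fill" event here; the refill itself
		 * then runs in NAPI context, so no extra locking is needed. */
		(void)q;
	}

	static void my_rxq_init(struct my_rxq *q)
	{
		setup_timer(&q->slow_fill, my_slow_fill, (unsigned long)q);
	}

	static void my_schedule_slow_fill(struct my_rxq *q)
	{
		/* 100 ms is illustrative; the point is simply "try again later" */
		mod_timer(&q->slow_fill, jiffies + msecs_to_jiffies(100));
	}

	static void my_rxq_fini(struct my_rxq *q)
	{
		del_timer_sync(&q->slow_fill);	/* mirrors efx_fini_rx_queue() */
	}

The practical effect is that the slow-fill path no longer needs its own workqueue or the add_lock: the timer merely nudges the event path, and all descriptor pushing stays in one context.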
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h index ffd708c5304a..e1e448887dfc 100644 --- a/drivers/net/sfc/efx.h +++ b/drivers/net/sfc/efx.h | |||
@@ -47,12 +47,12 @@ extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue); | |||
47 | extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); | 47 | extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); |
48 | extern void efx_rx_strategy(struct efx_channel *channel); | 48 | extern void efx_rx_strategy(struct efx_channel *channel); |
49 | extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); | 49 | extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); |
50 | extern void efx_rx_work(struct work_struct *data); | 50 | extern void efx_rx_slow_fill(unsigned long context); |
51 | extern void __efx_rx_packet(struct efx_channel *channel, | 51 | extern void __efx_rx_packet(struct efx_channel *channel, |
52 | struct efx_rx_buffer *rx_buf, bool checksummed); | 52 | struct efx_rx_buffer *rx_buf, bool checksummed); |
53 | extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, | 53 | extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, |
54 | unsigned int len, bool checksummed, bool discard); | 54 | unsigned int len, bool checksummed, bool discard); |
55 | extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay); | 55 | extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); |
56 | #define EFX_RXQ_SIZE 1024 | 56 | #define EFX_RXQ_SIZE 1024 |
57 | #define EFX_RXQ_MASK (EFX_RXQ_SIZE - 1) | 57 | #define EFX_RXQ_MASK (EFX_RXQ_SIZE - 1) |
58 | 58 | ||
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c index 655b697b45b2..8558865ff380 100644 --- a/drivers/net/sfc/falcon.c +++ b/drivers/net/sfc/falcon.c | |||
@@ -548,7 +548,9 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) | |||
548 | { | 548 | { |
549 | struct efx_link_state *link_state = &efx->link_state; | 549 | struct efx_link_state *link_state = &efx->link_state; |
550 | efx_oword_t reg; | 550 | efx_oword_t reg; |
551 | int link_speed; | 551 | int link_speed, isolate; |
552 | |||
553 | isolate = (efx->reset_pending != RESET_TYPE_NONE); | ||
552 | 554 | ||
553 | switch (link_state->speed) { | 555 | switch (link_state->speed) { |
554 | case 10000: link_speed = 3; break; | 556 | case 10000: link_speed = 3; break; |
@@ -570,7 +572,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) | |||
570 | * discarded. */ | 572 | * discarded. */ |
571 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { | 573 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { |
572 | EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN, | 574 | EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN, |
573 | !link_state->up); | 575 | !link_state->up || isolate); |
574 | } | 576 | } |
575 | 577 | ||
576 | efx_writeo(efx, ®, FR_AB_MAC_CTRL); | 578 | efx_writeo(efx, ®, FR_AB_MAC_CTRL); |
@@ -584,7 +586,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) | |||
584 | EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1); | 586 | EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1); |
585 | /* Unisolate the MAC -> RX */ | 587 | /* Unisolate the MAC -> RX */ |
586 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) | 588 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) |
587 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1); | 589 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate); |
588 | efx_writeo(efx, ®, FR_AZ_RX_CFG); | 590 | efx_writeo(efx, ®, FR_AZ_RX_CFG); |
589 | } | 591 | } |
590 | 592 | ||
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c index 6032c0e1f1f8..86e43b1f7689 100644 --- a/drivers/net/sfc/mcdi_phy.c +++ b/drivers/net/sfc/mcdi_phy.c | |||
@@ -20,7 +20,7 @@ | |||
20 | #include "nic.h" | 20 | #include "nic.h" |
21 | #include "selftest.h" | 21 | #include "selftest.h" |
22 | 22 | ||
23 | struct efx_mcdi_phy_cfg { | 23 | struct efx_mcdi_phy_data { |
24 | u32 flags; | 24 | u32 flags; |
25 | u32 type; | 25 | u32 type; |
26 | u32 supported_cap; | 26 | u32 supported_cap; |
@@ -35,7 +35,7 @@ struct efx_mcdi_phy_cfg { | |||
35 | }; | 35 | }; |
36 | 36 | ||
37 | static int | 37 | static int |
38 | efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_cfg *cfg) | 38 | efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg) |
39 | { | 39 | { |
40 | u8 outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN]; | 40 | u8 outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN]; |
41 | size_t outlen; | 41 | size_t outlen; |
@@ -259,7 +259,7 @@ static u32 ethtool_to_mcdi_cap(u32 cap) | |||
259 | 259 | ||
260 | static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx) | 260 | static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx) |
261 | { | 261 | { |
262 | struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; | 262 | struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; |
263 | enum efx_phy_mode mode, supported; | 263 | enum efx_phy_mode mode, supported; |
264 | u32 flags; | 264 | u32 flags; |
265 | 265 | ||
@@ -307,7 +307,7 @@ static u32 mcdi_to_ethtool_media(u32 media) | |||
307 | 307 | ||
308 | static int efx_mcdi_phy_probe(struct efx_nic *efx) | 308 | static int efx_mcdi_phy_probe(struct efx_nic *efx) |
309 | { | 309 | { |
310 | struct efx_mcdi_phy_cfg *phy_data; | 310 | struct efx_mcdi_phy_data *phy_data; |
311 | u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; | 311 | u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; |
312 | u32 caps; | 312 | u32 caps; |
313 | int rc; | 313 | int rc; |
@@ -395,6 +395,7 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx) | |||
395 | efx->wanted_fc = EFX_FC_RX | EFX_FC_TX; | 395 | efx->wanted_fc = EFX_FC_RX | EFX_FC_TX; |
396 | if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) | 396 | if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) |
397 | efx->wanted_fc |= EFX_FC_AUTO; | 397 | efx->wanted_fc |= EFX_FC_AUTO; |
398 | efx_link_set_wanted_fc(efx, efx->wanted_fc); | ||
398 | 399 | ||
399 | return 0; | 400 | return 0; |
400 | 401 | ||
@@ -405,7 +406,7 @@ fail: | |||
405 | 406 | ||
406 | int efx_mcdi_phy_reconfigure(struct efx_nic *efx) | 407 | int efx_mcdi_phy_reconfigure(struct efx_nic *efx) |
407 | { | 408 | { |
408 | struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; | 409 | struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; |
409 | u32 caps = (efx->link_advertising ? | 410 | u32 caps = (efx->link_advertising ? |
410 | ethtool_to_mcdi_cap(efx->link_advertising) : | 411 | ethtool_to_mcdi_cap(efx->link_advertising) : |
411 | phy_cfg->forced_cap); | 412 | phy_cfg->forced_cap); |
@@ -446,7 +447,7 @@ void efx_mcdi_phy_decode_link(struct efx_nic *efx, | |||
446 | */ | 447 | */ |
447 | void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa) | 448 | void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa) |
448 | { | 449 | { |
449 | struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; | 450 | struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; |
450 | u32 rmtadv; | 451 | u32 rmtadv; |
451 | 452 | ||
452 | /* The link partner capabilities are only relevant if the | 453 | /* The link partner capabilities are only relevant if the |
@@ -505,7 +506,7 @@ static void efx_mcdi_phy_remove(struct efx_nic *efx) | |||
505 | 506 | ||
506 | static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) | 507 | static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) |
507 | { | 508 | { |
508 | struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; | 509 | struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; |
509 | u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; | 510 | u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; |
510 | int rc; | 511 | int rc; |
511 | 512 | ||
@@ -535,7 +536,7 @@ static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *e | |||
535 | 536 | ||
536 | static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) | 537 | static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) |
537 | { | 538 | { |
538 | struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; | 539 | struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; |
539 | u32 caps; | 540 | u32 caps; |
540 | int rc; | 541 | int rc; |
541 | 542 | ||
@@ -674,7 +675,7 @@ out: | |||
674 | static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results, | 675 | static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results, |
675 | unsigned flags) | 676 | unsigned flags) |
676 | { | 677 | { |
677 | struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; | 678 | struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; |
678 | u32 mode; | 679 | u32 mode; |
679 | int rc; | 680 | int rc; |
680 | 681 | ||
@@ -712,7 +713,7 @@ static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results, | |||
712 | 713 | ||
713 | const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index) | 714 | const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index) |
714 | { | 715 | { |
715 | struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; | 716 | struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; |
716 | 717 | ||
717 | if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) { | 718 | if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) { |
718 | if (index == 0) | 719 | if (index == 0) |
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h index 4762c91cb587..ba636e086fc3 100644 --- a/drivers/net/sfc/net_driver.h +++ b/drivers/net/sfc/net_driver.h | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/etherdevice.h> | 18 | #include <linux/etherdevice.h> |
19 | #include <linux/ethtool.h> | 19 | #include <linux/ethtool.h> |
20 | #include <linux/if_vlan.h> | 20 | #include <linux/if_vlan.h> |
21 | #include <linux/timer.h> | ||
21 | #include <linux/mdio.h> | 22 | #include <linux/mdio.h> |
22 | #include <linux/list.h> | 23 | #include <linux/list.h> |
23 | #include <linux/pci.h> | 24 | #include <linux/pci.h> |
@@ -221,7 +222,6 @@ struct efx_tx_queue { | |||
221 | * If both this and skb are %NULL, the buffer slot is currently free. | 222 | * If both this and skb are %NULL, the buffer slot is currently free. |
222 | * @data: Pointer to ethernet header | 223 | * @data: Pointer to ethernet header |
223 | * @len: Buffer length, in bytes. | 224 | * @len: Buffer length, in bytes. |
224 | * @unmap_addr: DMA address to unmap | ||
225 | */ | 225 | */ |
226 | struct efx_rx_buffer { | 226 | struct efx_rx_buffer { |
227 | dma_addr_t dma_addr; | 227 | dma_addr_t dma_addr; |
@@ -229,7 +229,24 @@ struct efx_rx_buffer { | |||
229 | struct page *page; | 229 | struct page *page; |
230 | char *data; | 230 | char *data; |
231 | unsigned int len; | 231 | unsigned int len; |
232 | dma_addr_t unmap_addr; | 232 | }; |
233 | |||
234 | /** | ||
235 | * struct efx_rx_page_state - Page-based rx buffer state | ||
236 | * | ||
237 | * Inserted at the start of every page allocated for receive buffers. | ||
238 | * Used to facilitate sharing dma mappings between recycled rx buffers | ||
239 | * and those passed up to the kernel. | ||
240 | * | ||
241 | * @refcnt: Number of struct efx_rx_buffer's referencing this page. | ||
242 | * When refcnt falls to zero, the page is unmapped for dma | ||
243 | * @dma_addr: The dma address of this page. | ||
244 | */ | ||
245 | struct efx_rx_page_state { | ||
246 | unsigned refcnt; | ||
247 | dma_addr_t dma_addr; | ||
248 | |||
249 | unsigned int __pad[0] ____cacheline_aligned; | ||
233 | }; | 250 | }; |
234 | 251 | ||
235 | /** | 252 | /** |
@@ -242,10 +259,6 @@ struct efx_rx_buffer { | |||
242 | * @added_count: Number of buffers added to the receive queue. | 259 | * @added_count: Number of buffers added to the receive queue. |
243 | * @notified_count: Number of buffers given to NIC (<= @added_count). | 260 | * @notified_count: Number of buffers given to NIC (<= @added_count). |
244 | * @removed_count: Number of buffers removed from the receive queue. | 261 | * @removed_count: Number of buffers removed from the receive queue. |
245 | * @add_lock: Receive queue descriptor add spin lock. | ||
246 | * This lock must be held in order to add buffers to the RX | ||
247 | * descriptor ring (rxd and buffer) and to update added_count (but | ||
248 | * not removed_count). | ||
249 | * @max_fill: RX descriptor maximum fill level (<= ring size) | 262 | * @max_fill: RX descriptor maximum fill level (<= ring size) |
250 | * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill | 263 | * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill |
251 | * (<= @max_fill) | 264 | * (<= @max_fill) |
@@ -259,12 +272,7 @@ struct efx_rx_buffer { | |||
259 | * overflow was observed. It should never be set. | 272 | * overflow was observed. It should never be set. |
260 | * @alloc_page_count: RX allocation strategy counter. | 273 | * @alloc_page_count: RX allocation strategy counter. |
261 | * @alloc_skb_count: RX allocation strategy counter. | 274 | * @alloc_skb_count: RX allocation strategy counter. |
262 | * @work: Descriptor push work thread | 275 | * @slow_fill: Timer used to defer efx_nic_generate_fill_event(). |
263 | * @buf_page: Page for next RX buffer. | ||
264 | * We can use a single page for multiple RX buffers. This tracks | ||
265 | * the remaining space in the allocation. | ||
266 | * @buf_dma_addr: Page's DMA address. | ||
267 | * @buf_data: Page's host address. | ||
268 | * @flushed: Use when handling queue flushing | 276 | * @flushed: Use when handling queue flushing |
269 | */ | 277 | */ |
270 | struct efx_rx_queue { | 278 | struct efx_rx_queue { |
@@ -277,7 +285,6 @@ struct efx_rx_queue { | |||
277 | int added_count; | 285 | int added_count; |
278 | int notified_count; | 286 | int notified_count; |
279 | int removed_count; | 287 | int removed_count; |
280 | spinlock_t add_lock; | ||
281 | unsigned int max_fill; | 288 | unsigned int max_fill; |
282 | unsigned int fast_fill_trigger; | 289 | unsigned int fast_fill_trigger; |
283 | unsigned int fast_fill_limit; | 290 | unsigned int fast_fill_limit; |
@@ -285,12 +292,9 @@ struct efx_rx_queue { | |||
285 | unsigned int min_overfill; | 292 | unsigned int min_overfill; |
286 | unsigned int alloc_page_count; | 293 | unsigned int alloc_page_count; |
287 | unsigned int alloc_skb_count; | 294 | unsigned int alloc_skb_count; |
288 | struct delayed_work work; | 295 | struct timer_list slow_fill; |
289 | unsigned int slow_fill_count; | 296 | unsigned int slow_fill_count; |
290 | 297 | ||
291 | struct page *buf_page; | ||
292 | dma_addr_t buf_dma_addr; | ||
293 | char *buf_data; | ||
294 | enum efx_flush_state flushed; | 298 | enum efx_flush_state flushed; |
295 | }; | 299 | }; |
296 | 300 | ||
@@ -336,7 +340,7 @@ enum efx_rx_alloc_method { | |||
336 | * @eventq: Event queue buffer | 340 | * @eventq: Event queue buffer |
337 | * @eventq_read_ptr: Event queue read pointer | 341 | * @eventq_read_ptr: Event queue read pointer |
338 | * @last_eventq_read_ptr: Last event queue read pointer value. | 342 | * @last_eventq_read_ptr: Last event queue read pointer value. |
339 | * @eventq_magic: Event queue magic value for driver-generated test events | 343 | * @magic_count: Event queue test event count |
340 | * @irq_count: Number of IRQs since last adaptive moderation decision | 344 | * @irq_count: Number of IRQs since last adaptive moderation decision |
341 | * @irq_mod_score: IRQ moderation score | 345 | * @irq_mod_score: IRQ moderation score |
342 | * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors | 346 | * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors |
@@ -367,7 +371,7 @@ struct efx_channel { | |||
367 | struct efx_special_buffer eventq; | 371 | struct efx_special_buffer eventq; |
368 | unsigned int eventq_read_ptr; | 372 | unsigned int eventq_read_ptr; |
369 | unsigned int last_eventq_read_ptr; | 373 | unsigned int last_eventq_read_ptr; |
370 | unsigned int eventq_magic; | 374 | unsigned int magic_count; |
371 | 375 | ||
372 | unsigned int irq_count; | 376 | unsigned int irq_count; |
373 | unsigned int irq_mod_score; | 377 | unsigned int irq_mod_score; |
@@ -645,6 +649,7 @@ union efx_multicast_hash { | |||
645 | * struct efx_nic - an Efx NIC | 649 | * struct efx_nic - an Efx NIC |
646 | * @name: Device name (net device name or bus id before net device registered) | 650 | * @name: Device name (net device name or bus id before net device registered) |
647 | * @pci_dev: The PCI device | 651 | * @pci_dev: The PCI device |
652 | * @port_num: Index of this host port within the controller | ||
648 | * @type: Controller type attributes | 653 | * @type: Controller type attributes |
649 | * @legacy_irq: IRQ number | 654 | * @legacy_irq: IRQ number |
650 | * @workqueue: Workqueue for port reconfigures and the HW monitor. | 655 | * @workqueue: Workqueue for port reconfigures and the HW monitor. |
@@ -728,6 +733,7 @@ union efx_multicast_hash { | |||
728 | struct efx_nic { | 733 | struct efx_nic { |
729 | char name[IFNAMSIZ]; | 734 | char name[IFNAMSIZ]; |
730 | struct pci_dev *pci_dev; | 735 | struct pci_dev *pci_dev; |
736 | unsigned port_num; | ||
731 | const struct efx_nic_type *type; | 737 | const struct efx_nic_type *type; |
732 | int legacy_irq; | 738 | int legacy_irq; |
733 | struct workqueue_struct *workqueue; | 739 | struct workqueue_struct *workqueue; |
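
net_driver.h now embeds a struct efx_rx_page_state at the start of every page used for receive buffers, so the refcount and the DMA mapping travel with the page itself; buffer data begins after that header plus the IP alignment pad, and a second buffer, when one fits, begins at the same offset into the second half of the page. A small userspace sketch of the layout arithmetic; the 64-byte alignment and the 2-byte pad are assumptions standing in for ____cacheline_aligned and EFX_PAGE_IP_ALIGN:

	#include <stdio.h>
	#include <stddef.h>

	#define DEMO_PAGE_SIZE	4096UL
	#define DEMO_IP_ALIGN	2	/* stands in for EFX_PAGE_IP_ALIGN */

	/* Mirrors struct efx_rx_page_state; aligned(64) mimics the zero-length
	 * ____cacheline_aligned pad that rounds it up to one cache line. */
	struct demo_page_state {
		unsigned refcnt;
		unsigned long dma_addr;
	} __attribute__((aligned(64)));

	int main(void)
	{
		size_t hdr = sizeof(struct demo_page_state);
		size_t half = DEMO_PAGE_SIZE >> 1;

		printf("state header      : %zu bytes\n", hdr);
		printf("buffer 0 data at  : %zu\n", hdr + DEMO_IP_ALIGN);
		printf("buffer 1 data at  : %zu\n", half + hdr + DEMO_IP_ALIGN);
		printf("max len for share : %zu (EFX_RX_HALF_PAGE)\n", half - hdr);
		return 0;
	}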
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c index 5d3aaec58556..0ee6fd367e6f 100644 --- a/drivers/net/sfc/nic.c +++ b/drivers/net/sfc/nic.c | |||
@@ -79,6 +79,14 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold"); | |||
79 | /* Depth of RX flush request fifo */ | 79 | /* Depth of RX flush request fifo */ |
80 | #define EFX_RX_FLUSH_COUNT 4 | 80 | #define EFX_RX_FLUSH_COUNT 4 |
81 | 81 | ||
82 | /* Generated event code for efx_generate_test_event() */ | ||
83 | #define EFX_CHANNEL_MAGIC_TEST(_channel) \ | ||
84 | (0x00010100 + (_channel)->channel) | ||
85 | |||
86 | /* Generated event code for efx_generate_fill_event() */ | ||
87 | #define EFX_CHANNEL_MAGIC_FILL(_channel) \ | ||
88 | (0x00010200 + (_channel)->channel) | ||
89 | |||
82 | /************************************************************************** | 90 | /************************************************************************** |
83 | * | 91 | * |
84 | * Solarstorm hardware access | 92 | * Solarstorm hardware access |
@@ -850,6 +858,26 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) | |||
850 | checksummed, discard); | 858 | checksummed, discard); |
851 | } | 859 | } |
852 | 860 | ||
861 | static void | ||
862 | efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event) | ||
863 | { | ||
864 | struct efx_nic *efx = channel->efx; | ||
865 | unsigned code; | ||
866 | |||
867 | code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); | ||
868 | if (code == EFX_CHANNEL_MAGIC_TEST(channel)) | ||
869 | ++channel->magic_count; | ||
870 | else if (code == EFX_CHANNEL_MAGIC_FILL(channel)) | ||
871 | /* The queue must be empty, so we won't receive any rx | ||
872 | * events, so efx_process_channel() won't refill the | ||
873 | * queue. Refill it here */ | ||
874 | efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]); | ||
875 | else | ||
876 | EFX_LOG(efx, "channel %d received generated " | ||
877 | "event "EFX_QWORD_FMT"\n", channel->channel, | ||
878 | EFX_QWORD_VAL(*event)); | ||
879 | } | ||
880 | |||
853 | /* Global events are basically PHY events */ | 881 | /* Global events are basically PHY events */ |
854 | static void | 882 | static void |
855 | efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event) | 883 | efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event) |
@@ -993,11 +1021,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget) | |||
993 | } | 1021 | } |
994 | break; | 1022 | break; |
995 | case FSE_AZ_EV_CODE_DRV_GEN_EV: | 1023 | case FSE_AZ_EV_CODE_DRV_GEN_EV: |
996 | channel->eventq_magic = EFX_QWORD_FIELD( | 1024 | efx_handle_generated_event(channel, &event); |
997 | event, FSF_AZ_DRV_GEN_EV_MAGIC); | ||
998 | EFX_LOG(channel->efx, "channel %d received generated " | ||
999 | "event "EFX_QWORD_FMT"\n", channel->channel, | ||
1000 | EFX_QWORD_VAL(event)); | ||
1001 | break; | 1025 | break; |
1002 | case FSE_AZ_EV_CODE_GLOBAL_EV: | 1026 | case FSE_AZ_EV_CODE_GLOBAL_EV: |
1003 | efx_handle_global_event(channel, &event); | 1027 | efx_handle_global_event(channel, &event); |
@@ -1088,12 +1112,20 @@ void efx_nic_remove_eventq(struct efx_channel *channel) | |||
1088 | } | 1112 | } |
1089 | 1113 | ||
1090 | 1114 | ||
1091 | /* Generates a test event on the event queue. A subsequent call to | 1115 | void efx_nic_generate_test_event(struct efx_channel *channel) |
1092 | * process_eventq() should pick up the event and place the value of | ||
1093 | * "magic" into channel->eventq_magic; | ||
1094 | */ | ||
1095 | void efx_nic_generate_test_event(struct efx_channel *channel, unsigned int magic) | ||
1096 | { | 1116 | { |
1117 | unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel); | ||
1118 | efx_qword_t test_event; | ||
1119 | |||
1120 | EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE, | ||
1121 | FSE_AZ_EV_CODE_DRV_GEN_EV, | ||
1122 | FSF_AZ_DRV_GEN_EV_MAGIC, magic); | ||
1123 | efx_generate_event(channel, &test_event); | ||
1124 | } | ||
1125 | |||
1126 | void efx_nic_generate_fill_event(struct efx_channel *channel) | ||
1127 | { | ||
1128 | unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel); | ||
1097 | efx_qword_t test_event; | 1129 | efx_qword_t test_event; |
1098 | 1130 | ||
1099 | EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE, | 1131 | EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE, |
@@ -1219,9 +1251,6 @@ int efx_nic_flush_queues(struct efx_nic *efx) | |||
1219 | rx_queue->flushed = FLUSH_DONE; | 1251 | rx_queue->flushed = FLUSH_DONE; |
1220 | } | 1252 | } |
1221 | 1253 | ||
1222 | if (EFX_WORKAROUND_7803(efx)) | ||
1223 | return 0; | ||
1224 | |||
1225 | return -ETIMEDOUT; | 1254 | return -ETIMEDOUT; |
1226 | } | 1255 | } |
1227 | 1256 | ||
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h index bbc2c0c2f843..95770e15115d 100644 --- a/drivers/net/sfc/nic.h +++ b/drivers/net/sfc/nic.h | |||
@@ -190,8 +190,8 @@ extern int efx_nic_rx_xoff_thresh, efx_nic_rx_xon_thresh; | |||
190 | /* Interrupts and test events */ | 190 | /* Interrupts and test events */ |
191 | extern int efx_nic_init_interrupt(struct efx_nic *efx); | 191 | extern int efx_nic_init_interrupt(struct efx_nic *efx); |
192 | extern void efx_nic_enable_interrupts(struct efx_nic *efx); | 192 | extern void efx_nic_enable_interrupts(struct efx_nic *efx); |
193 | extern void efx_nic_generate_test_event(struct efx_channel *channel, | 193 | extern void efx_nic_generate_test_event(struct efx_channel *channel); |
194 | unsigned int magic); | 194 | extern void efx_nic_generate_fill_event(struct efx_channel *channel); |
195 | extern void efx_nic_generate_interrupt(struct efx_nic *efx); | 195 | extern void efx_nic_generate_interrupt(struct efx_nic *efx); |
196 | extern void efx_nic_disable_interrupts(struct efx_nic *efx); | 196 | extern void efx_nic_disable_interrupts(struct efx_nic *efx); |
197 | extern void efx_nic_fini_interrupt(struct efx_nic *efx); | 197 | extern void efx_nic_fini_interrupt(struct efx_nic *efx); |
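
The nic.c hunks above split driver-generated events into two per-channel codes: a test code (0x00010100 plus the channel number) that only bumps magic_count, and a fill code (0x00010200 plus the channel number) that makes the event path refill the RX queue. A userspace illustration of the encoding and dispatch; handle_generated_event() here is a stand-in for the driver's efx_handle_generated_event():

	#include <stdio.h>

	/* Base values copied from the nic.c hunk above */
	#define MAGIC_TEST(channel)	(0x00010100 + (channel))
	#define MAGIC_FILL(channel)	(0x00010200 + (channel))

	static void handle_generated_event(int channel, unsigned code)
	{
		if (code == MAGIC_TEST(channel))
			printf("channel %d: test event, bump magic_count\n", channel);
		else if (code == MAGIC_FILL(channel))
			printf("channel %d: fill event, refill the RX queue\n", channel);
		else
			printf("channel %d: unexpected generated event %#x\n",
			       channel, code);
	}

	int main(void)
	{
		handle_generated_event(3, MAGIC_TEST(3));
		handle_generated_event(3, MAGIC_FILL(3));
		handle_generated_event(3, MAGIC_TEST(4));	/* wrong channel: default branch */
		return 0;
	}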
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c index e308818b9f55..9fb698e3519d 100644 --- a/drivers/net/sfc/rx.c +++ b/drivers/net/sfc/rx.c | |||
@@ -25,6 +25,9 @@ | |||
25 | /* Number of RX descriptors pushed at once. */ | 25 | /* Number of RX descriptors pushed at once. */ |
26 | #define EFX_RX_BATCH 8 | 26 | #define EFX_RX_BATCH 8 |
27 | 27 | ||
28 | /* Maximum size of a buffer sharing a page */ | ||
29 | #define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state)) | ||
30 | |||
28 | /* Size of buffer allocated for skb header area. */ | 31 | /* Size of buffer allocated for skb header area. */ |
29 | #define EFX_SKB_HEADERS 64u | 32 | #define EFX_SKB_HEADERS 64u |
30 | 33 | ||
@@ -98,155 +101,138 @@ static inline unsigned int efx_rx_buf_size(struct efx_nic *efx) | |||
98 | return PAGE_SIZE << efx->rx_buffer_order; | 101 | return PAGE_SIZE << efx->rx_buffer_order; |
99 | } | 102 | } |
100 | 103 | ||
101 | |||
102 | /** | 104 | /** |
103 | * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation | 105 | * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers |
104 | * | 106 | * |
105 | * @rx_queue: Efx RX queue | 107 | * @rx_queue: Efx RX queue |
106 | * @rx_buf: RX buffer structure to populate | ||
107 | * | 108 | * |
108 | * This allocates memory for a new receive buffer, maps it for DMA, | 109 | * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a |
109 | * and populates a struct efx_rx_buffer with the relevant | 110 | * struct efx_rx_buffer for each one. Return a negative error code or 0 |
110 | * information. Return a negative error code or 0 on success. | 111 | * on success. May fail having only inserted fewer than EFX_RX_BATCH |
112 | * buffers. | ||
111 | */ | 113 | */ |
112 | static int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue, | 114 | static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue) |
113 | struct efx_rx_buffer *rx_buf) | ||
114 | { | 115 | { |
115 | struct efx_nic *efx = rx_queue->efx; | 116 | struct efx_nic *efx = rx_queue->efx; |
116 | struct net_device *net_dev = efx->net_dev; | 117 | struct net_device *net_dev = efx->net_dev; |
118 | struct efx_rx_buffer *rx_buf; | ||
117 | int skb_len = efx->rx_buffer_len; | 119 | int skb_len = efx->rx_buffer_len; |
120 | unsigned index, count; | ||
118 | 121 | ||
119 | rx_buf->skb = netdev_alloc_skb(net_dev, skb_len); | 122 | for (count = 0; count < EFX_RX_BATCH; ++count) { |
120 | if (unlikely(!rx_buf->skb)) | 123 | index = rx_queue->added_count & EFX_RXQ_MASK; |
121 | return -ENOMEM; | 124 | rx_buf = efx_rx_buffer(rx_queue, index); |
122 | 125 | ||
123 | /* Adjust the SKB for padding and checksum */ | 126 | rx_buf->skb = netdev_alloc_skb(net_dev, skb_len); |
124 | skb_reserve(rx_buf->skb, NET_IP_ALIGN); | 127 | if (unlikely(!rx_buf->skb)) |
125 | rx_buf->len = skb_len - NET_IP_ALIGN; | 128 | return -ENOMEM; |
126 | rx_buf->data = (char *)rx_buf->skb->data; | 129 | rx_buf->page = NULL; |
127 | rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
128 | 130 | ||
129 | rx_buf->dma_addr = pci_map_single(efx->pci_dev, | 131 | /* Adjust the SKB for padding and checksum */ |
130 | rx_buf->data, rx_buf->len, | 132 | skb_reserve(rx_buf->skb, NET_IP_ALIGN); |
131 | PCI_DMA_FROMDEVICE); | 133 | rx_buf->len = skb_len - NET_IP_ALIGN; |
134 | rx_buf->data = (char *)rx_buf->skb->data; | ||
135 | rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
136 | |||
137 | rx_buf->dma_addr = pci_map_single(efx->pci_dev, | ||
138 | rx_buf->data, rx_buf->len, | ||
139 | PCI_DMA_FROMDEVICE); | ||
140 | if (unlikely(pci_dma_mapping_error(efx->pci_dev, | ||
141 | rx_buf->dma_addr))) { | ||
142 | dev_kfree_skb_any(rx_buf->skb); | ||
143 | rx_buf->skb = NULL; | ||
144 | return -EIO; | ||
145 | } | ||
132 | 146 | ||
133 | if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) { | 147 | ++rx_queue->added_count; |
134 | dev_kfree_skb_any(rx_buf->skb); | 148 | ++rx_queue->alloc_skb_count; |
135 | rx_buf->skb = NULL; | ||
136 | return -EIO; | ||
137 | } | 149 | } |
138 | 150 | ||
139 | return 0; | 151 | return 0; |
140 | } | 152 | } |
141 | 153 | ||
142 | /** | 154 | /** |
143 | * efx_init_rx_buffer_page - create new RX buffer using page-based allocation | 155 | * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers |
144 | * | 156 | * |
145 | * @rx_queue: Efx RX queue | 157 | * @rx_queue: Efx RX queue |
146 | * @rx_buf: RX buffer structure to populate | ||
147 | * | 158 | * |
148 | * This allocates memory for a new receive buffer, maps it for DMA, | 159 | * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA, |
149 | * and populates a struct efx_rx_buffer with the relevant | 160 | * and populates struct efx_rx_buffers for each one. Return a negative error |
150 | * information. Return a negative error code or 0 on success. | 161 | * code or 0 on success. If a single page can be split between two buffers, |
162 | * then the page will either be inserted fully, or not at all. | ||
151 | */ | 163 | */ |
152 | static int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue, | 164 | static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue) |
153 | struct efx_rx_buffer *rx_buf) | ||
154 | { | 165 | { |
155 | struct efx_nic *efx = rx_queue->efx; | 166 | struct efx_nic *efx = rx_queue->efx; |
156 | int bytes, space, offset; | 167 | struct efx_rx_buffer *rx_buf; |
157 | 168 | struct page *page; | |
158 | bytes = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; | 169 | void *page_addr; |
159 | 170 | struct efx_rx_page_state *state; | |
160 | /* If there is space left in the previously allocated page, | 171 | dma_addr_t dma_addr; |
161 | * then use it. Otherwise allocate a new one */ | 172 | unsigned index, count; |
162 | rx_buf->page = rx_queue->buf_page; | 173 | |
163 | if (rx_buf->page == NULL) { | 174 | /* We can split a page between two buffers */ |
164 | dma_addr_t dma_addr; | 175 | BUILD_BUG_ON(EFX_RX_BATCH & 1); |
165 | 176 | ||
166 | rx_buf->page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC, | 177 | for (count = 0; count < EFX_RX_BATCH; ++count) { |
167 | efx->rx_buffer_order); | 178 | page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC, |
168 | if (unlikely(rx_buf->page == NULL)) | 179 | efx->rx_buffer_order); |
180 | if (unlikely(page == NULL)) | ||
169 | return -ENOMEM; | 181 | return -ENOMEM; |
170 | 182 | dma_addr = pci_map_page(efx->pci_dev, page, 0, | |
171 | dma_addr = pci_map_page(efx->pci_dev, rx_buf->page, | 183 | efx_rx_buf_size(efx), |
172 | 0, efx_rx_buf_size(efx), | ||
173 | PCI_DMA_FROMDEVICE); | 184 | PCI_DMA_FROMDEVICE); |
174 | |||
175 | if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) { | 185 | if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) { |
176 | __free_pages(rx_buf->page, efx->rx_buffer_order); | 186 | __free_pages(page, efx->rx_buffer_order); |
177 | rx_buf->page = NULL; | ||
178 | return -EIO; | 187 | return -EIO; |
179 | } | 188 | } |
180 | 189 | page_addr = page_address(page); | |
181 | rx_queue->buf_page = rx_buf->page; | 190 | state = page_addr; |
182 | rx_queue->buf_dma_addr = dma_addr; | 191 | state->refcnt = 0; |
183 | rx_queue->buf_data = (page_address(rx_buf->page) + | 192 | state->dma_addr = dma_addr; |
184 | EFX_PAGE_IP_ALIGN); | 193 | |
185 | } | 194 | page_addr += sizeof(struct efx_rx_page_state); |
186 | 195 | dma_addr += sizeof(struct efx_rx_page_state); | |
187 | rx_buf->len = bytes; | 196 | |
188 | rx_buf->data = rx_queue->buf_data; | 197 | split: |
189 | offset = efx_rx_buf_offset(rx_buf); | 198 | index = rx_queue->added_count & EFX_RXQ_MASK; |
190 | rx_buf->dma_addr = rx_queue->buf_dma_addr + offset; | 199 | rx_buf = efx_rx_buffer(rx_queue, index); |
191 | 200 | rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; | |
192 | /* Try to pack multiple buffers per page */ | 201 | rx_buf->skb = NULL; |
193 | if (efx->rx_buffer_order == 0) { | 202 | rx_buf->page = page; |
194 | /* The next buffer starts on the next 512 byte boundary */ | 203 | rx_buf->data = page_addr + EFX_PAGE_IP_ALIGN; |
195 | rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff); | 204 | rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; |
196 | offset += ((bytes + 0x1ff) & ~0x1ff); | 205 | ++rx_queue->added_count; |
197 | 206 | ++rx_queue->alloc_page_count; | |
198 | space = efx_rx_buf_size(efx) - offset; | 207 | ++state->refcnt; |
199 | if (space >= bytes) { | 208 | |
200 | /* Refs dropped on kernel releasing each skb */ | 209 | if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) { |
201 | get_page(rx_queue->buf_page); | 210 | /* Use the second half of the page */ |
202 | goto out; | 211 | get_page(page); |
212 | dma_addr += (PAGE_SIZE >> 1); | ||
213 | page_addr += (PAGE_SIZE >> 1); | ||
214 | ++count; | ||
215 | goto split; | ||
203 | } | 216 | } |
204 | } | 217 | } |
205 | 218 | ||
206 | /* This is the final RX buffer for this page, so mark it for | ||
207 | * unmapping */ | ||
208 | rx_queue->buf_page = NULL; | ||
209 | rx_buf->unmap_addr = rx_queue->buf_dma_addr; | ||
210 | |||
211 | out: | ||
212 | return 0; | 219 | return 0; |
213 | } | 220 | } |
214 | 221 | ||
215 | /* This allocates memory for a new receive buffer, maps it for DMA, | ||
216 | * and populates a struct efx_rx_buffer with the relevant | ||
217 | * information. | ||
218 | */ | ||
219 | static int efx_init_rx_buffer(struct efx_rx_queue *rx_queue, | ||
220 | struct efx_rx_buffer *new_rx_buf) | ||
221 | { | ||
222 | int rc = 0; | ||
223 | |||
224 | if (rx_queue->channel->rx_alloc_push_pages) { | ||
225 | new_rx_buf->skb = NULL; | ||
226 | rc = efx_init_rx_buffer_page(rx_queue, new_rx_buf); | ||
227 | rx_queue->alloc_page_count++; | ||
228 | } else { | ||
229 | new_rx_buf->page = NULL; | ||
230 | rc = efx_init_rx_buffer_skb(rx_queue, new_rx_buf); | ||
231 | rx_queue->alloc_skb_count++; | ||
232 | } | ||
233 | |||
234 | if (unlikely(rc < 0)) | ||
235 | EFX_LOG_RL(rx_queue->efx, "%s RXQ[%d] =%d\n", __func__, | ||
236 | rx_queue->queue, rc); | ||
237 | return rc; | ||
238 | } | ||
239 | |||
240 | static void efx_unmap_rx_buffer(struct efx_nic *efx, | 222 | static void efx_unmap_rx_buffer(struct efx_nic *efx, |
241 | struct efx_rx_buffer *rx_buf) | 223 | struct efx_rx_buffer *rx_buf) |
242 | { | 224 | { |
243 | if (rx_buf->page) { | 225 | if (rx_buf->page) { |
226 | struct efx_rx_page_state *state; | ||
227 | |||
244 | EFX_BUG_ON_PARANOID(rx_buf->skb); | 228 | EFX_BUG_ON_PARANOID(rx_buf->skb); |
245 | if (rx_buf->unmap_addr) { | 229 | |
246 | pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr, | 230 | state = page_address(rx_buf->page); |
231 | if (--state->refcnt == 0) { | ||
232 | pci_unmap_page(efx->pci_dev, | ||
233 | state->dma_addr, | ||
247 | efx_rx_buf_size(efx), | 234 | efx_rx_buf_size(efx), |
248 | PCI_DMA_FROMDEVICE); | 235 | PCI_DMA_FROMDEVICE); |
249 | rx_buf->unmap_addr = 0; | ||
250 | } | 236 | } |
251 | } else if (likely(rx_buf->skb)) { | 237 | } else if (likely(rx_buf->skb)) { |
252 | pci_unmap_single(efx->pci_dev, rx_buf->dma_addr, | 238 | pci_unmap_single(efx->pci_dev, rx_buf->dma_addr, |
@@ -273,31 +259,84 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, | |||
273 | efx_free_rx_buffer(rx_queue->efx, rx_buf); | 259 | efx_free_rx_buffer(rx_queue->efx, rx_buf); |
274 | } | 260 | } |
275 | 261 | ||
262 | /* Attempt to resurrect the other receive buffer that used to share this page, | ||
263 | * which had previously been passed up to the kernel and freed. */ | ||
264 | static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue, | ||
265 | struct efx_rx_buffer *rx_buf) | ||
266 | { | ||
267 | struct efx_rx_page_state *state = page_address(rx_buf->page); | ||
268 | struct efx_rx_buffer *new_buf; | ||
269 | unsigned fill_level, index; | ||
270 | |||
271 | /* +1 because efx_rx_packet() incremented removed_count. +1 because | ||
272 | * we'd like to insert an additional descriptor whilst leaving | ||
273 | * EFX_RXD_HEAD_ROOM for the non-recycle path */ | ||
274 | fill_level = (rx_queue->added_count - rx_queue->removed_count + 2); | ||
275 | if (unlikely(fill_level >= EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM)) { | ||
276 | /* We could place "state" on a list, and drain the list in | ||
277 | * efx_fast_push_rx_descriptors(). For now, this will do. */ | ||
278 | return; | ||
279 | } | ||
280 | |||
281 | ++state->refcnt; | ||
282 | get_page(rx_buf->page); | ||
283 | |||
284 | index = rx_queue->added_count & EFX_RXQ_MASK; | ||
285 | new_buf = efx_rx_buffer(rx_queue, index); | ||
286 | new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1); | ||
287 | new_buf->skb = NULL; | ||
288 | new_buf->page = rx_buf->page; | ||
289 | new_buf->data = (void *) | ||
290 | ((__force unsigned long)rx_buf->data ^ (PAGE_SIZE >> 1)); | ||
291 | new_buf->len = rx_buf->len; | ||
292 | ++rx_queue->added_count; | ||
293 | } | ||
294 | |||
295 | /* Recycle the given rx buffer directly back into the rx_queue. There is | ||
296 | * always room to add this buffer, because we've just popped a buffer. */ | ||
297 | static void efx_recycle_rx_buffer(struct efx_channel *channel, | ||
298 | struct efx_rx_buffer *rx_buf) | ||
299 | { | ||
300 | struct efx_nic *efx = channel->efx; | ||
301 | struct efx_rx_queue *rx_queue = &efx->rx_queue[channel->channel]; | ||
302 | struct efx_rx_buffer *new_buf; | ||
303 | unsigned index; | ||
304 | |||
305 | if (rx_buf->page != NULL && efx->rx_buffer_len <= EFX_RX_HALF_PAGE && | ||
306 | page_count(rx_buf->page) == 1) | ||
307 | efx_resurrect_rx_buffer(rx_queue, rx_buf); | ||
308 | |||
309 | index = rx_queue->added_count & EFX_RXQ_MASK; | ||
310 | new_buf = efx_rx_buffer(rx_queue, index); | ||
311 | |||
312 | memcpy(new_buf, rx_buf, sizeof(*new_buf)); | ||
313 | rx_buf->page = NULL; | ||
314 | rx_buf->skb = NULL; | ||
315 | ++rx_queue->added_count; | ||
316 | } | ||
317 | |||
276 | /** | 318 | /** |
277 | * efx_fast_push_rx_descriptors - push new RX descriptors quickly | 319 | * efx_fast_push_rx_descriptors - push new RX descriptors quickly |
278 | * @rx_queue: RX descriptor queue | 320 | * @rx_queue: RX descriptor queue |
279 | * @retry: Recheck the fill level | ||
280 | * This will aim to fill the RX descriptor queue up to | 321 | * This will aim to fill the RX descriptor queue up to |
281 | * @rx_queue->@fast_fill_limit. If there is insufficient atomic | 322 | * @rx_queue->@fast_fill_limit. If there is insufficient atomic |
282 | * memory to do so, the caller should retry. | 323 | * memory to do so, a slow fill will be scheduled. |
324 | * | ||
325 | * The caller must provide serialisation (none is used here). In practice, | ||
326 | * this means this function must run from the NAPI handler, or be called | ||
327 | * when NAPI is disabled. | ||
283 | */ | 328 | */ |
284 | static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, | 329 | void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue) |
285 | int retry) | ||
286 | { | 330 | { |
287 | struct efx_rx_buffer *rx_buf; | 331 | struct efx_channel *channel = rx_queue->channel; |
288 | unsigned fill_level, index; | 332 | unsigned fill_level; |
289 | int i, space, rc = 0; | 333 | int space, rc = 0; |
290 | 334 | ||
291 | /* Calculate current fill level. Do this outside the lock, | 335 | /* Calculate current fill level, and exit if we don't need to fill */ |
292 | * because most of the time we'll end up not wanting to do the | ||
293 | * fill anyway. | ||
294 | */ | ||
295 | fill_level = (rx_queue->added_count - rx_queue->removed_count); | 336 | fill_level = (rx_queue->added_count - rx_queue->removed_count); |
296 | EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE); | 337 | EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE); |
297 | |||
298 | /* Don't fill if we don't need to */ | ||
299 | if (fill_level >= rx_queue->fast_fill_trigger) | 338 | if (fill_level >= rx_queue->fast_fill_trigger) |
300 | return 0; | 339 | goto out; |
301 | 340 | ||
302 | /* Record minimum fill level */ | 341 | /* Record minimum fill level */ |
303 | if (unlikely(fill_level < rx_queue->min_fill)) { | 342 | if (unlikely(fill_level < rx_queue->min_fill)) { |
@@ -305,34 +344,25 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, | |||
305 | rx_queue->min_fill = fill_level; | 344 | rx_queue->min_fill = fill_level; |
306 | } | 345 | } |
307 | 346 | ||
308 | /* Acquire RX add lock. If this lock is contended, then a fast | ||
309 | * fill must already be in progress (e.g. in the refill | ||
310 | * tasklet), so we don't need to do anything | ||
311 | */ | ||
312 | if (!spin_trylock_bh(&rx_queue->add_lock)) | ||
313 | return -1; | ||
314 | |||
315 | retry: | ||
316 | /* Recalculate current fill level now that we have the lock */ | ||
317 | fill_level = (rx_queue->added_count - rx_queue->removed_count); | ||
318 | EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE); | ||
319 | space = rx_queue->fast_fill_limit - fill_level; | 347 | space = rx_queue->fast_fill_limit - fill_level; |
320 | if (space < EFX_RX_BATCH) | 348 | if (space < EFX_RX_BATCH) |
321 | goto out_unlock; | 349 | goto out; |
322 | 350 | ||
323 | EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from" | 351 | EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from" |
324 | " level %d to level %d using %s allocation\n", | 352 | " level %d to level %d using %s allocation\n", |
325 | rx_queue->queue, fill_level, rx_queue->fast_fill_limit, | 353 | rx_queue->queue, fill_level, rx_queue->fast_fill_limit, |
326 | rx_queue->channel->rx_alloc_push_pages ? "page" : "skb"); | 354 | channel->rx_alloc_push_pages ? "page" : "skb"); |
327 | 355 | ||
328 | do { | 356 | do { |
329 | for (i = 0; i < EFX_RX_BATCH; ++i) { | 357 | if (channel->rx_alloc_push_pages) |
330 | index = rx_queue->added_count & EFX_RXQ_MASK; | 358 | rc = efx_init_rx_buffers_page(rx_queue); |
331 | rx_buf = efx_rx_buffer(rx_queue, index); | 359 | else |
332 | rc = efx_init_rx_buffer(rx_queue, rx_buf); | 360 | rc = efx_init_rx_buffers_skb(rx_queue); |
333 | if (unlikely(rc)) | 361 | if (unlikely(rc)) { |
334 | goto out; | 362 | /* Ensure that we don't leave the rx queue empty */ |
335 | ++rx_queue->added_count; | 363 | if (rx_queue->added_count == rx_queue->removed_count) |
364 | efx_schedule_slow_fill(rx_queue); | ||
365 | goto out; | ||
336 | } | 366 | } |
337 | } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH); | 367 | } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH); |
338 | 368 | ||
@@ -341,63 +371,18 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, | |||
341 | rx_queue->added_count - rx_queue->removed_count); | 371 | rx_queue->added_count - rx_queue->removed_count); |
342 | 372 | ||
343 | out: | 373 | out: |
344 | /* Send write pointer to card. */ | 374 | if (rx_queue->notified_count != rx_queue->added_count) |
345 | efx_nic_notify_rx_desc(rx_queue); | 375 | efx_nic_notify_rx_desc(rx_queue); |
346 | |||
347 | /* If the fast fill is running inside from the refill tasklet, then | ||
348 | * for SMP systems it may be running on a different CPU to | ||
349 | * RX event processing, which means that the fill level may now be | ||
350 | * out of date. */ | ||
351 | if (unlikely(retry && (rc == 0))) | ||
352 | goto retry; | ||
353 | |||
354 | out_unlock: | ||
355 | spin_unlock_bh(&rx_queue->add_lock); | ||
356 | |||
357 | return rc; | ||
358 | } | ||
359 | |||
360 | /** | ||
361 | * efx_fast_push_rx_descriptors - push new RX descriptors quickly | ||
362 | * @rx_queue: RX descriptor queue | ||
363 | * | ||
364 | * This will aim to fill the RX descriptor queue up to | ||
365 | * @rx_queue->@fast_fill_limit. If there is insufficient memory to do so, | ||
366 | * it will schedule a work item to immediately continue the fast fill | ||
367 | */ | ||
368 | void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue) | ||
369 | { | ||
370 | int rc; | ||
371 | |||
372 | rc = __efx_fast_push_rx_descriptors(rx_queue, 0); | ||
373 | if (unlikely(rc)) { | ||
374 | /* Schedule the work item to run immediately. The hope is | ||
375 | * that work is immediately pending to free some memory | ||
376 | * (e.g. an RX event or TX completion) | ||
377 | */ | ||
378 | efx_schedule_slow_fill(rx_queue, 0); | ||
379 | } | ||
380 | } | 376 | } |
381 | 377 | ||
382 | void efx_rx_work(struct work_struct *data) | 378 | void efx_rx_slow_fill(unsigned long context) |
383 | { | 379 | { |
384 | struct efx_rx_queue *rx_queue; | 380 | struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context; |
385 | int rc; | 381 | struct efx_channel *channel = rx_queue->channel; |
386 | |||
387 | rx_queue = container_of(data, struct efx_rx_queue, work.work); | ||
388 | |||
389 | if (unlikely(!rx_queue->channel->enabled)) | ||
390 | return; | ||
391 | |||
392 | EFX_TRACE(rx_queue->efx, "RX queue %d worker thread executing on CPU " | ||
393 | "%d\n", rx_queue->queue, raw_smp_processor_id()); | ||
394 | 382 | ||
383 | /* Post an event to cause NAPI to run and refill the queue */ | ||
384 | efx_nic_generate_fill_event(channel); | ||
395 | ++rx_queue->slow_fill_count; | 385 | ++rx_queue->slow_fill_count; |
396 | /* Push new RX descriptors, allowing at least 1 jiffy for | ||
397 | * the kernel to free some more memory. */ | ||
398 | rc = __efx_fast_push_rx_descriptors(rx_queue, 1); | ||
399 | if (rc) | ||
400 | efx_schedule_slow_fill(rx_queue, 1); | ||
401 | } | 386 | } |
402 | 387 | ||
403 | static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, | 388 | static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, |
@@ -498,6 +483,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, | |||
498 | unsigned int len, bool checksummed, bool discard) | 483 | unsigned int len, bool checksummed, bool discard) |
499 | { | 484 | { |
500 | struct efx_nic *efx = rx_queue->efx; | 485 | struct efx_nic *efx = rx_queue->efx; |
486 | struct efx_channel *channel = rx_queue->channel; | ||
501 | struct efx_rx_buffer *rx_buf; | 487 | struct efx_rx_buffer *rx_buf; |
502 | bool leak_packet = false; | 488 | bool leak_packet = false; |
503 | 489 | ||
@@ -525,12 +511,13 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, | |||
525 | /* Discard packet, if instructed to do so */ | 511 | /* Discard packet, if instructed to do so */ |
526 | if (unlikely(discard)) { | 512 | if (unlikely(discard)) { |
527 | if (unlikely(leak_packet)) | 513 | if (unlikely(leak_packet)) |
528 | rx_queue->channel->n_skbuff_leaks++; | 514 | channel->n_skbuff_leaks++; |
529 | else | 515 | else |
530 | /* We haven't called efx_unmap_rx_buffer yet, | 516 | efx_recycle_rx_buffer(channel, rx_buf); |
531 | * so fini the entire rx_buffer here */ | 517 | |
532 | efx_fini_rx_buffer(rx_queue, rx_buf); | 518 | /* Don't hold off the previous receive */ |
533 | return; | 519 | rx_buf = NULL; |
520 | goto out; | ||
534 | } | 521 | } |
535 | 522 | ||
536 | /* Release card resources - assumes all RX buffers consumed in-order | 523 | /* Release card resources - assumes all RX buffers consumed in-order |
@@ -547,6 +534,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, | |||
547 | * prefetched into cache. | 534 | * prefetched into cache. |
548 | */ | 535 | */ |
549 | rx_buf->len = len; | 536 | rx_buf->len = len; |
537 | out: | ||
550 | if (rx_queue->channel->rx_pkt) | 538 | if (rx_queue->channel->rx_pkt) |
551 | __efx_rx_packet(rx_queue->channel, | 539 | __efx_rx_packet(rx_queue->channel, |
552 | rx_queue->channel->rx_pkt, | 540 | rx_queue->channel->rx_pkt, |
@@ -682,6 +670,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) | |||
682 | 670 | ||
683 | EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue); | 671 | EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue); |
684 | 672 | ||
673 | del_timer_sync(&rx_queue->slow_fill); | ||
685 | efx_nic_fini_rx(rx_queue); | 674 | efx_nic_fini_rx(rx_queue); |
686 | 675 | ||
687 | /* Release RX buffers NB start at index 0 not current HW ptr */ | 676 | /* Release RX buffers NB start at index 0 not current HW ptr */ |
@@ -691,16 +680,6 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) | |||
691 | efx_fini_rx_buffer(rx_queue, rx_buf); | 680 | efx_fini_rx_buffer(rx_queue, rx_buf); |
692 | } | 681 | } |
693 | } | 682 | } |
694 | |||
695 | /* For a page that is part-way through splitting into RX buffers */ | ||
696 | if (rx_queue->buf_page != NULL) { | ||
697 | pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr, | ||
698 | efx_rx_buf_size(rx_queue->efx), | ||
699 | PCI_DMA_FROMDEVICE); | ||
700 | __free_pages(rx_queue->buf_page, | ||
701 | rx_queue->efx->rx_buffer_order); | ||
702 | rx_queue->buf_page = NULL; | ||
703 | } | ||
704 | } | 683 | } |
705 | 684 | ||
706 | void efx_remove_rx_queue(struct efx_rx_queue *rx_queue) | 685 | void efx_remove_rx_queue(struct efx_rx_queue *rx_queue) |
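
efx_resurrect_rx_buffer() above locates the other buffer carved from the same page by XOR-ing the DMA address and the data pointer with PAGE_SIZE/2: both buffers sit at the same offset within their respective halves, so flipping that single bit jumps to the sibling and flipping it again jumps back. A tiny userspace demonstration with made-up addresses:

	#include <stdio.h>

	#define DEMO_PAGE_SIZE 4096UL

	int main(void)
	{
		unsigned long half = DEMO_PAGE_SIZE >> 1;
		unsigned long buf_a = 0x10000UL + 66;	/* hypothetical buffer in the first half */
		unsigned long buf_b = buf_a ^ half;	/* its sibling in the second half */

		printf("buf_a        = %#lx\n", buf_a);
		printf("buf_a ^ half = %#lx (sibling)\n", buf_b);
		printf("buf_b ^ half = %#lx (back again)\n", buf_b ^ half);
		return 0;
	}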
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c index 371e86cc090f..1f83404af63b 100644 --- a/drivers/net/sfc/selftest.c +++ b/drivers/net/sfc/selftest.c | |||
@@ -38,7 +38,7 @@ struct efx_loopback_payload { | |||
38 | struct udphdr udp; | 38 | struct udphdr udp; |
39 | __be16 iteration; | 39 | __be16 iteration; |
40 | const char msg[64]; | 40 | const char msg[64]; |
41 | } __attribute__ ((packed)); | 41 | } __packed; |
42 | 42 | ||
43 | /* Loopback test source MAC address */ | 43 | /* Loopback test source MAC address */ |
44 | static const unsigned char payload_source[ETH_ALEN] = { | 44 | static const unsigned char payload_source[ETH_ALEN] = { |
@@ -161,23 +161,17 @@ static int efx_test_interrupts(struct efx_nic *efx, | |||
161 | static int efx_test_eventq_irq(struct efx_channel *channel, | 161 | static int efx_test_eventq_irq(struct efx_channel *channel, |
162 | struct efx_self_tests *tests) | 162 | struct efx_self_tests *tests) |
163 | { | 163 | { |
164 | unsigned int magic, count; | 164 | unsigned int magic_count, count; |
165 | |||
166 | /* Channel specific code, limited to 20 bits */ | ||
167 | magic = (0x00010150 + channel->channel); | ||
168 | EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n", | ||
169 | channel->channel, magic); | ||
170 | 165 | ||
171 | tests->eventq_dma[channel->channel] = -1; | 166 | tests->eventq_dma[channel->channel] = -1; |
172 | tests->eventq_int[channel->channel] = -1; | 167 | tests->eventq_int[channel->channel] = -1; |
173 | tests->eventq_poll[channel->channel] = -1; | 168 | tests->eventq_poll[channel->channel] = -1; |
174 | 169 | ||
175 | /* Reset flag and zero magic word */ | 170 | magic_count = channel->magic_count; |
176 | channel->efx->last_irq_cpu = -1; | 171 | channel->efx->last_irq_cpu = -1; |
177 | channel->eventq_magic = 0; | ||
178 | smp_wmb(); | 172 | smp_wmb(); |
179 | 173 | ||
180 | efx_nic_generate_test_event(channel, magic); | 174 | efx_nic_generate_test_event(channel); |
181 | 175 | ||
182 | /* Wait for arrival of interrupt */ | 176 | /* Wait for arrival of interrupt */ |
183 | count = 0; | 177 | count = 0; |
@@ -187,7 +181,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel, | |||
187 | if (channel->work_pending) | 181 | if (channel->work_pending) |
188 | efx_process_channel_now(channel); | 182 | efx_process_channel_now(channel); |
189 | 183 | ||
190 | if (channel->eventq_magic == magic) | 184 | if (channel->magic_count != magic_count) |
191 | goto eventq_ok; | 185 | goto eventq_ok; |
192 | } while (++count < 2); | 186 | } while (++count < 2); |
193 | 187 | ||
@@ -204,7 +198,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel, | |||
204 | 198 | ||
205 | /* Check to see if event was received even if interrupt wasn't */ | 199 | /* Check to see if event was received even if interrupt wasn't */ |
206 | efx_process_channel_now(channel); | 200 | efx_process_channel_now(channel); |
207 | if (channel->eventq_magic == magic) { | 201 | if (channel->magic_count != magic_count) { |
208 | EFX_ERR(channel->efx, "channel %d event was generated, but " | 202 | EFX_ERR(channel->efx, "channel %d event was generated, but " |
209 | "failed to trigger an interrupt\n", channel->channel); | 203 | "failed to trigger an interrupt\n", channel->channel); |
210 | tests->eventq_dma[channel->channel] = 1; | 204 | tests->eventq_dma[channel->channel] = 1; |
@@ -545,7 +539,7 @@ efx_test_loopback(struct efx_tx_queue *tx_queue, | |||
545 | static int efx_wait_for_link(struct efx_nic *efx) | 539 | static int efx_wait_for_link(struct efx_nic *efx) |
546 | { | 540 | { |
547 | struct efx_link_state *link_state = &efx->link_state; | 541 | struct efx_link_state *link_state = &efx->link_state; |
548 | int count; | 542 | int count, link_up_count = 0; |
549 | bool link_up; | 543 | bool link_up; |
550 | 544 | ||
551 | for (count = 0; count < 40; count++) { | 545 | for (count = 0; count < 40; count++) { |
@@ -567,8 +561,12 @@ static int efx_wait_for_link(struct efx_nic *efx) | |||
567 | link_up = !efx->mac_op->check_fault(efx); | 561 | link_up = !efx->mac_op->check_fault(efx); |
568 | mutex_unlock(&efx->mac_lock); | 562 | mutex_unlock(&efx->mac_lock); |
569 | 563 | ||
570 | if (link_up) | 564 | if (link_up) { |
571 | return 0; | 565 | if (++link_up_count == 2) |
566 | return 0; | ||
567 | } else { | ||
568 | link_up_count = 0; | ||
569 | } | ||
572 | } | 570 | } |
573 | 571 | ||
574 | return -ETIMEDOUT; | 572 | return -ETIMEDOUT; |
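
The selftest.c change makes efx_wait_for_link() insist on seeing the link healthy on two consecutive polls before the loopback test proceeds, so a single transient good reading no longer counts. A hedged userspace sketch of that debounce; check_link() just pretends the link comes up at the fifth poll:

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in for the driver's link-state and fault checks */
	static bool check_link(int poll)
	{
		return poll >= 5;
	}

	static int wait_for_link_demo(void)
	{
		int count, link_up_count = 0;

		for (count = 0; count < 40; count++) {
			if (check_link(count)) {
				if (++link_up_count == 2)
					return count;	/* stable across two polls */
			} else {
				link_up_count = 0;	/* any blip restarts the count */
			}
		}
		return -1;	/* the driver returns -ETIMEDOUT here */
	}

	int main(void)
	{
		printf("link declared up at poll %d\n", wait_for_link_demo());
		return 0;
	}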
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h index 518f7fc91473..782e45a613d6 100644 --- a/drivers/net/sfc/workarounds.h +++ b/drivers/net/sfc/workarounds.h | |||
@@ -54,7 +54,7 @@ | |||
54 | /* Increase filter depth to avoid RX_RESET */ | 54 | /* Increase filter depth to avoid RX_RESET */ |
55 | #define EFX_WORKAROUND_7244 EFX_WORKAROUND_FALCON_A | 55 | #define EFX_WORKAROUND_7244 EFX_WORKAROUND_FALCON_A |
56 | /* Flushes may never complete */ | 56 | /* Flushes may never complete */ |
57 | #define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_A | 57 | #define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_AB |
58 | /* Leak overlength packets rather than free */ | 58 | /* Leak overlength packets rather than free */ |
59 | #define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A | 59 | #define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A |
60 | 60 | ||
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index 084eff21b67a..61891a6cacc2 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h | |||
@@ -2161,21 +2161,21 @@ struct sky2_tx_le { | |||
2161 | __le16 length; /* also vlan tag or checksum start */ | 2161 | __le16 length; /* also vlan tag or checksum start */ |
2162 | u8 ctrl; | 2162 | u8 ctrl; |
2163 | u8 opcode; | 2163 | u8 opcode; |
2164 | } __attribute((packed)); | 2164 | } __packed; |
2165 | 2165 | ||
2166 | struct sky2_rx_le { | 2166 | struct sky2_rx_le { |
2167 | __le32 addr; | 2167 | __le32 addr; |
2168 | __le16 length; | 2168 | __le16 length; |
2169 | u8 ctrl; | 2169 | u8 ctrl; |
2170 | u8 opcode; | 2170 | u8 opcode; |
2171 | } __attribute((packed)); | 2171 | } __packed; |
2172 | 2172 | ||
2173 | struct sky2_status_le { | 2173 | struct sky2_status_le { |
2174 | __le32 status; /* also checksum */ | 2174 | __le32 status; /* also checksum */ |
2175 | __le16 length; /* also vlan tag */ | 2175 | __le16 length; /* also vlan tag */ |
2176 | u8 css; | 2176 | u8 css; |
2177 | u8 opcode; | 2177 | u8 opcode; |
2178 | } __attribute((packed)); | 2178 | } __packed; |
2179 | 2179 | ||
2180 | struct tx_ring_info { | 2180 | struct tx_ring_info { |
2181 | struct sk_buff *skb; | 2181 | struct sk_buff *skb; |
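
The sky2.h hunks, like the selftest.c one above and the tehuti.h and de2104x.c ones below, simply replace the spelled-out GCC attribute with the kernel's __packed macro, which expands to __attribute__((packed)), so the generated layout is unchanged. For reference, a small userspace example of what the attribute itself does; the demo_* structs are invented for illustration:

	#include <stdio.h>

	#define __packed __attribute__((packed))	/* same definition the kernel uses */

	struct demo_natural {
		unsigned char opcode;
		unsigned int addr;	/* compiler may insert 3 bytes of padding before this */
	};

	struct demo_packed {
		unsigned char opcode;
		unsigned int addr;	/* no padding: layout matches the on-wire format */
	} __packed;

	int main(void)
	{
		/* typically prints 8 vs 5 */
		printf("natural: %zu bytes, packed: %zu bytes\n",
		       sizeof(struct demo_natural), sizeof(struct demo_packed));
		return 0;
	}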
diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h index cff98d07cba8..67e3b71bf705 100644 --- a/drivers/net/tehuti.h +++ b/drivers/net/tehuti.h | |||
@@ -334,7 +334,7 @@ struct txd_desc { | |||
334 | u32 va_lo; | 334 | u32 va_lo; |
335 | u32 va_hi; | 335 | u32 va_hi; |
336 | struct pbl pbl[0]; /* Fragments */ | 336 | struct pbl pbl[0]; /* Fragments */ |
337 | } __attribute__ ((packed)); | 337 | } __packed; |
338 | 338 | ||
339 | /* Register region size */ | 339 | /* Register region size */ |
340 | #define BDX_REGS_SIZE 0x1000 | 340 | #define BDX_REGS_SIZE 0x1000 |
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c index c0e70006374e..960962660079 100644 --- a/drivers/net/tulip/de2104x.c +++ b/drivers/net/tulip/de2104x.c | |||
@@ -262,13 +262,13 @@ struct de_srom_media_block { | |||
262 | u16 csr13; | 262 | u16 csr13; |
263 | u16 csr14; | 263 | u16 csr14; |
264 | u16 csr15; | 264 | u16 csr15; |
265 | } __attribute__((packed)); | 265 | } __packed; |
266 | 266 | ||
267 | struct de_srom_info_leaf { | 267 | struct de_srom_info_leaf { |
268 | u16 default_media; | 268 | u16 default_media; |
269 | u8 n_blocks; | 269 | u8 n_blocks; |
270 | u8 unused; | 270 | u8 unused; |
271 | } __attribute__((packed)); | 271 | } __packed; |
272 | 272 | ||
273 | struct de_desc { | 273 | struct de_desc { |
274 | __le32 opts1; | 274 | __le32 opts1; |
diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c index 6002e651b9ea..3031ed9c4a1a 100644 --- a/drivers/net/tulip/eeprom.c +++ b/drivers/net/tulip/eeprom.c | |||
@@ -120,8 +120,8 @@ static void __devinit tulip_build_fake_mediatable(struct tulip_private *tp) | |||
120 | 0x00, 0x06 /* ttm bit map */ | 120 | 0x00, 0x06 /* ttm bit map */ |
121 | }; | 121 | }; |
122 | 122 | ||
123 | tp->mtable = (struct mediatable *) | 123 | tp->mtable = kmalloc(sizeof(struct mediatable) + |
124 | kmalloc(sizeof(struct mediatable) + sizeof(struct medialeaf), GFP_KERNEL); | 124 | sizeof(struct medialeaf), GFP_KERNEL); |
125 | 125 | ||
126 | if (tp->mtable == NULL) | 126 | if (tp->mtable == NULL) |
127 | return; /* Horrible, impossible failure. */ | 127 | return; /* Horrible, impossible failure. */ |
@@ -227,9 +227,9 @@ subsequent_board: | |||
227 | return; | 227 | return; |
228 | } | 228 | } |
229 | 229 | ||
230 | mtable = (struct mediatable *) | 230 | mtable = kmalloc(sizeof(struct mediatable) + |
231 | kmalloc(sizeof(struct mediatable) + count*sizeof(struct medialeaf), | 231 | count * sizeof(struct medialeaf), |
232 | GFP_KERNEL); | 232 | GFP_KERNEL); |
233 | if (mtable == NULL) | 233 | if (mtable == NULL) |
234 | return; /* Horrible, impossible failure. */ | 234 | return; /* Horrible, impossible failure. */ |
235 | last_mediatable = tp->mtable = mtable; | 235 | last_mediatable = tp->mtable = mtable; |
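
The eeprom.c hunks above drop the cast on kmalloc()'s return value while reflowing the call; kmalloc() returns void *, which converts implicitly to any object pointer in C, so the cast adds nothing. A small illustrative sketch of the pattern, using a made-up structure name:

    #include <linux/slab.h>

    struct example_table {
        int count;
        /* trailing entries follow the header */
    };

    static struct example_table *alloc_table(size_t extra)
    {
        /* no cast needed: void * converts implicitly */
        struct example_table *t = kmalloc(sizeof(*t) + extra, GFP_KERNEL);

        if (!t)
            return NULL;
        t->count = 0;
        return t;
    }
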
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h index 0afa2d4f9472..e525875ed67d 100644 --- a/drivers/net/tulip/tulip.h +++ b/drivers/net/tulip/tulip.h | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/types.h> | 20 | #include <linux/types.h> |
21 | #include <linux/spinlock.h> | 21 | #include <linux/spinlock.h> |
22 | #include <linux/netdevice.h> | 22 | #include <linux/netdevice.h> |
23 | #include <linux/ethtool.h> | ||
23 | #include <linux/timer.h> | 24 | #include <linux/timer.h> |
24 | #include <linux/delay.h> | 25 | #include <linux/delay.h> |
25 | #include <linux/pci.h> | 26 | #include <linux/pci.h> |
@@ -51,22 +52,23 @@ struct tulip_chip_table { | |||
51 | 52 | ||
52 | 53 | ||
53 | enum tbl_flag { | 54 | enum tbl_flag { |
54 | HAS_MII = 0x0001, | 55 | HAS_MII = 0x00001, |
55 | HAS_MEDIA_TABLE = 0x0002, | 56 | HAS_MEDIA_TABLE = 0x00002, |
56 | CSR12_IN_SROM = 0x0004, | 57 | CSR12_IN_SROM = 0x00004, |
57 | ALWAYS_CHECK_MII = 0x0008, | 58 | ALWAYS_CHECK_MII = 0x00008, |
58 | HAS_ACPI = 0x0010, | 59 | HAS_ACPI = 0x00010, |
59 | MC_HASH_ONLY = 0x0020, /* Hash-only multicast filter. */ | 60 | MC_HASH_ONLY = 0x00020, /* Hash-only multicast filter. */ |
60 | HAS_PNICNWAY = 0x0080, | 61 | HAS_PNICNWAY = 0x00080, |
61 | HAS_NWAY = 0x0040, /* Uses internal NWay xcvr. */ | 62 | HAS_NWAY = 0x00040, /* Uses internal NWay xcvr. */ |
62 | HAS_INTR_MITIGATION = 0x0100, | 63 | HAS_INTR_MITIGATION = 0x00100, |
63 | IS_ASIX = 0x0200, | 64 | IS_ASIX = 0x00200, |
64 | HAS_8023X = 0x0400, | 65 | HAS_8023X = 0x00400, |
65 | COMET_MAC_ADDR = 0x0800, | 66 | COMET_MAC_ADDR = 0x00800, |
66 | HAS_PCI_MWI = 0x1000, | 67 | HAS_PCI_MWI = 0x01000, |
67 | HAS_PHY_IRQ = 0x2000, | 68 | HAS_PHY_IRQ = 0x02000, |
68 | HAS_SWAPPED_SEEPROM = 0x4000, | 69 | HAS_SWAPPED_SEEPROM = 0x04000, |
69 | NEEDS_FAKE_MEDIA_TABLE = 0x8000, | 70 | NEEDS_FAKE_MEDIA_TABLE = 0x08000, |
71 | COMET_PM = 0x10000, | ||
70 | }; | 72 | }; |
71 | 73 | ||
72 | 74 | ||
@@ -120,6 +122,11 @@ enum tulip_offsets { | |||
120 | CSR13 = 0x68, | 122 | CSR13 = 0x68, |
121 | CSR14 = 0x70, | 123 | CSR14 = 0x70, |
122 | CSR15 = 0x78, | 124 | CSR15 = 0x78, |
125 | CSR18 = 0x88, | ||
126 | CSR19 = 0x8c, | ||
127 | CSR20 = 0x90, | ||
128 | CSR27 = 0xAC, | ||
129 | CSR28 = 0xB0, | ||
123 | }; | 130 | }; |
124 | 131 | ||
125 | /* register offset and bits for CFDD PCI config reg */ | 132 | /* register offset and bits for CFDD PCI config reg */ |
@@ -289,6 +296,30 @@ enum t21143_csr6_bits { | |||
289 | csr6_mask_100bt = (csr6_scr | csr6_pcs | csr6_hbd), | 296 | csr6_mask_100bt = (csr6_scr | csr6_pcs | csr6_hbd), |
290 | }; | 297 | }; |
291 | 298 | ||
299 | enum tulip_comet_csr13_bits { | ||
300 | /* The LINKOFFE and LINKONE work in conjunction with LSCE, i.e. they | ||
301 | * determine which link status transition wakes up if LSCE is | ||
302 | * enabled */ | ||
303 | comet_csr13_linkoffe = (1 << 17), | ||
304 | comet_csr13_linkone = (1 << 16), | ||
305 | comet_csr13_wfre = (1 << 10), | ||
306 | comet_csr13_mpre = (1 << 9), | ||
307 | comet_csr13_lsce = (1 << 8), | ||
308 | comet_csr13_wfr = (1 << 2), | ||
309 | comet_csr13_mpr = (1 << 1), | ||
310 | comet_csr13_lsc = (1 << 0), | ||
311 | }; | ||
312 | |||
313 | enum tulip_comet_csr18_bits { | ||
314 | comet_csr18_pmes_sticky = (1 << 24), | ||
315 | comet_csr18_pm_mode = (1 << 19), | ||
316 | comet_csr18_apm_mode = (1 << 18), | ||
317 | comet_csr18_d3a = (1 << 7) | ||
318 | }; | ||
319 | |||
320 | enum tulip_comet_csr20_bits { | ||
321 | comet_csr20_pmes = (1 << 15), | ||
322 | }; | ||
292 | 323 | ||
293 | /* Keep the ring sizes a power of two for efficiency. | 324 | /* Keep the ring sizes a power of two for efficiency. |
294 | Making the Tx ring too large decreases the effectiveness of channel | 325 | Making the Tx ring too large decreases the effectiveness of channel |
@@ -411,6 +442,7 @@ struct tulip_private { | |||
411 | unsigned int csr6; /* Current CSR6 control settings. */ | 442 | unsigned int csr6; /* Current CSR6 control settings. */ |
412 | unsigned char eeprom[EEPROM_SIZE]; /* Serial EEPROM contents. */ | 443 | unsigned char eeprom[EEPROM_SIZE]; /* Serial EEPROM contents. */ |
413 | void (*link_change) (struct net_device * dev, int csr5); | 444 | void (*link_change) (struct net_device * dev, int csr5); |
445 | struct ethtool_wolinfo wolinfo; /* WOL settings */ | ||
414 | u16 sym_advertise, mii_advertise; /* NWay capabilities advertised. */ | 446 | u16 sym_advertise, mii_advertise; /* NWay capabilities advertised. */ |
415 | u16 lpar; /* 21143 Link partner ability. */ | 447 | u16 lpar; /* 21143 Link partner ability. */ |
416 | u16 advertising[4]; | 448 | u16 advertising[4]; |
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c index 254643ed945e..03e96b928c04 100644 --- a/drivers/net/tulip/tulip_core.c +++ b/drivers/net/tulip/tulip_core.c | |||
@@ -30,7 +30,6 @@ | |||
30 | #include <linux/etherdevice.h> | 30 | #include <linux/etherdevice.h> |
31 | #include <linux/delay.h> | 31 | #include <linux/delay.h> |
32 | #include <linux/mii.h> | 32 | #include <linux/mii.h> |
33 | #include <linux/ethtool.h> | ||
34 | #include <linux/crc32.h> | 33 | #include <linux/crc32.h> |
35 | #include <asm/unaligned.h> | 34 | #include <asm/unaligned.h> |
36 | #include <asm/uaccess.h> | 35 | #include <asm/uaccess.h> |
@@ -272,6 +271,7 @@ static void tulip_down(struct net_device *dev); | |||
272 | static struct net_device_stats *tulip_get_stats(struct net_device *dev); | 271 | static struct net_device_stats *tulip_get_stats(struct net_device *dev); |
273 | static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | 272 | static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
274 | static void set_rx_mode(struct net_device *dev); | 273 | static void set_rx_mode(struct net_device *dev); |
274 | static void tulip_set_wolopts(struct pci_dev *pdev, u32 wolopts); | ||
275 | #ifdef CONFIG_NET_POLL_CONTROLLER | 275 | #ifdef CONFIG_NET_POLL_CONTROLLER |
276 | static void poll_tulip(struct net_device *dev); | 276 | static void poll_tulip(struct net_device *dev); |
277 | #endif | 277 | #endif |
@@ -309,6 +309,11 @@ static void tulip_up(struct net_device *dev) | |||
309 | /* Wake the chip from sleep/snooze mode. */ | 309 | /* Wake the chip from sleep/snooze mode. */ |
310 | tulip_set_power_state (tp, 0, 0); | 310 | tulip_set_power_state (tp, 0, 0); |
311 | 311 | ||
312 | /* Disable all WOL events */ | ||
313 | pci_enable_wake(tp->pdev, PCI_D3hot, 0); | ||
314 | pci_enable_wake(tp->pdev, PCI_D3cold, 0); | ||
315 | tulip_set_wolopts(tp->pdev, 0); | ||
316 | |||
312 | /* On some chip revs we must set the MII/SYM port before the reset!? */ | 317 | /* On some chip revs we must set the MII/SYM port before the reset!? */ |
313 | if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii)) | 318 | if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii)) |
314 | iowrite32(0x00040000, ioaddr + CSR6); | 319 | iowrite32(0x00040000, ioaddr + CSR6); |
@@ -345,8 +350,8 @@ static void tulip_up(struct net_device *dev) | |||
345 | } else if (tp->flags & COMET_MAC_ADDR) { | 350 | } else if (tp->flags & COMET_MAC_ADDR) { |
346 | iowrite32(addr_low, ioaddr + 0xA4); | 351 | iowrite32(addr_low, ioaddr + 0xA4); |
347 | iowrite32(addr_high, ioaddr + 0xA8); | 352 | iowrite32(addr_high, ioaddr + 0xA8); |
348 | iowrite32(0, ioaddr + 0xAC); | 353 | iowrite32(0, ioaddr + CSR27); |
349 | iowrite32(0, ioaddr + 0xB0); | 354 | iowrite32(0, ioaddr + CSR28); |
350 | } | 355 | } |
351 | } else { | 356 | } else { |
352 | /* This is set_rx_mode(), but without starting the transmitter. */ | 357 | /* This is set_rx_mode(), but without starting the transmitter. */ |
@@ -876,8 +881,35 @@ static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *in | |||
876 | strcpy(info->bus_info, pci_name(np->pdev)); | 881 | strcpy(info->bus_info, pci_name(np->pdev)); |
877 | } | 882 | } |
878 | 883 | ||
884 | |||
885 | static int tulip_ethtool_set_wol(struct net_device *dev, | ||
886 | struct ethtool_wolinfo *wolinfo) | ||
887 | { | ||
888 | struct tulip_private *tp = netdev_priv(dev); | ||
889 | |||
890 | if (wolinfo->wolopts & (~tp->wolinfo.supported)) | ||
891 | return -EOPNOTSUPP; | ||
892 | |||
893 | tp->wolinfo.wolopts = wolinfo->wolopts; | ||
894 | device_set_wakeup_enable(&tp->pdev->dev, tp->wolinfo.wolopts); | ||
895 | return 0; | ||
896 | } | ||
897 | |||
898 | static void tulip_ethtool_get_wol(struct net_device *dev, | ||
899 | struct ethtool_wolinfo *wolinfo) | ||
900 | { | ||
901 | struct tulip_private *tp = netdev_priv(dev); | ||
902 | |||
903 | wolinfo->supported = tp->wolinfo.supported; | ||
904 | wolinfo->wolopts = tp->wolinfo.wolopts; | ||
905 | return; | ||
906 | } | ||
907 | |||
908 | |||
879 | static const struct ethtool_ops ops = { | 909 | static const struct ethtool_ops ops = { |
880 | .get_drvinfo = tulip_get_drvinfo | 910 | .get_drvinfo = tulip_get_drvinfo, |
911 | .set_wol = tulip_ethtool_set_wol, | ||
912 | .get_wol = tulip_ethtool_get_wol, | ||
881 | }; | 913 | }; |
882 | 914 | ||
883 | /* Provide ioctl() calls to examine the MII xcvr state. */ | 915 | /* Provide ioctl() calls to examine the MII xcvr state. */ |
@@ -1093,8 +1125,8 @@ static void set_rx_mode(struct net_device *dev) | |||
1093 | iowrite32(3, ioaddr + CSR13); | 1125 | iowrite32(3, ioaddr + CSR13); |
1094 | iowrite32(mc_filter[1], ioaddr + CSR14); | 1126 | iowrite32(mc_filter[1], ioaddr + CSR14); |
1095 | } else if (tp->flags & COMET_MAC_ADDR) { | 1127 | } else if (tp->flags & COMET_MAC_ADDR) { |
1096 | iowrite32(mc_filter[0], ioaddr + 0xAC); | 1128 | iowrite32(mc_filter[0], ioaddr + CSR27); |
1097 | iowrite32(mc_filter[1], ioaddr + 0xB0); | 1129 | iowrite32(mc_filter[1], ioaddr + CSR28); |
1098 | } | 1130 | } |
1099 | tp->mc_filter[0] = mc_filter[0]; | 1131 | tp->mc_filter[0] = mc_filter[0]; |
1100 | tp->mc_filter[1] = mc_filter[1]; | 1132 | tp->mc_filter[1] = mc_filter[1]; |
@@ -1381,6 +1413,13 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, | |||
1381 | return i; | 1413 | return i; |
1382 | } | 1414 | } |
1383 | 1415 | ||
1416 | /* The chip will fail to enter a low-power state later unless | ||
1417 | * first explicitly commanded into D0 */ | ||
1418 | if (pci_set_power_state(pdev, PCI_D0)) { | ||
1419 | printk (KERN_NOTICE PFX | ||
1420 | "Failed to set power state to D0\n"); | ||
1421 | } | ||
1422 | |||
1384 | irq = pdev->irq; | 1423 | irq = pdev->irq; |
1385 | 1424 | ||
1386 | /* alloc_etherdev ensures aligned and zeroed private structures */ | 1425 | /* alloc_etherdev ensures aligned and zeroed private structures */ |
@@ -1427,6 +1466,19 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, | |||
1427 | 1466 | ||
1428 | tp->chip_id = chip_idx; | 1467 | tp->chip_id = chip_idx; |
1429 | tp->flags = tulip_tbl[chip_idx].flags; | 1468 | tp->flags = tulip_tbl[chip_idx].flags; |
1469 | |||
1470 | tp->wolinfo.supported = 0; | ||
1471 | tp->wolinfo.wolopts = 0; | ||
1472 | /* COMET: Enable power management only for AN983B */ | ||
1473 | if (chip_idx == COMET ) { | ||
1474 | u32 sig; | ||
1475 | pci_read_config_dword (pdev, 0x80, &sig); | ||
1476 | if (sig == 0x09811317) { | ||
1477 | tp->flags |= COMET_PM; | ||
1478 | tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC; | ||
1479 | printk(KERN_INFO "tulip_init_one: Enabled WOL support for AN983B\n"); | ||
1480 | } | ||
1481 | } | ||
1430 | tp->pdev = pdev; | 1482 | tp->pdev = pdev; |
1431 | tp->base_addr = ioaddr; | 1483 | tp->base_addr = ioaddr; |
1432 | tp->revision = pdev->revision; | 1484 | tp->revision = pdev->revision; |
@@ -1759,11 +1811,43 @@ err_out_free_netdev: | |||
1759 | } | 1811 | } |
1760 | 1812 | ||
1761 | 1813 | ||
1814 | /* set the registers according to the given wolopts */ | ||
1815 | static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts) | ||
1816 | { | ||
1817 | struct net_device *dev = pci_get_drvdata(pdev); | ||
1818 | struct tulip_private *tp = netdev_priv(dev); | ||
1819 | void __iomem *ioaddr = tp->base_addr; | ||
1820 | |||
1821 | if (tp->flags & COMET_PM) { | ||
1822 | |||
1823 | unsigned int tmp; | ||
1824 | |||
1825 | tmp = ioread32(ioaddr + CSR18); | ||
1826 | tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode | comet_csr18_d3a); | ||
1827 | tmp |= comet_csr18_pm_mode; | ||
1828 | iowrite32(tmp, ioaddr + CSR18); | ||
1829 | |||
1830 | /* Set the Wake-up Control/Status Register to the given WOL options*/ | ||
1831 | tmp = ioread32(ioaddr + CSR13); | ||
1832 | tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre); | ||
1833 | if (wolopts & WAKE_MAGIC) | ||
1834 | tmp |= comet_csr13_mpre; | ||
1835 | if (wolopts & WAKE_PHY) | ||
1836 | tmp |= comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_lsce; | ||
1837 | /* Clear the event flags */ | ||
1838 | tmp |= comet_csr13_wfr | comet_csr13_mpr | comet_csr13_lsc; | ||
1839 | iowrite32(tmp, ioaddr + CSR13); | ||
1840 | } | ||
1841 | } | ||
1842 | |||
1762 | #ifdef CONFIG_PM | 1843 | #ifdef CONFIG_PM |
1763 | 1844 | ||
1845 | |||
1764 | static int tulip_suspend (struct pci_dev *pdev, pm_message_t state) | 1846 | static int tulip_suspend (struct pci_dev *pdev, pm_message_t state) |
1765 | { | 1847 | { |
1848 | pci_power_t pstate; | ||
1766 | struct net_device *dev = pci_get_drvdata(pdev); | 1849 | struct net_device *dev = pci_get_drvdata(pdev); |
1850 | struct tulip_private *tp = netdev_priv(dev); | ||
1767 | 1851 | ||
1768 | if (!dev) | 1852 | if (!dev) |
1769 | return -EINVAL; | 1853 | return -EINVAL; |
@@ -1779,7 +1863,16 @@ static int tulip_suspend (struct pci_dev *pdev, pm_message_t state) | |||
1779 | save_state: | 1863 | save_state: |
1780 | pci_save_state(pdev); | 1864 | pci_save_state(pdev); |
1781 | pci_disable_device(pdev); | 1865 | pci_disable_device(pdev); |
1782 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | 1866 | pstate = pci_choose_state(pdev, state); |
1867 | if (state.event == PM_EVENT_SUSPEND && pstate != PCI_D0) { | ||
1868 | int rc; | ||
1869 | |||
1870 | tulip_set_wolopts(pdev, tp->wolinfo.wolopts); | ||
1871 | rc = pci_enable_wake(pdev, pstate, tp->wolinfo.wolopts); | ||
1872 | if (rc) | ||
1873 | printk("tulip: pci_enable_wake failed (%d)\n", rc); | ||
1874 | } | ||
1875 | pci_set_power_state(pdev, pstate); | ||
1783 | 1876 | ||
1784 | return 0; | 1877 | return 0; |
1785 | } | 1878 | } |
@@ -1788,7 +1881,10 @@ save_state: | |||
1788 | static int tulip_resume(struct pci_dev *pdev) | 1881 | static int tulip_resume(struct pci_dev *pdev) |
1789 | { | 1882 | { |
1790 | struct net_device *dev = pci_get_drvdata(pdev); | 1883 | struct net_device *dev = pci_get_drvdata(pdev); |
1884 | struct tulip_private *tp = netdev_priv(dev); | ||
1885 | void __iomem *ioaddr = tp->base_addr; | ||
1791 | int retval; | 1886 | int retval; |
1887 | unsigned int tmp; | ||
1792 | 1888 | ||
1793 | if (!dev) | 1889 | if (!dev) |
1794 | return -EINVAL; | 1890 | return -EINVAL; |
@@ -1809,6 +1905,18 @@ static int tulip_resume(struct pci_dev *pdev) | |||
1809 | return retval; | 1905 | return retval; |
1810 | } | 1906 | } |
1811 | 1907 | ||
1908 | if (tp->flags & COMET_PM) { | ||
1909 | pci_enable_wake(pdev, PCI_D3hot, 0); | ||
1910 | pci_enable_wake(pdev, PCI_D3cold, 0); | ||
1911 | |||
1912 | /* Clear the PMES flag */ | ||
1913 | tmp = ioread32(ioaddr + CSR20); | ||
1914 | tmp |= comet_csr20_pmes; | ||
1915 | iowrite32(tmp, ioaddr + CSR20); | ||
1916 | |||
1917 | /* Disable all wake-up events */ | ||
1918 | tulip_set_wolopts(pdev, 0); | ||
1919 | } | ||
1812 | netif_device_attach(dev); | 1920 | netif_device_attach(dev); |
1813 | 1921 | ||
1814 | if (netif_running(dev)) | 1922 | if (netif_running(dev)) |
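
With the get_wol/set_wol hooks wired into the ethtool_ops above, the tulip WOL state (WAKE_PHY and WAKE_MAGIC on the AN983B) becomes reachable through the standard ethtool interface. A hedged userspace sketch of querying it via the SIOCETHTOOL ioctl; the interface name is an assumption and error handling is minimal:

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
        struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
            return 1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);  /* hypothetical interface */
        ifr.ifr_data = (void *)&wol;
        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
            printf("supported 0x%x, enabled 0x%x\n", wol.supported, wol.wolopts);
        return 0;
    }
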
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c index 22bde49262c0..2e50077ff450 100644 --- a/drivers/net/typhoon.c +++ b/drivers/net/typhoon.c | |||
@@ -255,7 +255,7 @@ struct typhoon_shared { | |||
255 | struct rx_free rxBuff[RXFREE_ENTRIES] __3xp_aligned; | 255 | struct rx_free rxBuff[RXFREE_ENTRIES] __3xp_aligned; |
256 | u32 zeroWord; | 256 | u32 zeroWord; |
257 | struct tx_desc txHi[TXHI_ENTRIES]; | 257 | struct tx_desc txHi[TXHI_ENTRIES]; |
258 | } __attribute__ ((packed)); | 258 | } __packed; |
259 | 259 | ||
260 | struct rxbuff_ent { | 260 | struct rxbuff_ent { |
261 | struct sk_buff *skb; | 261 | struct sk_buff *skb; |
diff --git a/drivers/net/typhoon.h b/drivers/net/typhoon.h index 673fd5125914..88187fc84aa3 100644 --- a/drivers/net/typhoon.h +++ b/drivers/net/typhoon.h | |||
@@ -77,7 +77,7 @@ struct typhoon_indexes { | |||
77 | volatile __le32 cmdCleared; | 77 | volatile __le32 cmdCleared; |
78 | volatile __le32 respReady; | 78 | volatile __le32 respReady; |
79 | volatile __le32 rxHiReady; | 79 | volatile __le32 rxHiReady; |
80 | } __attribute__ ((packed)); | 80 | } __packed; |
81 | 81 | ||
82 | /* The host<->Typhoon interface | 82 | /* The host<->Typhoon interface |
83 | * Our means of communicating where things are | 83 | * Our means of communicating where things are |
@@ -125,7 +125,7 @@ struct typhoon_interface { | |||
125 | __le32 rxHiAddr; | 125 | __le32 rxHiAddr; |
126 | __le32 rxHiAddrHi; | 126 | __le32 rxHiAddrHi; |
127 | __le32 rxHiSize; | 127 | __le32 rxHiSize; |
128 | } __attribute__ ((packed)); | 128 | } __packed; |
129 | 129 | ||
130 | /* The Typhoon transmit/fragment descriptor | 130 | /* The Typhoon transmit/fragment descriptor |
131 | * | 131 | * |
@@ -187,7 +187,7 @@ struct tx_desc { | |||
187 | #define TYPHOON_TX_PF_VLAN_MASK cpu_to_le32(0x0ffff000) | 187 | #define TYPHOON_TX_PF_VLAN_MASK cpu_to_le32(0x0ffff000) |
188 | #define TYPHOON_TX_PF_INTERNAL cpu_to_le32(0xf0000000) | 188 | #define TYPHOON_TX_PF_INTERNAL cpu_to_le32(0xf0000000) |
189 | #define TYPHOON_TX_PF_VLAN_TAG_SHIFT 12 | 189 | #define TYPHOON_TX_PF_VLAN_TAG_SHIFT 12 |
190 | } __attribute__ ((packed)); | 190 | } __packed; |
191 | 191 | ||
192 | /* The TCP Segmentation offload option descriptor | 192 | /* The TCP Segmentation offload option descriptor |
193 | * | 193 | * |
@@ -208,7 +208,7 @@ struct tcpopt_desc { | |||
208 | __le32 respAddrLo; | 208 | __le32 respAddrLo; |
209 | __le32 bytesTx; | 209 | __le32 bytesTx; |
210 | __le32 status; | 210 | __le32 status; |
211 | } __attribute__ ((packed)); | 211 | } __packed; |
212 | 212 | ||
213 | /* The IPSEC Offload descriptor | 213 | /* The IPSEC Offload descriptor |
214 | * | 214 | * |
@@ -227,7 +227,7 @@ struct ipsec_desc { | |||
227 | __le32 sa1; | 227 | __le32 sa1; |
228 | __le32 sa2; | 228 | __le32 sa2; |
229 | __le32 reserved; | 229 | __le32 reserved; |
230 | } __attribute__ ((packed)); | 230 | } __packed; |
231 | 231 | ||
232 | /* The Typhoon receive descriptor (Updated by NIC) | 232 | /* The Typhoon receive descriptor (Updated by NIC) |
233 | * | 233 | * |
@@ -284,7 +284,7 @@ struct rx_desc { | |||
284 | #define TYPHOON_RX_UNKNOWN_SA cpu_to_le16(0x0100) | 284 | #define TYPHOON_RX_UNKNOWN_SA cpu_to_le16(0x0100) |
285 | #define TYPHOON_RX_ESP_FORMAT_ERR cpu_to_le16(0x0200) | 285 | #define TYPHOON_RX_ESP_FORMAT_ERR cpu_to_le16(0x0200) |
286 | __be32 vlanTag; | 286 | __be32 vlanTag; |
287 | } __attribute__ ((packed)); | 287 | } __packed; |
288 | 288 | ||
289 | /* The Typhoon free buffer descriptor, used to give a buffer to the NIC | 289 | /* The Typhoon free buffer descriptor, used to give a buffer to the NIC |
290 | * | 290 | * |
@@ -301,7 +301,7 @@ struct rx_free { | |||
301 | __le32 physAddrHi; | 301 | __le32 physAddrHi; |
302 | u32 virtAddr; | 302 | u32 virtAddr; |
303 | u32 virtAddrHi; | 303 | u32 virtAddrHi; |
304 | } __attribute__ ((packed)); | 304 | } __packed; |
305 | 305 | ||
306 | /* The Typhoon command descriptor, used for commands and responses | 306 | /* The Typhoon command descriptor, used for commands and responses |
307 | * | 307 | * |
@@ -347,7 +347,7 @@ struct cmd_desc { | |||
347 | __le16 parm1; | 347 | __le16 parm1; |
348 | __le32 parm2; | 348 | __le32 parm2; |
349 | __le32 parm3; | 349 | __le32 parm3; |
350 | } __attribute__ ((packed)); | 350 | } __packed; |
351 | 351 | ||
352 | /* The Typhoon response descriptor, see command descriptor for details | 352 | /* The Typhoon response descriptor, see command descriptor for details |
353 | */ | 353 | */ |
@@ -359,7 +359,7 @@ struct resp_desc { | |||
359 | __le16 parm1; | 359 | __le16 parm1; |
360 | __le32 parm2; | 360 | __le32 parm2; |
361 | __le32 parm3; | 361 | __le32 parm3; |
362 | } __attribute__ ((packed)); | 362 | } __packed; |
363 | 363 | ||
364 | #define INIT_COMMAND_NO_RESPONSE(x, command) \ | 364 | #define INIT_COMMAND_NO_RESPONSE(x, command) \ |
365 | do { struct cmd_desc *_ptr = (x); \ | 365 | do { struct cmd_desc *_ptr = (x); \ |
@@ -427,7 +427,7 @@ struct stats_resp { | |||
427 | #define TYPHOON_LINK_HALF_DUPLEX cpu_to_le32(0x00000000) | 427 | #define TYPHOON_LINK_HALF_DUPLEX cpu_to_le32(0x00000000) |
428 | __le32 unused2; | 428 | __le32 unused2; |
429 | __le32 unused3; | 429 | __le32 unused3; |
430 | } __attribute__ ((packed)); | 430 | } __packed; |
431 | 431 | ||
432 | /* TYPHOON_CMD_XCVR_SELECT xcvr values (resp.parm1) | 432 | /* TYPHOON_CMD_XCVR_SELECT xcvr values (resp.parm1) |
433 | */ | 433 | */ |
@@ -488,7 +488,7 @@ struct sa_descriptor { | |||
488 | u32 index; | 488 | u32 index; |
489 | u32 unused; | 489 | u32 unused; |
490 | u32 unused2; | 490 | u32 unused2; |
491 | } __attribute__ ((packed)); | 491 | } __packed; |
492 | 492 | ||
493 | /* TYPHOON_CMD_SET_OFFLOAD_TASKS bits (cmd.parm2 (Tx) & cmd.parm3 (Rx)) | 493 | /* TYPHOON_CMD_SET_OFFLOAD_TASKS bits (cmd.parm2 (Tx) & cmd.parm3 (Rx)) |
494 | * This is all for IPv4. | 494 | * This is all for IPv4. |
@@ -518,14 +518,14 @@ struct typhoon_file_header { | |||
518 | __le32 numSections; | 518 | __le32 numSections; |
519 | __le32 startAddr; | 519 | __le32 startAddr; |
520 | __le32 hmacDigest[5]; | 520 | __le32 hmacDigest[5]; |
521 | } __attribute__ ((packed)); | 521 | } __packed; |
522 | 522 | ||
523 | struct typhoon_section_header { | 523 | struct typhoon_section_header { |
524 | __le32 len; | 524 | __le32 len; |
525 | u16 checksum; | 525 | u16 checksum; |
526 | u16 reserved; | 526 | u16 reserved; |
527 | __le32 startAddr; | 527 | __le32 startAddr; |
528 | } __attribute__ ((packed)); | 528 | } __packed; |
529 | 529 | ||
530 | /* The Typhoon Register offsets | 530 | /* The Typhoon Register offsets |
531 | */ | 531 | */ |
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h index ef1fbeb11c6e..05a95586f3c5 100644 --- a/drivers/net/ucc_geth.h +++ b/drivers/net/ucc_geth.h | |||
@@ -106,7 +106,7 @@ struct ucc_geth { | |||
106 | u32 scar; /* Statistics carry register */ | 106 | u32 scar; /* Statistics carry register */ |
107 | u32 scam; /* Statistics carry mask register */ | 107 | u32 scam; /* Statistics carry mask register */ |
108 | u8 res5[0x200 - 0x1c4]; | 108 | u8 res5[0x200 - 0x1c4]; |
109 | } __attribute__ ((packed)); | 109 | } __packed; |
110 | 110 | ||
111 | /* UCC GETH TEMODR Register */ | 111 | /* UCC GETH TEMODR Register */ |
112 | #define TEMODER_TX_RMON_STATISTICS_ENABLE 0x0100 /* enable Tx statistics | 112 | #define TEMODER_TX_RMON_STATISTICS_ENABLE 0x0100 /* enable Tx statistics |
@@ -420,11 +420,11 @@ struct ucc_geth { | |||
420 | 420 | ||
421 | struct ucc_geth_thread_data_tx { | 421 | struct ucc_geth_thread_data_tx { |
422 | u8 res0[104]; | 422 | u8 res0[104]; |
423 | } __attribute__ ((packed)); | 423 | } __packed; |
424 | 424 | ||
425 | struct ucc_geth_thread_data_rx { | 425 | struct ucc_geth_thread_data_rx { |
426 | u8 res0[40]; | 426 | u8 res0[40]; |
427 | } __attribute__ ((packed)); | 427 | } __packed; |
428 | 428 | ||
429 | /* Send Queue Queue-Descriptor */ | 429 | /* Send Queue Queue-Descriptor */ |
430 | struct ucc_geth_send_queue_qd { | 430 | struct ucc_geth_send_queue_qd { |
@@ -432,19 +432,19 @@ struct ucc_geth_send_queue_qd { | |||
432 | u8 res0[0x8]; | 432 | u8 res0[0x8]; |
433 | u32 last_bd_completed_address;/* initialize to last entry in BD ring */ | 433 | u32 last_bd_completed_address;/* initialize to last entry in BD ring */ |
434 | u8 res1[0x30]; | 434 | u8 res1[0x30]; |
435 | } __attribute__ ((packed)); | 435 | } __packed; |
436 | 436 | ||
437 | struct ucc_geth_send_queue_mem_region { | 437 | struct ucc_geth_send_queue_mem_region { |
438 | struct ucc_geth_send_queue_qd sqqd[NUM_TX_QUEUES]; | 438 | struct ucc_geth_send_queue_qd sqqd[NUM_TX_QUEUES]; |
439 | } __attribute__ ((packed)); | 439 | } __packed; |
440 | 440 | ||
441 | struct ucc_geth_thread_tx_pram { | 441 | struct ucc_geth_thread_tx_pram { |
442 | u8 res0[64]; | 442 | u8 res0[64]; |
443 | } __attribute__ ((packed)); | 443 | } __packed; |
444 | 444 | ||
445 | struct ucc_geth_thread_rx_pram { | 445 | struct ucc_geth_thread_rx_pram { |
446 | u8 res0[128]; | 446 | u8 res0[128]; |
447 | } __attribute__ ((packed)); | 447 | } __packed; |
448 | 448 | ||
449 | #define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING 64 | 449 | #define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING 64 |
450 | #define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8 64 | 450 | #define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8 64 |
@@ -484,7 +484,7 @@ struct ucc_geth_scheduler { | |||
484 | /**< weight factor for queues */ | 484 | /**< weight factor for queues */ |
485 | u32 minw; /* temporary variable handled by QE */ | 485 | u32 minw; /* temporary variable handled by QE */ |
486 | u8 res1[0x70 - 0x64]; | 486 | u8 res1[0x70 - 0x64]; |
487 | } __attribute__ ((packed)); | 487 | } __packed; |
488 | 488 | ||
489 | struct ucc_geth_tx_firmware_statistics_pram { | 489 | struct ucc_geth_tx_firmware_statistics_pram { |
490 | u32 sicoltx; /* single collision */ | 490 | u32 sicoltx; /* single collision */ |
@@ -506,7 +506,7 @@ struct ucc_geth_tx_firmware_statistics_pram { | |||
506 | and 1518 octets */ | 506 | and 1518 octets */ |
507 | u32 txpktsjumbo; /* total packets (including bad) between 1024 | 507 | u32 txpktsjumbo; /* total packets (including bad) between 1024 |
508 | and MAXLength octets */ | 508 | and MAXLength octets */ |
509 | } __attribute__ ((packed)); | 509 | } __packed; |
510 | 510 | ||
511 | struct ucc_geth_rx_firmware_statistics_pram { | 511 | struct ucc_geth_rx_firmware_statistics_pram { |
512 | u32 frrxfcser; /* frames with crc error */ | 512 | u32 frrxfcser; /* frames with crc error */ |
@@ -540,7 +540,7 @@ struct ucc_geth_rx_firmware_statistics_pram { | |||
540 | replaced */ | 540 | replaced */ |
541 | u32 insertvlan; /* total frames that had their VLAN tag | 541 | u32 insertvlan; /* total frames that had their VLAN tag |
542 | inserted */ | 542 | inserted */ |
543 | } __attribute__ ((packed)); | 543 | } __packed; |
544 | 544 | ||
545 | struct ucc_geth_rx_interrupt_coalescing_entry { | 545 | struct ucc_geth_rx_interrupt_coalescing_entry { |
546 | u32 interruptcoalescingmaxvalue; /* interrupt coalescing max | 546 | u32 interruptcoalescingmaxvalue; /* interrupt coalescing max |
@@ -548,23 +548,23 @@ struct ucc_geth_rx_interrupt_coalescing_entry { | |||
548 | u32 interruptcoalescingcounter; /* interrupt coalescing counter, | 548 | u32 interruptcoalescingcounter; /* interrupt coalescing counter, |
549 | initialize to | 549 | initialize to |
550 | interruptcoalescingmaxvalue */ | 550 | interruptcoalescingmaxvalue */ |
551 | } __attribute__ ((packed)); | 551 | } __packed; |
552 | 552 | ||
553 | struct ucc_geth_rx_interrupt_coalescing_table { | 553 | struct ucc_geth_rx_interrupt_coalescing_table { |
554 | struct ucc_geth_rx_interrupt_coalescing_entry coalescingentry[NUM_RX_QUEUES]; | 554 | struct ucc_geth_rx_interrupt_coalescing_entry coalescingentry[NUM_RX_QUEUES]; |
555 | /**< interrupt coalescing entry */ | 555 | /**< interrupt coalescing entry */ |
556 | } __attribute__ ((packed)); | 556 | } __packed; |
557 | 557 | ||
558 | struct ucc_geth_rx_prefetched_bds { | 558 | struct ucc_geth_rx_prefetched_bds { |
559 | struct qe_bd bd[NUM_BDS_IN_PREFETCHED_BDS]; /* prefetched bd */ | 559 | struct qe_bd bd[NUM_BDS_IN_PREFETCHED_BDS]; /* prefetched bd */ |
560 | } __attribute__ ((packed)); | 560 | } __packed; |
561 | 561 | ||
562 | struct ucc_geth_rx_bd_queues_entry { | 562 | struct ucc_geth_rx_bd_queues_entry { |
563 | u32 bdbaseptr; /* BD base pointer */ | 563 | u32 bdbaseptr; /* BD base pointer */ |
564 | u32 bdptr; /* BD pointer */ | 564 | u32 bdptr; /* BD pointer */ |
565 | u32 externalbdbaseptr; /* external BD base pointer */ | 565 | u32 externalbdbaseptr; /* external BD base pointer */ |
566 | u32 externalbdptr; /* external BD pointer */ | 566 | u32 externalbdptr; /* external BD pointer */ |
567 | } __attribute__ ((packed)); | 567 | } __packed; |
568 | 568 | ||
569 | struct ucc_geth_tx_global_pram { | 569 | struct ucc_geth_tx_global_pram { |
570 | u16 temoder; | 570 | u16 temoder; |
@@ -580,13 +580,13 @@ struct ucc_geth_tx_global_pram { | |||
580 | u32 tqptr; /* a base pointer to the Tx Queues Memory | 580 | u32 tqptr; /* a base pointer to the Tx Queues Memory |
581 | Region */ | 581 | Region */ |
582 | u8 res2[0x80 - 0x74]; | 582 | u8 res2[0x80 - 0x74]; |
583 | } __attribute__ ((packed)); | 583 | } __packed; |
584 | 584 | ||
585 | /* structure representing Extended Filtering Global Parameters in PRAM */ | 585 | /* structure representing Extended Filtering Global Parameters in PRAM */ |
586 | struct ucc_geth_exf_global_pram { | 586 | struct ucc_geth_exf_global_pram { |
587 | u32 l2pcdptr; /* individual address filter, high */ | 587 | u32 l2pcdptr; /* individual address filter, high */ |
588 | u8 res0[0x10 - 0x04]; | 588 | u8 res0[0x10 - 0x04]; |
589 | } __attribute__ ((packed)); | 589 | } __packed; |
590 | 590 | ||
591 | struct ucc_geth_rx_global_pram { | 591 | struct ucc_geth_rx_global_pram { |
592 | u32 remoder; /* ethernet mode reg. */ | 592 | u32 remoder; /* ethernet mode reg. */ |
@@ -620,7 +620,7 @@ struct ucc_geth_rx_global_pram { | |||
620 | u32 exfGlobalParam; /* base address for extended filtering global | 620 | u32 exfGlobalParam; /* base address for extended filtering global |
621 | parameters */ | 621 | parameters */ |
622 | u8 res6[0x100 - 0xC4]; /* Initialize to zero */ | 622 | u8 res6[0x100 - 0xC4]; /* Initialize to zero */ |
623 | } __attribute__ ((packed)); | 623 | } __packed; |
624 | 624 | ||
625 | #define GRACEFUL_STOP_ACKNOWLEDGE_RX 0x01 | 625 | #define GRACEFUL_STOP_ACKNOWLEDGE_RX 0x01 |
626 | 626 | ||
@@ -639,7 +639,7 @@ struct ucc_geth_init_pram { | |||
639 | u32 txglobal; /* tx global */ | 639 | u32 txglobal; /* tx global */ |
640 | u32 txthread[ENET_INIT_PARAM_MAX_ENTRIES_TX]; /* tx threads */ | 640 | u32 txthread[ENET_INIT_PARAM_MAX_ENTRIES_TX]; /* tx threads */ |
641 | u8 res3[0x1]; | 641 | u8 res3[0x1]; |
642 | } __attribute__ ((packed)); | 642 | } __packed; |
643 | 643 | ||
644 | #define ENET_INIT_PARAM_RGF_SHIFT (32 - 4) | 644 | #define ENET_INIT_PARAM_RGF_SHIFT (32 - 4) |
645 | #define ENET_INIT_PARAM_TGF_SHIFT (32 - 8) | 645 | #define ENET_INIT_PARAM_TGF_SHIFT (32 - 8) |
@@ -661,7 +661,7 @@ struct ucc_geth_82xx_enet_address { | |||
661 | u16 h; /* address (MSB) */ | 661 | u16 h; /* address (MSB) */ |
662 | u16 m; /* address */ | 662 | u16 m; /* address */ |
663 | u16 l; /* address (LSB) */ | 663 | u16 l; /* address (LSB) */ |
664 | } __attribute__ ((packed)); | 664 | } __packed; |
665 | 665 | ||
666 | /* structure representing 82xx Address Filtering PRAM */ | 666 | /* structure representing 82xx Address Filtering PRAM */ |
667 | struct ucc_geth_82xx_address_filtering_pram { | 667 | struct ucc_geth_82xx_address_filtering_pram { |
@@ -672,7 +672,7 @@ struct ucc_geth_82xx_address_filtering_pram { | |||
672 | struct ucc_geth_82xx_enet_address __iomem taddr; | 672 | struct ucc_geth_82xx_enet_address __iomem taddr; |
673 | struct ucc_geth_82xx_enet_address __iomem paddr[NUM_OF_PADDRS]; | 673 | struct ucc_geth_82xx_enet_address __iomem paddr[NUM_OF_PADDRS]; |
674 | u8 res0[0x40 - 0x38]; | 674 | u8 res0[0x40 - 0x38]; |
675 | } __attribute__ ((packed)); | 675 | } __packed; |
676 | 676 | ||
677 | /* GETH Tx firmware statistics structure, used when calling | 677 | /* GETH Tx firmware statistics structure, used when calling |
678 | UCC_GETH_GetStatistics. */ | 678 | UCC_GETH_GetStatistics. */ |
@@ -696,7 +696,7 @@ struct ucc_geth_tx_firmware_statistics { | |||
696 | and 1518 octets */ | 696 | and 1518 octets */ |
697 | u32 txpktsjumbo; /* total packets (including bad) between 1024 | 697 | u32 txpktsjumbo; /* total packets (including bad) between 1024 |
698 | and MAXLength octets */ | 698 | and MAXLength octets */ |
699 | } __attribute__ ((packed)); | 699 | } __packed; |
700 | 700 | ||
701 | /* GETH Rx firmware statistics structure, used when calling | 701 | /* GETH Rx firmware statistics structure, used when calling |
702 | UCC_GETH_GetStatistics. */ | 702 | UCC_GETH_GetStatistics. */ |
@@ -732,7 +732,7 @@ struct ucc_geth_rx_firmware_statistics { | |||
732 | replaced */ | 732 | replaced */ |
733 | u32 insertvlan; /* total frames that had their VLAN tag | 733 | u32 insertvlan; /* total frames that had their VLAN tag |
734 | inserted */ | 734 | inserted */ |
735 | } __attribute__ ((packed)); | 735 | } __packed; |
736 | 736 | ||
737 | /* GETH hardware statistics structure, used when calling | 737 | /* GETH hardware statistics structure, used when calling |
738 | UCC_GETH_GetStatistics. */ | 738 | UCC_GETH_GetStatistics. */ |
@@ -781,7 +781,7 @@ struct ucc_geth_hardware_statistics { | |||
781 | u32 rbca; /* Total number of frames received successfully | 781 | u32 rbca; /* Total number of frames received successfully |
782 | that had destination address equal to the | 782 | that had destination address equal to the |
783 | broadcast address */ | 783 | broadcast address */ |
784 | } __attribute__ ((packed)); | 784 | } __packed; |
785 | 785 | ||
786 | /* UCC GETH Tx errors returned via TxConf callback */ | 786 | /* UCC GETH Tx errors returned via TxConf callback */ |
787 | #define TX_ERRORS_DEF 0x0200 | 787 | #define TX_ERRORS_DEF 0x0200 |
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c index 1f802e90474c..7e797ed0439a 100644 --- a/drivers/net/usb/asix.c +++ b/drivers/net/usb/asix.c | |||
@@ -179,7 +179,7 @@ struct ax88172_int_data { | |||
179 | __le16 res2; | 179 | __le16 res2; |
180 | u8 status; | 180 | u8 status; |
181 | __le16 res3; | 181 | __le16 res3; |
182 | } __attribute__ ((packed)); | 182 | } __packed; |
183 | 183 | ||
184 | static int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, | 184 | static int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, |
185 | u16 size, void *data) | 185 | u16 size, void *data) |
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 0a3c41faea9c..c8570b097880 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c | |||
@@ -211,7 +211,7 @@ struct hso_serial_state_notification { | |||
211 | u16 wIndex; | 211 | u16 wIndex; |
212 | u16 wLength; | 212 | u16 wLength; |
213 | u16 UART_state_bitmap; | 213 | u16 UART_state_bitmap; |
214 | } __attribute__((packed)); | 214 | } __packed; |
215 | 215 | ||
216 | struct hso_tiocmget { | 216 | struct hso_tiocmget { |
217 | struct mutex mutex; | 217 | struct mutex mutex; |
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index d6078b8c4273..2b7b39cad1ce 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c | |||
@@ -207,7 +207,7 @@ struct kaweth_ethernet_configuration | |||
207 | __le16 segment_size; | 207 | __le16 segment_size; |
208 | __u16 max_multicast_filters; | 208 | __u16 max_multicast_filters; |
209 | __u8 reserved3; | 209 | __u8 reserved3; |
210 | } __attribute__ ((packed)); | 210 | } __packed; |
211 | 211 | ||
212 | /**************************************************************** | 212 | /**************************************************************** |
213 | * kaweth_device | 213 | * kaweth_device |
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c index 961a8ed38d8f..ba72a7281cb0 100644 --- a/drivers/net/usb/net1080.c +++ b/drivers/net/usb/net1080.c | |||
@@ -64,13 +64,13 @@ struct nc_header { // packed: | |||
64 | // all else is optional, and must start with: | 64 | // all else is optional, and must start with: |
65 | // __le16 vendorId; // from usb-if | 65 | // __le16 vendorId; // from usb-if |
66 | // __le16 productId; | 66 | // __le16 productId; |
67 | } __attribute__((__packed__)); | 67 | } __packed; |
68 | 68 | ||
69 | #define PAD_BYTE ((unsigned char)0xAC) | 69 | #define PAD_BYTE ((unsigned char)0xAC) |
70 | 70 | ||
71 | struct nc_trailer { | 71 | struct nc_trailer { |
72 | __le16 packet_id; | 72 | __le16 packet_id; |
73 | } __attribute__((__packed__)); | 73 | } __packed; |
74 | 74 | ||
75 | // packets may use FLAG_FRAMING_NC and optional pad | 75 | // packets may use FLAG_FRAMING_NC and optional pad |
76 | #define FRAMED_SIZE(mtu) (sizeof (struct nc_header) \ | 76 | #define FRAMED_SIZE(mtu) (sizeof (struct nc_header) \ |
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c index f1942d69a0d5..ee85c8b9a858 100644 --- a/drivers/net/usb/sierra_net.c +++ b/drivers/net/usb/sierra_net.c | |||
@@ -165,7 +165,7 @@ struct lsi_umts { | |||
165 | u8 gw_addr_len; /* NW-supplied GW address len */ | 165 | u8 gw_addr_len; /* NW-supplied GW address len */ |
166 | u8 gw_addr[16]; /* NW-supplied GW address (bigendian) */ | 166 | u8 gw_addr[16]; /* NW-supplied GW address (bigendian) */ |
167 | u8 reserved[8]; | 167 | u8 reserved[8]; |
168 | } __attribute__ ((packed)); | 168 | } __packed; |
169 | 169 | ||
170 | #define SIERRA_NET_LSI_COMMON_LEN 4 | 170 | #define SIERRA_NET_LSI_COMMON_LEN 4 |
171 | #define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts)) | 171 | #define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts)) |
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h index c38191179fae..f7b33ae7a703 100644 --- a/drivers/net/via-velocity.h +++ b/drivers/net/via-velocity.h | |||
@@ -193,7 +193,7 @@ struct rx_desc { | |||
193 | __le32 pa_low; /* Low 32 bit PCI address */ | 193 | __le32 pa_low; /* Low 32 bit PCI address */ |
194 | __le16 pa_high; /* Next 16 bit PCI address (48 total) */ | 194 | __le16 pa_high; /* Next 16 bit PCI address (48 total) */ |
195 | __le16 size; /* bits 0--14 - frame size, bit 15 - enable int. */ | 195 | __le16 size; /* bits 0--14 - frame size, bit 15 - enable int. */ |
196 | } __attribute__ ((__packed__)); | 196 | } __packed; |
197 | 197 | ||
198 | /* | 198 | /* |
199 | * Transmit descriptor | 199 | * Transmit descriptor |
@@ -208,7 +208,7 @@ struct tdesc1 { | |||
208 | __le16 vlan; | 208 | __le16 vlan; |
209 | u8 TCR; | 209 | u8 TCR; |
210 | u8 cmd; /* bits 0--1 - TCPLS, bits 4--7 - CMDZ */ | 210 | u8 cmd; /* bits 0--1 - TCPLS, bits 4--7 - CMDZ */ |
211 | } __attribute__ ((__packed__)); | 211 | } __packed; |
212 | 212 | ||
213 | enum { | 213 | enum { |
214 | TD_QUEUE = cpu_to_le16(0x8000) | 214 | TD_QUEUE = cpu_to_le16(0x8000) |
@@ -218,7 +218,7 @@ struct td_buf { | |||
218 | __le32 pa_low; | 218 | __le32 pa_low; |
219 | __le16 pa_high; | 219 | __le16 pa_high; |
220 | __le16 size; /* bits 0--13 - size, bit 15 - queue */ | 220 | __le16 size; /* bits 0--13 - size, bit 15 - queue */ |
221 | } __attribute__ ((__packed__)); | 221 | } __packed; |
222 | 222 | ||
223 | struct tx_desc { | 223 | struct tx_desc { |
224 | struct tdesc0 tdesc0; | 224 | struct tdesc0 tdesc0; |
@@ -1096,7 +1096,7 @@ struct mac_regs { | |||
1096 | 1096 | ||
1097 | volatile __le16 PatternCRC[8]; /* 0xB0 */ | 1097 | volatile __le16 PatternCRC[8]; /* 0xB0 */ |
1098 | volatile __le32 ByteMask[4][4]; /* 0xC0 */ | 1098 | volatile __le32 ByteMask[4][4]; /* 0xC0 */ |
1099 | } __attribute__ ((__packed__)); | 1099 | } __packed; |
1100 | 1100 | ||
1101 | 1101 | ||
1102 | enum hw_mib { | 1102 | enum hw_mib { |
@@ -1216,7 +1216,7 @@ struct arp_packet { | |||
1216 | u8 ar_sip[4]; | 1216 | u8 ar_sip[4]; |
1217 | u8 ar_tha[ETH_ALEN]; | 1217 | u8 ar_tha[ETH_ALEN]; |
1218 | u8 ar_tip[4]; | 1218 | u8 ar_tip[4]; |
1219 | } __attribute__ ((__packed__)); | 1219 | } __packed; |
1220 | 1220 | ||
1221 | struct _magic_packet { | 1221 | struct _magic_packet { |
1222 | u8 dest_mac[6]; | 1222 | u8 dest_mac[6]; |
@@ -1224,7 +1224,7 @@ struct _magic_packet { | |||
1224 | __be16 type; | 1224 | __be16 type; |
1225 | u8 MAC[16][6]; | 1225 | u8 MAC[16][6]; |
1226 | u8 password[6]; | 1226 | u8 password[6]; |
1227 | } __attribute__ ((__packed__)); | 1227 | } __packed; |
1228 | 1228 | ||
1229 | /* | 1229 | /* |
1230 | * Store for chip context when saving and restoring status. Not | 1230 | * Store for chip context when saving and restoring status. Not |
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c index b504bd561362..45c5dc225631 100644 --- a/drivers/net/vxge/vxge-main.c +++ b/drivers/net/vxge/vxge-main.c | |||
@@ -4012,7 +4012,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
4012 | int high_dma = 0; | 4012 | int high_dma = 0; |
4013 | u64 vpath_mask = 0; | 4013 | u64 vpath_mask = 0; |
4014 | struct vxgedev *vdev; | 4014 | struct vxgedev *vdev; |
4015 | struct vxge_config ll_config; | 4015 | struct vxge_config *ll_config = NULL; |
4016 | struct vxge_hw_device_config *device_config = NULL; | 4016 | struct vxge_hw_device_config *device_config = NULL; |
4017 | struct vxge_hw_device_attr attr; | 4017 | struct vxge_hw_device_attr attr; |
4018 | int i, j, no_of_vpath = 0, max_vpath_supported = 0; | 4018 | int i, j, no_of_vpath = 0, max_vpath_supported = 0; |
@@ -4071,17 +4071,24 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
4071 | goto _exit0; | 4071 | goto _exit0; |
4072 | } | 4072 | } |
4073 | 4073 | ||
4074 | memset(&ll_config, 0, sizeof(struct vxge_config)); | 4074 | ll_config = kzalloc(sizeof(*ll_config), GFP_KERNEL); |
4075 | ll_config.tx_steering_type = TX_MULTIQ_STEERING; | 4075 | if (!ll_config) { |
4076 | ll_config.intr_type = MSI_X; | 4076 | ret = -ENOMEM; |
4077 | ll_config.napi_weight = NEW_NAPI_WEIGHT; | 4077 | vxge_debug_init(VXGE_ERR, |
4078 | ll_config.rth_steering = RTH_STEERING; | 4078 | "ll_config : malloc failed %s %d", |
4079 | __FILE__, __LINE__); | ||
4080 | goto _exit0; | ||
4081 | } | ||
4082 | ll_config->tx_steering_type = TX_MULTIQ_STEERING; | ||
4083 | ll_config->intr_type = MSI_X; | ||
4084 | ll_config->napi_weight = NEW_NAPI_WEIGHT; | ||
4085 | ll_config->rth_steering = RTH_STEERING; | ||
4079 | 4086 | ||
4080 | /* get the default configuration parameters */ | 4087 | /* get the default configuration parameters */ |
4081 | vxge_hw_device_config_default_get(device_config); | 4088 | vxge_hw_device_config_default_get(device_config); |
4082 | 4089 | ||
4083 | /* initialize configuration parameters */ | 4090 | /* initialize configuration parameters */ |
4084 | vxge_device_config_init(device_config, &ll_config.intr_type); | 4091 | vxge_device_config_init(device_config, &ll_config->intr_type); |
4085 | 4092 | ||
4086 | ret = pci_enable_device(pdev); | 4093 | ret = pci_enable_device(pdev); |
4087 | if (ret) { | 4094 | if (ret) { |
@@ -4134,7 +4141,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
4134 | (unsigned long long)pci_resource_start(pdev, 0)); | 4141 | (unsigned long long)pci_resource_start(pdev, 0)); |
4135 | 4142 | ||
4136 | status = vxge_hw_device_hw_info_get(attr.bar0, | 4143 | status = vxge_hw_device_hw_info_get(attr.bar0, |
4137 | &ll_config.device_hw_info); | 4144 | &ll_config->device_hw_info); |
4138 | if (status != VXGE_HW_OK) { | 4145 | if (status != VXGE_HW_OK) { |
4139 | vxge_debug_init(VXGE_ERR, | 4146 | vxge_debug_init(VXGE_ERR, |
4140 | "%s: Reading of hardware info failed." | 4147 | "%s: Reading of hardware info failed." |
@@ -4143,7 +4150,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
4143 | goto _exit3; | 4150 | goto _exit3; |
4144 | } | 4151 | } |
4145 | 4152 | ||
4146 | if (ll_config.device_hw_info.fw_version.major != | 4153 | if (ll_config->device_hw_info.fw_version.major != |
4147 | VXGE_DRIVER_FW_VERSION_MAJOR) { | 4154 | VXGE_DRIVER_FW_VERSION_MAJOR) { |
4148 | vxge_debug_init(VXGE_ERR, | 4155 | vxge_debug_init(VXGE_ERR, |
4149 | "%s: Incorrect firmware version." | 4156 | "%s: Incorrect firmware version." |
@@ -4153,7 +4160,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
4153 | goto _exit3; | 4160 | goto _exit3; |
4154 | } | 4161 | } |
4155 | 4162 | ||
4156 | vpath_mask = ll_config.device_hw_info.vpath_mask; | 4163 | vpath_mask = ll_config->device_hw_info.vpath_mask; |
4157 | if (vpath_mask == 0) { | 4164 | if (vpath_mask == 0) { |
4158 | vxge_debug_ll_config(VXGE_TRACE, | 4165 | vxge_debug_ll_config(VXGE_TRACE, |
4159 | "%s: No vpaths available in device", VXGE_DRIVER_NAME); | 4166 | "%s: No vpaths available in device", VXGE_DRIVER_NAME); |
@@ -4165,10 +4172,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
4165 | "%s:%d Vpath mask = %llx", __func__, __LINE__, | 4172 | "%s:%d Vpath mask = %llx", __func__, __LINE__, |
4166 | (unsigned long long)vpath_mask); | 4173 | (unsigned long long)vpath_mask); |
4167 | 4174 | ||
4168 | function_mode = ll_config.device_hw_info.function_mode; | 4175 | function_mode = ll_config->device_hw_info.function_mode; |
4169 | host_type = ll_config.device_hw_info.host_type; | 4176 | host_type = ll_config->device_hw_info.host_type; |
4170 | is_privileged = __vxge_hw_device_is_privilaged(host_type, | 4177 | is_privileged = __vxge_hw_device_is_privilaged(host_type, |
4171 | ll_config.device_hw_info.func_id); | 4178 | ll_config->device_hw_info.func_id); |
4172 | 4179 | ||
4173 | /* Check how many vpaths are available */ | 4180 | /* Check how many vpaths are available */ |
4174 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | 4181 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { |
@@ -4182,7 +4189,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
4182 | 4189 | ||
4183 | /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */ | 4190 | /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */ |
4184 | if (is_sriov(function_mode) && (max_config_dev > 1) && | 4191 | if (is_sriov(function_mode) && (max_config_dev > 1) && |
4185 | (ll_config.intr_type != INTA) && | 4192 | (ll_config->intr_type != INTA) && |
4186 | (is_privileged == VXGE_HW_OK)) { | 4193 | (is_privileged == VXGE_HW_OK)) { |
4187 | ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs) | 4194 | ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs) |
4188 | ? (max_config_dev - 1) : num_vfs); | 4195 | ? (max_config_dev - 1) : num_vfs); |
@@ -4195,7 +4202,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
4195 | * Configure vpaths and get driver configured number of vpaths | 4202 | * Configure vpaths and get driver configured number of vpaths |
4196 | * which is less than or equal to the maximum vpaths per function. | 4203 | * which is less than or equal to the maximum vpaths per function. |
4197 | */ | 4204 | */ |
4198 | no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, &ll_config); | 4205 | no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config); |
4199 | if (!no_of_vpath) { | 4206 | if (!no_of_vpath) { |
4200 | vxge_debug_ll_config(VXGE_ERR, | 4207 | vxge_debug_ll_config(VXGE_ERR, |
4201 | "%s: No more vpaths to configure", VXGE_DRIVER_NAME); | 4208 | "%s: No more vpaths to configure", VXGE_DRIVER_NAME); |
@@ -4230,21 +4237,21 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
4230 | /* set private device info */ | 4237 | /* set private device info */ |
4231 | pci_set_drvdata(pdev, hldev); | 4238 | pci_set_drvdata(pdev, hldev); |
4232 | 4239 | ||
4233 | ll_config.gro_enable = VXGE_GRO_ALWAYS_AGGREGATE; | 4240 | ll_config->gro_enable = VXGE_GRO_ALWAYS_AGGREGATE; |
4234 | ll_config.fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS; | 4241 | ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS; |
4235 | ll_config.addr_learn_en = addr_learn_en; | 4242 | ll_config->addr_learn_en = addr_learn_en; |
4236 | ll_config.rth_algorithm = RTH_ALG_JENKINS; | 4243 | ll_config->rth_algorithm = RTH_ALG_JENKINS; |
4237 | ll_config.rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4; | 4244 | ll_config->rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4; |
4238 | ll_config.rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE; | 4245 | ll_config->rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE; |
4239 | ll_config.rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE; | 4246 | ll_config->rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE; |
4240 | ll_config.rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE; | 4247 | ll_config->rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE; |
4241 | ll_config.rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE; | 4248 | ll_config->rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE; |
4242 | ll_config.rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE; | 4249 | ll_config->rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE; |
4243 | ll_config.rth_bkt_sz = RTH_BUCKET_SIZE; | 4250 | ll_config->rth_bkt_sz = RTH_BUCKET_SIZE; |
4244 | ll_config.tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; | 4251 | ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; |
4245 | ll_config.rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; | 4252 | ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; |
4246 | 4253 | ||
4247 | if (vxge_device_register(hldev, &ll_config, high_dma, no_of_vpath, | 4254 | if (vxge_device_register(hldev, ll_config, high_dma, no_of_vpath, |
4248 | &vdev)) { | 4255 | &vdev)) { |
4249 | ret = -EINVAL; | 4256 | ret = -EINVAL; |
4250 | goto _exit4; | 4257 | goto _exit4; |
@@ -4275,7 +4282,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
4275 | vdev->vpaths[j].vdev = vdev; | 4282 | vdev->vpaths[j].vdev = vdev; |
4276 | vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath; | 4283 | vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath; |
4277 | memcpy((u8 *)vdev->vpaths[j].macaddr, | 4284 | memcpy((u8 *)vdev->vpaths[j].macaddr, |
4278 | (u8 *)ll_config.device_hw_info.mac_addrs[i], | 4285 | ll_config->device_hw_info.mac_addrs[i], |
4279 | ETH_ALEN); | 4286 | ETH_ALEN); |
4280 | 4287 | ||
4281 | /* Initialize the mac address list header */ | 4288 | /* Initialize the mac address list header */ |
@@ -4296,18 +4303,18 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
4296 | 4303 | ||
4297 | macaddr = (u8 *)vdev->vpaths[0].macaddr; | 4304 | macaddr = (u8 *)vdev->vpaths[0].macaddr; |
4298 | 4305 | ||
4299 | ll_config.device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0'; | 4306 | ll_config->device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0'; |
4300 | ll_config.device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0'; | 4307 | ll_config->device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0'; |
4301 | ll_config.device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0'; | 4308 | ll_config->device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0'; |
4302 | 4309 | ||
4303 | vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s", | 4310 | vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s", |
4304 | vdev->ndev->name, ll_config.device_hw_info.serial_number); | 4311 | vdev->ndev->name, ll_config->device_hw_info.serial_number); |
4305 | 4312 | ||
4306 | vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s", | 4313 | vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s", |
4307 | vdev->ndev->name, ll_config.device_hw_info.part_number); | 4314 | vdev->ndev->name, ll_config->device_hw_info.part_number); |
4308 | 4315 | ||
4309 | vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter", | 4316 | vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter", |
4310 | vdev->ndev->name, ll_config.device_hw_info.product_desc); | 4317 | vdev->ndev->name, ll_config->device_hw_info.product_desc); |
4311 | 4318 | ||
4312 | vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM", | 4319 | vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM", |
4313 | vdev->ndev->name, macaddr); | 4320 | vdev->ndev->name, macaddr); |
@@ -4317,11 +4324,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
4317 | 4324 | ||
4318 | vxge_debug_init(VXGE_TRACE, | 4325 | vxge_debug_init(VXGE_TRACE, |
4319 | "%s: Firmware version : %s Date : %s", vdev->ndev->name, | 4326 | "%s: Firmware version : %s Date : %s", vdev->ndev->name, |
4320 | ll_config.device_hw_info.fw_version.version, | 4327 | ll_config->device_hw_info.fw_version.version, |
4321 | ll_config.device_hw_info.fw_date.date); | 4328 | ll_config->device_hw_info.fw_date.date); |
4322 | 4329 | ||
4323 | if (new_device) { | 4330 | if (new_device) { |
4324 | switch (ll_config.device_hw_info.function_mode) { | 4331 | switch (ll_config->device_hw_info.function_mode) { |
4325 | case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION: | 4332 | case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION: |
4326 | vxge_debug_init(VXGE_TRACE, | 4333 | vxge_debug_init(VXGE_TRACE, |
4327 | "%s: Single Function Mode Enabled", vdev->ndev->name); | 4334 | "%s: Single Function Mode Enabled", vdev->ndev->name); |
@@ -4344,7 +4351,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
4344 | vxge_print_parm(vdev, vpath_mask); | 4351 | vxge_print_parm(vdev, vpath_mask); |
4345 | 4352 | ||
4346 | /* Store the fw version for ethtool option */ | 4353 | /* Store the fw version for ethtool option */ |
4347 | strcpy(vdev->fw_version, ll_config.device_hw_info.fw_version.version); | 4354 | strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version); |
4348 | memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN); | 4355 | memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN); |
4349 | memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN); | 4356 | memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN); |
4350 | 4357 | ||
@@ -4383,7 +4390,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
4383 | * present to prevent such a failure. | 4390 | * present to prevent such a failure. |
4384 | */ | 4391 | */ |
4385 | 4392 | ||
4386 | if (ll_config.device_hw_info.function_mode == | 4393 | if (ll_config->device_hw_info.function_mode == |
4387 | VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) | 4394 | VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) |
4388 | if (vdev->config.intr_type == INTA) | 4395 | if (vdev->config.intr_type == INTA) |
4389 | vxge_hw_device_unmask_all(hldev); | 4396 | vxge_hw_device_unmask_all(hldev); |
@@ -4395,6 +4402,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
4395 | VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev), | 4402 | VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev), |
4396 | vxge_hw_device_trace_level_get(hldev)); | 4403 | vxge_hw_device_trace_level_get(hldev)); |
4397 | 4404 | ||
4405 | kfree(ll_config); | ||
4398 | return 0; | 4406 | return 0; |
4399 | 4407 | ||
4400 | _exit5: | 4408 | _exit5: |
@@ -4412,6 +4420,7 @@ _exit2: | |||
4412 | _exit1: | 4420 | _exit1: |
4413 | pci_disable_device(pdev); | 4421 | pci_disable_device(pdev); |
4414 | _exit0: | 4422 | _exit0: |
4423 | kfree(ll_config); | ||
4415 | kfree(device_config); | 4424 | kfree(device_config); |
4416 | driver_config->config_dev_cnt--; | 4425 | driver_config->config_dev_cnt--; |
4417 | pci_set_drvdata(pdev, NULL); | 4426 | pci_set_drvdata(pdev, NULL); |
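The vxge hunk above converts the large ll_config structure from a stack variable in vxge_probe() to a heap-allocated pointer (the allocation itself appears earlier in the patch), which is why every ll_config.field access becomes ll_config->field and why kfree(ll_config) is added both before the successful return and at the _exit0 error label. A minimal sketch of that pattern follows; the struct and helper names (my_ll_config, my_setup, my_probe) are illustrative, not taken from the driver.

#include <linux/slab.h>

struct my_ll_config {			/* illustrative stand-in for a large config struct */
	char fw_version[32];
	/* ... many more configuration fields ... */
};

static int my_setup(struct my_ll_config *cfg)
{
	/* hypothetical device setup using the heap-allocated config */
	return 0;
}

static int my_probe(void)
{
	struct my_ll_config *cfg;
	int ret;

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);	/* off the probe stack */
	if (!cfg)
		return -ENOMEM;

	ret = my_setup(cfg);		/* former cfg.field accesses become cfg->field */

	/* success and failure paths both release the config copy */
	kfree(cfg);
	return ret;
}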
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index e087b9a6daaa..43b77271532b 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c | |||
@@ -2038,16 +2038,10 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
2038 | 2038 | ||
2039 | /* Now copy the data to the card. */ | 2039 | /* Now copy the data to the card. */ |
2040 | 2040 | ||
2041 | buf = kmalloc(wrthdr.size, GFP_KERNEL); | 2041 | buf = memdup_user(ifr->ifr_data + sizeof(struct fstioc_write), |
2042 | if (!buf) | 2042 | wrthdr.size); |
2043 | return -ENOMEM; | 2043 | if (IS_ERR(buf)) |
2044 | 2044 | return PTR_ERR(buf); | |
2045 | if (copy_from_user(buf, | ||
2046 | ifr->ifr_data + sizeof (struct fstioc_write), | ||
2047 | wrthdr.size)) { | ||
2048 | kfree(buf); | ||
2049 | return -EFAULT; | ||
2050 | } | ||
2051 | 2045 | ||
2052 | memcpy_toio(card->mem + wrthdr.offset, buf, wrthdr.size); | 2046 | memcpy_toio(card->mem + wrthdr.offset, buf, wrthdr.size); |
2053 | kfree(buf); | 2047 | kfree(buf); |
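The farsync change above (and the sdla change further down) replaces the open-coded kmalloc()/copy_from_user()/kfree()-on-fault sequence with memdup_user(), which performs the allocation and the copy in one call and reports failure through an ERR_PTR() value. A minimal sketch of that calling convention, with a hypothetical helper name:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static int copy_blob_from_user(const void __user *uptr, size_t len)
{
	void *buf;

	buf = memdup_user(uptr, len);	/* kmalloc() + copy_from_user() in one step */
	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM or -EFAULT */

	/* ... consume buf, e.g. copy it into device memory ... */

	kfree(buf);
	return 0;
}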
diff --git a/drivers/net/wan/hd64570.h b/drivers/net/wan/hd64570.h index 3839662ff201..e4f539ad071b 100644 --- a/drivers/net/wan/hd64570.h +++ b/drivers/net/wan/hd64570.h | |||
@@ -153,7 +153,7 @@ typedef struct { | |||
153 | u16 len; /* Data Length */ | 153 | u16 len; /* Data Length */ |
154 | u8 stat; /* Status */ | 154 | u8 stat; /* Status */ |
155 | u8 unused; /* pads to 2-byte boundary */ | 155 | u8 unused; /* pads to 2-byte boundary */ |
156 | }__attribute__ ((packed)) pkt_desc; | 156 | }__packed pkt_desc; |
157 | 157 | ||
158 | 158 | ||
159 | /* Packet Descriptor Status bits */ | 159 | /* Packet Descriptor Status bits */ |
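The remaining hunks in this section are a mechanical annotation cleanup: the kernel's compiler headers define __packed as shorthand for __attribute__((packed)), so structures describing hardware or on-wire layouts keep the same byte layout while spelling the attribute consistently. A small illustrative example (the struct itself is not from any of these drivers):

#include <linux/compiler.h>
#include <linux/types.h>

struct example_desc {
	__le32 control;			/* little-endian device field */
	__le32 address;
	u8     status;
	u8     pad;			/* explicit pad byte; __packed removes implicit padding */
} __packed;				/* equivalent to __attribute__ ((packed)) */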
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index ee7083fbea50..b38ffa149aba 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c | |||
@@ -36,7 +36,7 @@ struct hdlc_header { | |||
36 | u8 address; | 36 | u8 address; |
37 | u8 control; | 37 | u8 control; |
38 | __be16 protocol; | 38 | __be16 protocol; |
39 | }__attribute__ ((packed)); | 39 | }__packed; |
40 | 40 | ||
41 | 41 | ||
42 | struct cisco_packet { | 42 | struct cisco_packet { |
@@ -45,7 +45,7 @@ struct cisco_packet { | |||
45 | __be32 par2; | 45 | __be32 par2; |
46 | __be16 rel; /* reliability */ | 46 | __be16 rel; /* reliability */ |
47 | __be32 time; | 47 | __be32 time; |
48 | }__attribute__ ((packed)); | 48 | }__packed; |
49 | #define CISCO_PACKET_LEN 18 | 49 | #define CISCO_PACKET_LEN 18 |
50 | #define CISCO_BIG_PACKET_LEN 20 | 50 | #define CISCO_BIG_PACKET_LEN 20 |
51 | 51 | ||
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index 0e52993e2079..0edb535bb2b5 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c | |||
@@ -112,7 +112,7 @@ typedef struct { | |||
112 | unsigned de: 1; | 112 | unsigned de: 1; |
113 | unsigned ea2: 1; | 113 | unsigned ea2: 1; |
114 | #endif | 114 | #endif |
115 | }__attribute__ ((packed)) fr_hdr; | 115 | }__packed fr_hdr; |
116 | 116 | ||
117 | 117 | ||
118 | typedef struct pvc_device_struct { | 118 | typedef struct pvc_device_struct { |
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c index 43ae6f440bfb..f4125da2762f 100644 --- a/drivers/net/wan/sdla.c +++ b/drivers/net/wan/sdla.c | |||
@@ -330,7 +330,7 @@ struct _dlci_stat | |||
330 | { | 330 | { |
331 | short dlci; | 331 | short dlci; |
332 | char flags; | 332 | char flags; |
333 | } __attribute__((packed)); | 333 | } __packed; |
334 | 334 | ||
335 | struct _frad_stat | 335 | struct _frad_stat |
336 | { | 336 | { |
@@ -1211,14 +1211,9 @@ static int sdla_xfer(struct net_device *dev, struct sdla_mem __user *info, int r | |||
1211 | } | 1211 | } |
1212 | else | 1212 | else |
1213 | { | 1213 | { |
1214 | temp = kmalloc(mem.len, GFP_KERNEL); | 1214 | temp = memdup_user(mem.data, mem.len); |
1215 | if (!temp) | 1215 | if (IS_ERR(temp)) |
1216 | return(-ENOMEM); | 1216 | return PTR_ERR(temp); |
1217 | if(copy_from_user(temp, mem.data, mem.len)) | ||
1218 | { | ||
1219 | kfree(temp); | ||
1220 | return -EFAULT; | ||
1221 | } | ||
1222 | sdla_write(dev, mem.addr, temp, mem.len); | 1217 | sdla_write(dev, mem.addr, temp, mem.len); |
1223 | kfree(temp); | 1218 | kfree(temp); |
1224 | } | 1219 | } |
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c index d86e8f31e7fc..2f725d0cc762 100644 --- a/drivers/net/wimax/i2400m/control.c +++ b/drivers/net/wimax/i2400m/control.c | |||
@@ -848,7 +848,7 @@ struct i2400m_cmd_enter_power_save { | |||
848 | struct i2400m_l3l4_hdr hdr; | 848 | struct i2400m_l3l4_hdr hdr; |
849 | struct i2400m_tlv_hdr tlv; | 849 | struct i2400m_tlv_hdr tlv; |
850 | __le32 val; | 850 | __le32 val; |
851 | } __attribute__((packed)); | 851 | } __packed; |
852 | 852 | ||
853 | 853 | ||
854 | /* | 854 | /* |
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c index 3f283bff0ff7..e9b34b0cb197 100644 --- a/drivers/net/wimax/i2400m/fw.c +++ b/drivers/net/wimax/i2400m/fw.c | |||
@@ -651,7 +651,7 @@ static int i2400m_download_chunk(struct i2400m *i2400m, const void *chunk, | |||
651 | struct { | 651 | struct { |
652 | struct i2400m_bootrom_header cmd; | 652 | struct i2400m_bootrom_header cmd; |
653 | u8 cmd_payload[chunk_len]; | 653 | u8 cmd_payload[chunk_len]; |
654 | } __attribute__((packed)) *buf; | 654 | } __packed *buf; |
655 | struct i2400m_bootrom_header ack; | 655 | struct i2400m_bootrom_header ack; |
656 | 656 | ||
657 | d_fnstart(5, dev, "(i2400m %p chunk %p __chunk_len %zu addr 0x%08lx " | 657 | d_fnstart(5, dev, "(i2400m %p chunk %p __chunk_len %zu addr 0x%08lx " |
@@ -794,7 +794,7 @@ int i2400m_dnload_finalize(struct i2400m *i2400m, | |||
794 | struct { | 794 | struct { |
795 | struct i2400m_bootrom_header cmd; | 795 | struct i2400m_bootrom_header cmd; |
796 | u8 cmd_pl[0]; | 796 | u8 cmd_pl[0]; |
797 | } __attribute__((packed)) *cmd_buf; | 797 | } __packed *cmd_buf; |
798 | size_t signature_block_offset, signature_block_size; | 798 | size_t signature_block_offset, signature_block_size; |
799 | 799 | ||
800 | d_fnstart(3, dev, "offset %zu\n", offset); | 800 | d_fnstart(3, dev, "offset %zu\n", offset); |
@@ -1029,7 +1029,7 @@ int i2400m_read_mac_addr(struct i2400m *i2400m) | |||
1029 | struct { | 1029 | struct { |
1030 | struct i2400m_bootrom_header ack; | 1030 | struct i2400m_bootrom_header ack; |
1031 | u8 ack_pl[16]; | 1031 | u8 ack_pl[16]; |
1032 | } __attribute__((packed)) ack_buf; | 1032 | } __packed ack_buf; |
1033 | 1033 | ||
1034 | d_fnstart(5, dev, "(i2400m %p)\n", i2400m); | 1034 | d_fnstart(5, dev, "(i2400m %p)\n", i2400m); |
1035 | cmd = i2400m->bm_cmd_buf; | 1035 | cmd = i2400m->bm_cmd_buf; |
@@ -1115,7 +1115,7 @@ int i2400m_dnload_init_signed(struct i2400m *i2400m, | |||
1115 | struct { | 1115 | struct { |
1116 | struct i2400m_bootrom_header cmd; | 1116 | struct i2400m_bootrom_header cmd; |
1117 | struct i2400m_bcf_hdr cmd_pl; | 1117 | struct i2400m_bcf_hdr cmd_pl; |
1118 | } __attribute__((packed)) *cmd_buf; | 1118 | } __packed *cmd_buf; |
1119 | struct i2400m_bootrom_header ack; | 1119 | struct i2400m_bootrom_header ack; |
1120 | 1120 | ||
1121 | d_fnstart(5, dev, "(i2400m %p bcf_hdr %p)\n", i2400m, bcf_hdr); | 1121 | d_fnstart(5, dev, "(i2400m %p bcf_hdr %p)\n", i2400m, bcf_hdr); |
diff --git a/drivers/net/wimax/i2400m/op-rfkill.c b/drivers/net/wimax/i2400m/op-rfkill.c index 035e4cf3e6ed..9e02b90b0080 100644 --- a/drivers/net/wimax/i2400m/op-rfkill.c +++ b/drivers/net/wimax/i2400m/op-rfkill.c | |||
@@ -91,7 +91,7 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev, | |||
91 | struct { | 91 | struct { |
92 | struct i2400m_l3l4_hdr hdr; | 92 | struct i2400m_l3l4_hdr hdr; |
93 | struct i2400m_tlv_rf_operation sw_rf; | 93 | struct i2400m_tlv_rf_operation sw_rf; |
94 | } __attribute__((packed)) *cmd; | 94 | } __packed *cmd; |
95 | char strerr[32]; | 95 | char strerr[32]; |
96 | 96 | ||
97 | d_fnstart(4, dev, "(wimax_dev %p state %d)\n", wimax_dev, state); | 97 | d_fnstart(4, dev, "(wimax_dev %p state %d)\n", wimax_dev, state); |
diff --git a/drivers/net/wireless/adm8211.h b/drivers/net/wireless/adm8211.h index b07e4d3a6b4d..bbc10b1cde87 100644 --- a/drivers/net/wireless/adm8211.h +++ b/drivers/net/wireless/adm8211.h | |||
@@ -80,7 +80,7 @@ struct adm8211_csr { | |||
80 | __le32 FEMR; /* 0x104 */ | 80 | __le32 FEMR; /* 0x104 */ |
81 | __le32 FPSR; /* 0x108 */ | 81 | __le32 FPSR; /* 0x108 */ |
82 | __le32 FFER; /* 0x10C */ | 82 | __le32 FFER; /* 0x10C */ |
83 | } __attribute__ ((packed)); | 83 | } __packed; |
84 | 84 | ||
85 | /* CSR0 - PAR (PCI Address Register) */ | 85 | /* CSR0 - PAR (PCI Address Register) */ |
86 | #define ADM8211_PAR_MWIE (1 << 24) | 86 | #define ADM8211_PAR_MWIE (1 << 24) |
@@ -484,7 +484,7 @@ struct adm8211_tx_hdr { | |||
484 | u8 entry_control; // huh?? | 484 | u8 entry_control; // huh?? |
485 | u16 reserved_1; | 485 | u16 reserved_1; |
486 | u32 reserved_2; | 486 | u32 reserved_2; |
487 | } __attribute__ ((packed)); | 487 | } __packed; |
488 | 488 | ||
489 | 489 | ||
490 | #define RX_COPY_BREAK 128 | 490 | #define RX_COPY_BREAK 128 |
@@ -531,7 +531,7 @@ struct adm8211_eeprom { | |||
531 | u8 lnags_threshold[14]; /* 0x70 */ | 531 | u8 lnags_threshold[14]; /* 0x70 */ |
532 | __le16 checksum; /* 0x7E */ | 532 | __le16 checksum; /* 0x7E */ |
533 | u8 cis_data[0]; /* 0x80, 384 bytes */ | 533 | u8 cis_data[0]; /* 0x80, 384 bytes */ |
534 | } __attribute__ ((packed)); | 534 | } __packed; |
535 | 535 | ||
536 | struct adm8211_priv { | 536 | struct adm8211_priv { |
537 | struct pci_dev *pdev; | 537 | struct pci_dev *pdev; |
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index 3b7ab20a5c54..6b605df8a923 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c | |||
@@ -506,20 +506,20 @@ struct WepKeyRid { | |||
506 | u8 mac[ETH_ALEN]; | 506 | u8 mac[ETH_ALEN]; |
507 | __le16 klen; | 507 | __le16 klen; |
508 | u8 key[16]; | 508 | u8 key[16]; |
509 | } __attribute__ ((packed)); | 509 | } __packed; |
510 | 510 | ||
511 | /* These structures are from the Aironet's PC4500 Developers Manual */ | 511 | /* These structures are from the Aironet's PC4500 Developers Manual */ |
512 | typedef struct Ssid Ssid; | 512 | typedef struct Ssid Ssid; |
513 | struct Ssid { | 513 | struct Ssid { |
514 | __le16 len; | 514 | __le16 len; |
515 | u8 ssid[32]; | 515 | u8 ssid[32]; |
516 | } __attribute__ ((packed)); | 516 | } __packed; |
517 | 517 | ||
518 | typedef struct SsidRid SsidRid; | 518 | typedef struct SsidRid SsidRid; |
519 | struct SsidRid { | 519 | struct SsidRid { |
520 | __le16 len; | 520 | __le16 len; |
521 | Ssid ssids[3]; | 521 | Ssid ssids[3]; |
522 | } __attribute__ ((packed)); | 522 | } __packed; |
523 | 523 | ||
524 | typedef struct ModulationRid ModulationRid; | 524 | typedef struct ModulationRid ModulationRid; |
525 | struct ModulationRid { | 525 | struct ModulationRid { |
@@ -528,7 +528,7 @@ struct ModulationRid { | |||
528 | #define MOD_DEFAULT cpu_to_le16(0) | 528 | #define MOD_DEFAULT cpu_to_le16(0) |
529 | #define MOD_CCK cpu_to_le16(1) | 529 | #define MOD_CCK cpu_to_le16(1) |
530 | #define MOD_MOK cpu_to_le16(2) | 530 | #define MOD_MOK cpu_to_le16(2) |
531 | } __attribute__ ((packed)); | 531 | } __packed; |
532 | 532 | ||
533 | typedef struct ConfigRid ConfigRid; | 533 | typedef struct ConfigRid ConfigRid; |
534 | struct ConfigRid { | 534 | struct ConfigRid { |
@@ -652,7 +652,7 @@ struct ConfigRid { | |||
652 | #define MAGIC_STAY_IN_CAM (1<<10) | 652 | #define MAGIC_STAY_IN_CAM (1<<10) |
653 | u8 magicControl; | 653 | u8 magicControl; |
654 | __le16 autoWake; | 654 | __le16 autoWake; |
655 | } __attribute__ ((packed)); | 655 | } __packed; |
656 | 656 | ||
657 | typedef struct StatusRid StatusRid; | 657 | typedef struct StatusRid StatusRid; |
658 | struct StatusRid { | 658 | struct StatusRid { |
@@ -711,20 +711,20 @@ struct StatusRid { | |||
711 | #define STAT_LEAPFAILED 91 | 711 | #define STAT_LEAPFAILED 91 |
712 | #define STAT_LEAPTIMEDOUT 92 | 712 | #define STAT_LEAPTIMEDOUT 92 |
713 | #define STAT_LEAPCOMPLETE 93 | 713 | #define STAT_LEAPCOMPLETE 93 |
714 | } __attribute__ ((packed)); | 714 | } __packed; |
715 | 715 | ||
716 | typedef struct StatsRid StatsRid; | 716 | typedef struct StatsRid StatsRid; |
717 | struct StatsRid { | 717 | struct StatsRid { |
718 | __le16 len; | 718 | __le16 len; |
719 | __le16 spacer; | 719 | __le16 spacer; |
720 | __le32 vals[100]; | 720 | __le32 vals[100]; |
721 | } __attribute__ ((packed)); | 721 | } __packed; |
722 | 722 | ||
723 | typedef struct APListRid APListRid; | 723 | typedef struct APListRid APListRid; |
724 | struct APListRid { | 724 | struct APListRid { |
725 | __le16 len; | 725 | __le16 len; |
726 | u8 ap[4][ETH_ALEN]; | 726 | u8 ap[4][ETH_ALEN]; |
727 | } __attribute__ ((packed)); | 727 | } __packed; |
728 | 728 | ||
729 | typedef struct CapabilityRid CapabilityRid; | 729 | typedef struct CapabilityRid CapabilityRid; |
730 | struct CapabilityRid { | 730 | struct CapabilityRid { |
@@ -754,7 +754,7 @@ struct CapabilityRid { | |||
754 | __le16 bootBlockVer; | 754 | __le16 bootBlockVer; |
755 | __le16 requiredHard; | 755 | __le16 requiredHard; |
756 | __le16 extSoftCap; | 756 | __le16 extSoftCap; |
757 | } __attribute__ ((packed)); | 757 | } __packed; |
758 | 758 | ||
759 | /* Only present on firmware >= 5.30.17 */ | 759 | /* Only present on firmware >= 5.30.17 */ |
760 | typedef struct BSSListRidExtra BSSListRidExtra; | 760 | typedef struct BSSListRidExtra BSSListRidExtra; |
@@ -762,7 +762,7 @@ struct BSSListRidExtra { | |||
762 | __le16 unknown[4]; | 762 | __le16 unknown[4]; |
763 | u8 fixed[12]; /* WLAN management frame */ | 763 | u8 fixed[12]; /* WLAN management frame */ |
764 | u8 iep[624]; | 764 | u8 iep[624]; |
765 | } __attribute__ ((packed)); | 765 | } __packed; |
766 | 766 | ||
767 | typedef struct BSSListRid BSSListRid; | 767 | typedef struct BSSListRid BSSListRid; |
768 | struct BSSListRid { | 768 | struct BSSListRid { |
@@ -796,7 +796,7 @@ struct BSSListRid { | |||
796 | 796 | ||
797 | /* Only present on firmware >= 5.30.17 */ | 797 | /* Only present on firmware >= 5.30.17 */ |
798 | BSSListRidExtra extra; | 798 | BSSListRidExtra extra; |
799 | } __attribute__ ((packed)); | 799 | } __packed; |
800 | 800 | ||
801 | typedef struct { | 801 | typedef struct { |
802 | BSSListRid bss; | 802 | BSSListRid bss; |
@@ -807,13 +807,13 @@ typedef struct tdsRssiEntry tdsRssiEntry; | |||
807 | struct tdsRssiEntry { | 807 | struct tdsRssiEntry { |
808 | u8 rssipct; | 808 | u8 rssipct; |
809 | u8 rssidBm; | 809 | u8 rssidBm; |
810 | } __attribute__ ((packed)); | 810 | } __packed; |
811 | 811 | ||
812 | typedef struct tdsRssiRid tdsRssiRid; | 812 | typedef struct tdsRssiRid tdsRssiRid; |
813 | struct tdsRssiRid { | 813 | struct tdsRssiRid { |
814 | u16 len; | 814 | u16 len; |
815 | tdsRssiEntry x[256]; | 815 | tdsRssiEntry x[256]; |
816 | } __attribute__ ((packed)); | 816 | } __packed; |
817 | 817 | ||
818 | typedef struct MICRid MICRid; | 818 | typedef struct MICRid MICRid; |
819 | struct MICRid { | 819 | struct MICRid { |
@@ -823,7 +823,7 @@ struct MICRid { | |||
823 | u8 multicast[16]; | 823 | u8 multicast[16]; |
824 | __le16 unicastValid; | 824 | __le16 unicastValid; |
825 | u8 unicast[16]; | 825 | u8 unicast[16]; |
826 | } __attribute__ ((packed)); | 826 | } __packed; |
827 | 827 | ||
828 | typedef struct MICBuffer MICBuffer; | 828 | typedef struct MICBuffer MICBuffer; |
829 | struct MICBuffer { | 829 | struct MICBuffer { |
@@ -841,7 +841,7 @@ struct MICBuffer { | |||
841 | } u; | 841 | } u; |
842 | __be32 mic; | 842 | __be32 mic; |
843 | __be32 seq; | 843 | __be32 seq; |
844 | } __attribute__ ((packed)); | 844 | } __packed; |
845 | 845 | ||
846 | typedef struct { | 846 | typedef struct { |
847 | u8 da[ETH_ALEN]; | 847 | u8 da[ETH_ALEN]; |
@@ -996,7 +996,7 @@ struct rx_hdr { | |||
996 | u8 rate; | 996 | u8 rate; |
997 | u8 freq; | 997 | u8 freq; |
998 | __le16 tmp[4]; | 998 | __le16 tmp[4]; |
999 | } __attribute__ ((packed)); | 999 | } __packed; |
1000 | 1000 | ||
1001 | typedef struct { | 1001 | typedef struct { |
1002 | unsigned int ctl: 15; | 1002 | unsigned int ctl: 15; |
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c index 8a2d4afc74f8..429b281d40d1 100644 --- a/drivers/net/wireless/at76c50x-usb.c +++ b/drivers/net/wireless/at76c50x-usb.c | |||
@@ -305,7 +305,7 @@ struct dfu_status { | |||
305 | unsigned char poll_timeout[3]; | 305 | unsigned char poll_timeout[3]; |
306 | unsigned char state; | 306 | unsigned char state; |
307 | unsigned char string; | 307 | unsigned char string; |
308 | } __attribute__((packed)); | 308 | } __packed; |
309 | 309 | ||
310 | static inline int at76_is_intersil(enum board_type board) | 310 | static inline int at76_is_intersil(enum board_type board) |
311 | { | 311 | { |
diff --git a/drivers/net/wireless/at76c50x-usb.h b/drivers/net/wireless/at76c50x-usb.h index 1ec5ccffdbc0..972ea0fc1a0b 100644 --- a/drivers/net/wireless/at76c50x-usb.h +++ b/drivers/net/wireless/at76c50x-usb.h | |||
@@ -99,7 +99,7 @@ struct hwcfg_r505 { | |||
99 | u8 reserved2[14]; | 99 | u8 reserved2[14]; |
100 | u8 cr15_values[14]; | 100 | u8 cr15_values[14]; |
101 | u8 reserved3[3]; | 101 | u8 reserved3[3]; |
102 | } __attribute__((packed)); | 102 | } __packed; |
103 | 103 | ||
104 | struct hwcfg_rfmd { | 104 | struct hwcfg_rfmd { |
105 | u8 cr20_values[14]; | 105 | u8 cr20_values[14]; |
@@ -111,7 +111,7 @@ struct hwcfg_rfmd { | |||
111 | u8 low_power_values[14]; | 111 | u8 low_power_values[14]; |
112 | u8 normal_power_values[14]; | 112 | u8 normal_power_values[14]; |
113 | u8 reserved1[3]; | 113 | u8 reserved1[3]; |
114 | } __attribute__((packed)); | 114 | } __packed; |
115 | 115 | ||
116 | struct hwcfg_intersil { | 116 | struct hwcfg_intersil { |
117 | u8 mac_addr[ETH_ALEN]; | 117 | u8 mac_addr[ETH_ALEN]; |
@@ -120,7 +120,7 @@ struct hwcfg_intersil { | |||
120 | u8 pidvid[4]; | 120 | u8 pidvid[4]; |
121 | u8 regulatory_domain; | 121 | u8 regulatory_domain; |
122 | u8 reserved[1]; | 122 | u8 reserved[1]; |
123 | } __attribute__((packed)); | 123 | } __packed; |
124 | 124 | ||
125 | union at76_hwcfg { | 125 | union at76_hwcfg { |
126 | struct hwcfg_intersil i; | 126 | struct hwcfg_intersil i; |
@@ -149,14 +149,14 @@ struct at76_card_config { | |||
149 | u8 ssid_len; | 149 | u8 ssid_len; |
150 | u8 short_preamble; | 150 | u8 short_preamble; |
151 | __le16 beacon_period; | 151 | __le16 beacon_period; |
152 | } __attribute__((packed)); | 152 | } __packed; |
153 | 153 | ||
154 | struct at76_command { | 154 | struct at76_command { |
155 | u8 cmd; | 155 | u8 cmd; |
156 | u8 reserved; | 156 | u8 reserved; |
157 | __le16 size; | 157 | __le16 size; |
158 | u8 data[0]; | 158 | u8 data[0]; |
159 | } __attribute__((packed)); | 159 | } __packed; |
160 | 160 | ||
161 | /* Length of Atmel-specific Rx header before 802.11 frame */ | 161 | /* Length of Atmel-specific Rx header before 802.11 frame */ |
162 | #define AT76_RX_HDRLEN offsetof(struct at76_rx_buffer, packet) | 162 | #define AT76_RX_HDRLEN offsetof(struct at76_rx_buffer, packet) |
@@ -171,7 +171,7 @@ struct at76_rx_buffer { | |||
171 | u8 noise_level; | 171 | u8 noise_level; |
172 | __le32 rx_time; | 172 | __le32 rx_time; |
173 | u8 packet[IEEE80211_MAX_FRAG_THRESHOLD]; | 173 | u8 packet[IEEE80211_MAX_FRAG_THRESHOLD]; |
174 | } __attribute__((packed)); | 174 | } __packed; |
175 | 175 | ||
176 | /* Length of Atmel-specific Tx header before 802.11 frame */ | 176 | /* Length of Atmel-specific Tx header before 802.11 frame */ |
177 | #define AT76_TX_HDRLEN offsetof(struct at76_tx_buffer, packet) | 177 | #define AT76_TX_HDRLEN offsetof(struct at76_tx_buffer, packet) |
@@ -182,7 +182,7 @@ struct at76_tx_buffer { | |||
182 | u8 padding; | 182 | u8 padding; |
183 | u8 reserved[4]; | 183 | u8 reserved[4]; |
184 | u8 packet[IEEE80211_MAX_FRAG_THRESHOLD]; | 184 | u8 packet[IEEE80211_MAX_FRAG_THRESHOLD]; |
185 | } __attribute__((packed)); | 185 | } __packed; |
186 | 186 | ||
187 | /* defines for scan_type below */ | 187 | /* defines for scan_type below */ |
188 | #define SCAN_TYPE_ACTIVE 0 | 188 | #define SCAN_TYPE_ACTIVE 0 |
@@ -198,7 +198,7 @@ struct at76_req_scan { | |||
198 | __le16 max_channel_time; | 198 | __le16 max_channel_time; |
199 | u8 essid_size; | 199 | u8 essid_size; |
200 | u8 international_scan; | 200 | u8 international_scan; |
201 | } __attribute__((packed)); | 201 | } __packed; |
202 | 202 | ||
203 | struct at76_req_ibss { | 203 | struct at76_req_ibss { |
204 | u8 bssid[ETH_ALEN]; | 204 | u8 bssid[ETH_ALEN]; |
@@ -207,7 +207,7 @@ struct at76_req_ibss { | |||
207 | u8 channel; | 207 | u8 channel; |
208 | u8 essid_size; | 208 | u8 essid_size; |
209 | u8 reserved[3]; | 209 | u8 reserved[3]; |
210 | } __attribute__((packed)); | 210 | } __packed; |
211 | 211 | ||
212 | struct at76_req_join { | 212 | struct at76_req_join { |
213 | u8 bssid[ETH_ALEN]; | 213 | u8 bssid[ETH_ALEN]; |
@@ -217,7 +217,7 @@ struct at76_req_join { | |||
217 | __le16 timeout; | 217 | __le16 timeout; |
218 | u8 essid_size; | 218 | u8 essid_size; |
219 | u8 reserved; | 219 | u8 reserved; |
220 | } __attribute__((packed)); | 220 | } __packed; |
221 | 221 | ||
222 | struct set_mib_buffer { | 222 | struct set_mib_buffer { |
223 | u8 type; | 223 | u8 type; |
@@ -229,7 +229,7 @@ struct set_mib_buffer { | |||
229 | __le16 word; | 229 | __le16 word; |
230 | u8 addr[ETH_ALEN]; | 230 | u8 addr[ETH_ALEN]; |
231 | } data; | 231 | } data; |
232 | } __attribute__((packed)); | 232 | } __packed; |
233 | 233 | ||
234 | struct mib_local { | 234 | struct mib_local { |
235 | u16 reserved0; | 235 | u16 reserved0; |
@@ -241,14 +241,14 @@ struct mib_local { | |||
241 | u16 reserved2; | 241 | u16 reserved2; |
242 | u8 preamble_type; | 242 | u8 preamble_type; |
243 | u16 reserved3; | 243 | u16 reserved3; |
244 | } __attribute__((packed)); | 244 | } __packed; |
245 | 245 | ||
246 | struct mib_mac_addr { | 246 | struct mib_mac_addr { |
247 | u8 mac_addr[ETH_ALEN]; | 247 | u8 mac_addr[ETH_ALEN]; |
248 | u8 res[2]; /* ??? */ | 248 | u8 res[2]; /* ??? */ |
249 | u8 group_addr[4][ETH_ALEN]; | 249 | u8 group_addr[4][ETH_ALEN]; |
250 | u8 group_addr_status[4]; | 250 | u8 group_addr_status[4]; |
251 | } __attribute__((packed)); | 251 | } __packed; |
252 | 252 | ||
253 | struct mib_mac { | 253 | struct mib_mac { |
254 | __le32 max_tx_msdu_lifetime; | 254 | __le32 max_tx_msdu_lifetime; |
@@ -269,7 +269,7 @@ struct mib_mac { | |||
269 | u8 desired_bssid[ETH_ALEN]; | 269 | u8 desired_bssid[ETH_ALEN]; |
270 | u8 desired_bsstype; /* ad-hoc or infrastructure */ | 270 | u8 desired_bsstype; /* ad-hoc or infrastructure */ |
271 | u8 reserved2; | 271 | u8 reserved2; |
272 | } __attribute__((packed)); | 272 | } __packed; |
273 | 273 | ||
274 | struct mib_mac_mgmt { | 274 | struct mib_mac_mgmt { |
275 | __le16 beacon_period; | 275 | __le16 beacon_period; |
@@ -292,7 +292,7 @@ struct mib_mac_mgmt { | |||
292 | u8 multi_domain_capability_enabled; | 292 | u8 multi_domain_capability_enabled; |
293 | u8 country_string[3]; | 293 | u8 country_string[3]; |
294 | u8 reserved[3]; | 294 | u8 reserved[3]; |
295 | } __attribute__((packed)); | 295 | } __packed; |
296 | 296 | ||
297 | struct mib_mac_wep { | 297 | struct mib_mac_wep { |
298 | u8 privacy_invoked; /* 0 disable encr., 1 enable encr */ | 298 | u8 privacy_invoked; /* 0 disable encr., 1 enable encr */ |
@@ -303,7 +303,7 @@ struct mib_mac_wep { | |||
303 | __le32 wep_excluded_count; | 303 | __le32 wep_excluded_count; |
304 | u8 wep_default_keyvalue[WEP_KEYS][WEP_LARGE_KEY_LEN]; | 304 | u8 wep_default_keyvalue[WEP_KEYS][WEP_LARGE_KEY_LEN]; |
305 | u8 encryption_level; /* 1 for 40bit, 2 for 104bit encryption */ | 305 | u8 encryption_level; /* 1 for 40bit, 2 for 104bit encryption */ |
306 | } __attribute__((packed)); | 306 | } __packed; |
307 | 307 | ||
308 | struct mib_phy { | 308 | struct mib_phy { |
309 | __le32 ed_threshold; | 309 | __le32 ed_threshold; |
@@ -320,19 +320,19 @@ struct mib_phy { | |||
320 | u8 current_cca_mode; | 320 | u8 current_cca_mode; |
321 | u8 phy_type; | 321 | u8 phy_type; |
322 | u8 current_reg_domain; | 322 | u8 current_reg_domain; |
323 | } __attribute__((packed)); | 323 | } __packed; |
324 | 324 | ||
325 | struct mib_fw_version { | 325 | struct mib_fw_version { |
326 | u8 major; | 326 | u8 major; |
327 | u8 minor; | 327 | u8 minor; |
328 | u8 patch; | 328 | u8 patch; |
329 | u8 build; | 329 | u8 build; |
330 | } __attribute__((packed)); | 330 | } __packed; |
331 | 331 | ||
332 | struct mib_mdomain { | 332 | struct mib_mdomain { |
333 | u8 tx_powerlevel[14]; | 333 | u8 tx_powerlevel[14]; |
334 | u8 channel_list[14]; /* 0 for invalid channels */ | 334 | u8 channel_list[14]; /* 0 for invalid channels */ |
335 | } __attribute__((packed)); | 335 | } __packed; |
336 | 336 | ||
337 | struct at76_fw_header { | 337 | struct at76_fw_header { |
338 | __le32 crc; /* CRC32 of the whole image */ | 338 | __le32 crc; /* CRC32 of the whole image */ |
@@ -346,7 +346,7 @@ struct at76_fw_header { | |||
346 | __le32 int_fw_len; /* internal firmware image length */ | 346 | __le32 int_fw_len; /* internal firmware image length */ |
347 | __le32 ext_fw_offset; /* external firmware image offset */ | 347 | __le32 ext_fw_offset; /* external firmware image offset */ |
348 | __le32 ext_fw_len; /* external firmware image length */ | 348 | __le32 ext_fw_len; /* external firmware image length */ |
349 | } __attribute__((packed)); | 349 | } __packed; |
350 | 350 | ||
351 | /* a description of a regulatory domain and the allowed channels */ | 351 | /* a description of a regulatory domain and the allowed channels */ |
352 | struct reg_domain { | 352 | struct reg_domain { |
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h index 3a003e6803a5..8674a99356af 100644 --- a/drivers/net/wireless/b43/b43.h +++ b/drivers/net/wireless/b43/b43.h | |||
@@ -530,7 +530,7 @@ struct b43_fw_header { | |||
530 | /* Size of the data. For ucode and PCM this is in bytes. | 530 | /* Size of the data. For ucode and PCM this is in bytes. |
531 | * For IV this is number-of-ivs. */ | 531 | * For IV this is number-of-ivs. */ |
532 | __be32 size; | 532 | __be32 size; |
533 | } __attribute__((__packed__)); | 533 | } __packed; |
534 | 534 | ||
535 | /* Initial Value file format */ | 535 | /* Initial Value file format */ |
536 | #define B43_IV_OFFSET_MASK 0x7FFF | 536 | #define B43_IV_OFFSET_MASK 0x7FFF |
@@ -540,8 +540,8 @@ struct b43_iv { | |||
540 | union { | 540 | union { |
541 | __be16 d16; | 541 | __be16 d16; |
542 | __be32 d32; | 542 | __be32 d32; |
543 | } data __attribute__((__packed__)); | 543 | } data __packed; |
544 | } __attribute__((__packed__)); | 544 | } __packed; |
545 | 545 | ||
546 | 546 | ||
547 | /* Data structures for DMA transmission, per 80211 core. */ | 547 | /* Data structures for DMA transmission, per 80211 core. */ |
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h index dc91944d6022..a01c2100f166 100644 --- a/drivers/net/wireless/b43/dma.h +++ b/drivers/net/wireless/b43/dma.h | |||
@@ -67,7 +67,7 @@ | |||
67 | struct b43_dmadesc32 { | 67 | struct b43_dmadesc32 { |
68 | __le32 control; | 68 | __le32 control; |
69 | __le32 address; | 69 | __le32 address; |
70 | } __attribute__ ((__packed__)); | 70 | } __packed; |
71 | #define B43_DMA32_DCTL_BYTECNT 0x00001FFF | 71 | #define B43_DMA32_DCTL_BYTECNT 0x00001FFF |
72 | #define B43_DMA32_DCTL_ADDREXT_MASK 0x00030000 | 72 | #define B43_DMA32_DCTL_ADDREXT_MASK 0x00030000 |
73 | #define B43_DMA32_DCTL_ADDREXT_SHIFT 16 | 73 | #define B43_DMA32_DCTL_ADDREXT_SHIFT 16 |
@@ -140,7 +140,7 @@ struct b43_dmadesc64 { | |||
140 | __le32 control1; | 140 | __le32 control1; |
141 | __le32 address_low; | 141 | __le32 address_low; |
142 | __le32 address_high; | 142 | __le32 address_high; |
143 | } __attribute__ ((__packed__)); | 143 | } __packed; |
144 | #define B43_DMA64_DCTL0_DTABLEEND 0x10000000 | 144 | #define B43_DMA64_DCTL0_DTABLEEND 0x10000000 |
145 | #define B43_DMA64_DCTL0_IRQ 0x20000000 | 145 | #define B43_DMA64_DCTL0_IRQ 0x20000000 |
146 | #define B43_DMA64_DCTL0_FRAMEEND 0x40000000 | 146 | #define B43_DMA64_DCTL0_FRAMEEND 0x40000000 |
@@ -153,8 +153,8 @@ struct b43_dmadesc_generic { | |||
153 | union { | 153 | union { |
154 | struct b43_dmadesc32 dma32; | 154 | struct b43_dmadesc32 dma32; |
155 | struct b43_dmadesc64 dma64; | 155 | struct b43_dmadesc64 dma64; |
156 | } __attribute__ ((__packed__)); | 156 | } __packed; |
157 | } __attribute__ ((__packed__)); | 157 | } __packed; |
158 | 158 | ||
159 | /* Misc DMA constants */ | 159 | /* Misc DMA constants */ |
160 | #define B43_DMA_RINGMEMSIZE PAGE_SIZE | 160 | #define B43_DMA_RINGMEMSIZE PAGE_SIZE |
diff --git a/drivers/net/wireless/b43/xmit.h b/drivers/net/wireless/b43/xmit.h index d23ff9fe0c9e..d4cf9b390af3 100644 --- a/drivers/net/wireless/b43/xmit.h +++ b/drivers/net/wireless/b43/xmit.h | |||
@@ -10,8 +10,8 @@ | |||
10 | union { \ | 10 | union { \ |
11 | __le32 data; \ | 11 | __le32 data; \ |
12 | __u8 raw[size]; \ | 12 | __u8 raw[size]; \ |
13 | } __attribute__((__packed__)); \ | 13 | } __packed; \ |
14 | } __attribute__((__packed__)) | 14 | } __packed |
15 | 15 | ||
16 | /* struct b43_plcp_hdr4 */ | 16 | /* struct b43_plcp_hdr4 */ |
17 | _b43_declare_plcp_hdr(4); | 17 | _b43_declare_plcp_hdr(4); |
@@ -57,7 +57,7 @@ struct b43_txhdr { | |||
57 | __u8 rts_frame[16]; /* The RTS frame (if used) */ | 57 | __u8 rts_frame[16]; /* The RTS frame (if used) */ |
58 | PAD_BYTES(2); | 58 | PAD_BYTES(2); |
59 | struct b43_plcp_hdr6 plcp; /* Main PLCP header */ | 59 | struct b43_plcp_hdr6 plcp; /* Main PLCP header */ |
60 | } new_format __attribute__ ((__packed__)); | 60 | } new_format __packed; |
61 | 61 | ||
62 | /* The old r351 format. */ | 62 | /* The old r351 format. */ |
63 | struct { | 63 | struct { |
@@ -68,10 +68,10 @@ struct b43_txhdr { | |||
68 | __u8 rts_frame[16]; /* The RTS frame (if used) */ | 68 | __u8 rts_frame[16]; /* The RTS frame (if used) */ |
69 | PAD_BYTES(2); | 69 | PAD_BYTES(2); |
70 | struct b43_plcp_hdr6 plcp; /* Main PLCP header */ | 70 | struct b43_plcp_hdr6 plcp; /* Main PLCP header */ |
71 | } old_format __attribute__ ((__packed__)); | 71 | } old_format __packed; |
72 | 72 | ||
73 | } __attribute__ ((__packed__)); | 73 | } __packed; |
74 | } __attribute__ ((__packed__)); | 74 | } __packed; |
75 | 75 | ||
76 | /* MAC TX control */ | 76 | /* MAC TX control */ |
77 | #define B43_TXH_MAC_USEFBR 0x10000000 /* Use fallback rate for this AMPDU */ | 77 | #define B43_TXH_MAC_USEFBR 0x10000000 /* Use fallback rate for this AMPDU */ |
@@ -218,20 +218,20 @@ struct b43_rxhdr_fw4 { | |||
218 | struct { | 218 | struct { |
219 | __u8 jssi; /* PHY RX Status 1: JSSI */ | 219 | __u8 jssi; /* PHY RX Status 1: JSSI */ |
220 | __u8 sig_qual; /* PHY RX Status 1: Signal Quality */ | 220 | __u8 sig_qual; /* PHY RX Status 1: Signal Quality */ |
221 | } __attribute__ ((__packed__)); | 221 | } __packed; |
222 | 222 | ||
223 | /* RSSI for N-PHYs */ | 223 | /* RSSI for N-PHYs */ |
224 | struct { | 224 | struct { |
225 | __s8 power0; /* PHY RX Status 1: Power 0 */ | 225 | __s8 power0; /* PHY RX Status 1: Power 0 */ |
226 | __s8 power1; /* PHY RX Status 1: Power 1 */ | 226 | __s8 power1; /* PHY RX Status 1: Power 1 */ |
227 | } __attribute__ ((__packed__)); | 227 | } __packed; |
228 | } __attribute__ ((__packed__)); | 228 | } __packed; |
229 | __le16 phy_status2; /* PHY RX Status 2 */ | 229 | __le16 phy_status2; /* PHY RX Status 2 */ |
230 | __le16 phy_status3; /* PHY RX Status 3 */ | 230 | __le16 phy_status3; /* PHY RX Status 3 */ |
231 | __le32 mac_status; /* MAC RX status */ | 231 | __le32 mac_status; /* MAC RX status */ |
232 | __le16 mac_time; | 232 | __le16 mac_time; |
233 | __le16 channel; | 233 | __le16 channel; |
234 | } __attribute__ ((__packed__)); | 234 | } __packed; |
235 | 235 | ||
236 | /* PHY RX Status 0 */ | 236 | /* PHY RX Status 0 */ |
237 | #define B43_RX_PHYST0_GAINCTL 0x4000 /* Gain Control */ | 237 | #define B43_RX_PHYST0_GAINCTL 0x4000 /* Gain Control */ |
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h index 89fe2f972c72..c81b2f53b0c5 100644 --- a/drivers/net/wireless/b43legacy/b43legacy.h +++ b/drivers/net/wireless/b43legacy/b43legacy.h | |||
@@ -372,7 +372,7 @@ struct b43legacy_fw_header { | |||
372 | /* Size of the data. For ucode and PCM this is in bytes. | 372 | /* Size of the data. For ucode and PCM this is in bytes. |
373 | * For IV this is number-of-ivs. */ | 373 | * For IV this is number-of-ivs. */ |
374 | __be32 size; | 374 | __be32 size; |
375 | } __attribute__((__packed__)); | 375 | } __packed; |
376 | 376 | ||
377 | /* Initial Value file format */ | 377 | /* Initial Value file format */ |
378 | #define B43legacy_IV_OFFSET_MASK 0x7FFF | 378 | #define B43legacy_IV_OFFSET_MASK 0x7FFF |
@@ -382,8 +382,8 @@ struct b43legacy_iv { | |||
382 | union { | 382 | union { |
383 | __be16 d16; | 383 | __be16 d16; |
384 | __be32 d32; | 384 | __be32 d32; |
385 | } data __attribute__((__packed__)); | 385 | } data __packed; |
386 | } __attribute__((__packed__)); | 386 | } __packed; |
387 | 387 | ||
388 | #define B43legacy_PHYMODE(phytype) (1 << (phytype)) | 388 | #define B43legacy_PHYMODE(phytype) (1 << (phytype)) |
389 | #define B43legacy_PHYMODE_B B43legacy_PHYMODE \ | 389 | #define B43legacy_PHYMODE_B B43legacy_PHYMODE \ |
diff --git a/drivers/net/wireless/b43legacy/dma.h b/drivers/net/wireless/b43legacy/dma.h index f9681041c2d8..f89c34226288 100644 --- a/drivers/net/wireless/b43legacy/dma.h +++ b/drivers/net/wireless/b43legacy/dma.h | |||
@@ -72,7 +72,7 @@ | |||
72 | struct b43legacy_dmadesc32 { | 72 | struct b43legacy_dmadesc32 { |
73 | __le32 control; | 73 | __le32 control; |
74 | __le32 address; | 74 | __le32 address; |
75 | } __attribute__((__packed__)); | 75 | } __packed; |
76 | #define B43legacy_DMA32_DCTL_BYTECNT 0x00001FFF | 76 | #define B43legacy_DMA32_DCTL_BYTECNT 0x00001FFF |
77 | #define B43legacy_DMA32_DCTL_ADDREXT_MASK 0x00030000 | 77 | #define B43legacy_DMA32_DCTL_ADDREXT_MASK 0x00030000 |
78 | #define B43legacy_DMA32_DCTL_ADDREXT_SHIFT 16 | 78 | #define B43legacy_DMA32_DCTL_ADDREXT_SHIFT 16 |
@@ -147,7 +147,7 @@ struct b43legacy_dmadesc64 { | |||
147 | __le32 control1; | 147 | __le32 control1; |
148 | __le32 address_low; | 148 | __le32 address_low; |
149 | __le32 address_high; | 149 | __le32 address_high; |
150 | } __attribute__((__packed__)); | 150 | } __packed; |
151 | #define B43legacy_DMA64_DCTL0_DTABLEEND 0x10000000 | 151 | #define B43legacy_DMA64_DCTL0_DTABLEEND 0x10000000 |
152 | #define B43legacy_DMA64_DCTL0_IRQ 0x20000000 | 152 | #define B43legacy_DMA64_DCTL0_IRQ 0x20000000 |
153 | #define B43legacy_DMA64_DCTL0_FRAMEEND 0x40000000 | 153 | #define B43legacy_DMA64_DCTL0_FRAMEEND 0x40000000 |
@@ -162,8 +162,8 @@ struct b43legacy_dmadesc_generic { | |||
162 | union { | 162 | union { |
163 | struct b43legacy_dmadesc32 dma32; | 163 | struct b43legacy_dmadesc32 dma32; |
164 | struct b43legacy_dmadesc64 dma64; | 164 | struct b43legacy_dmadesc64 dma64; |
165 | } __attribute__((__packed__)); | 165 | } __packed; |
166 | } __attribute__((__packed__)); | 166 | } __packed; |
167 | 167 | ||
168 | 168 | ||
169 | /* Misc DMA constants */ | 169 | /* Misc DMA constants */ |
diff --git a/drivers/net/wireless/b43legacy/xmit.h b/drivers/net/wireless/b43legacy/xmit.h index 91633087a20b..289db00a4a7b 100644 --- a/drivers/net/wireless/b43legacy/xmit.h +++ b/drivers/net/wireless/b43legacy/xmit.h | |||
@@ -9,8 +9,8 @@ | |||
9 | union { \ | 9 | union { \ |
10 | __le32 data; \ | 10 | __le32 data; \ |
11 | __u8 raw[size]; \ | 11 | __u8 raw[size]; \ |
12 | } __attribute__((__packed__)); \ | 12 | } __packed; \ |
13 | } __attribute__((__packed__)) | 13 | } __packed |
14 | 14 | ||
15 | /* struct b43legacy_plcp_hdr4 */ | 15 | /* struct b43legacy_plcp_hdr4 */ |
16 | _b43legacy_declare_plcp_hdr(4); | 16 | _b43legacy_declare_plcp_hdr(4); |
@@ -39,7 +39,7 @@ struct b43legacy_txhdr_fw3 { | |||
39 | struct b43legacy_plcp_hdr6 rts_plcp; /* RTS PLCP */ | 39 | struct b43legacy_plcp_hdr6 rts_plcp; /* RTS PLCP */ |
40 | __u8 rts_frame[18]; /* The RTS frame (if used) */ | 40 | __u8 rts_frame[18]; /* The RTS frame (if used) */ |
41 | struct b43legacy_plcp_hdr6 plcp; | 41 | struct b43legacy_plcp_hdr6 plcp; |
42 | } __attribute__((__packed__)); | 42 | } __packed; |
43 | 43 | ||
44 | /* MAC TX control */ | 44 | /* MAC TX control */ |
45 | #define B43legacy_TX4_MAC_KEYIDX 0x0FF00000 /* Security key index */ | 45 | #define B43legacy_TX4_MAC_KEYIDX 0x0FF00000 /* Security key index */ |
@@ -123,7 +123,7 @@ struct b43legacy_hwtxstatus { | |||
123 | __le16 seq; | 123 | __le16 seq; |
124 | u8 phy_stat; | 124 | u8 phy_stat; |
125 | PAD_BYTES(1); | 125 | PAD_BYTES(1); |
126 | } __attribute__((__packed__)); | 126 | } __packed; |
127 | 127 | ||
128 | 128 | ||
129 | /* Receive header for v3 firmware. */ | 129 | /* Receive header for v3 firmware. */ |
@@ -138,7 +138,7 @@ struct b43legacy_rxhdr_fw3 { | |||
138 | __le16 mac_status; /* MAC RX status */ | 138 | __le16 mac_status; /* MAC RX status */ |
139 | __le16 mac_time; | 139 | __le16 mac_time; |
140 | __le16 channel; | 140 | __le16 channel; |
141 | } __attribute__((__packed__)); | 141 | } __packed; |
142 | 142 | ||
143 | 143 | ||
144 | /* PHY RX Status 0 */ | 144 | /* PHY RX Status 0 */ |
diff --git a/drivers/net/wireless/hostap/hostap_80211.h b/drivers/net/wireless/hostap/hostap_80211.h index 7f9d8d976aa8..ed98ce7c8f65 100644 --- a/drivers/net/wireless/hostap/hostap_80211.h +++ b/drivers/net/wireless/hostap/hostap_80211.h | |||
@@ -19,35 +19,35 @@ struct hostap_ieee80211_mgmt { | |||
19 | __le16 status_code; | 19 | __le16 status_code; |
20 | /* possibly followed by Challenge text */ | 20 | /* possibly followed by Challenge text */ |
21 | u8 variable[0]; | 21 | u8 variable[0]; |
22 | } __attribute__ ((packed)) auth; | 22 | } __packed auth; |
23 | struct { | 23 | struct { |
24 | __le16 reason_code; | 24 | __le16 reason_code; |
25 | } __attribute__ ((packed)) deauth; | 25 | } __packed deauth; |
26 | struct { | 26 | struct { |
27 | __le16 capab_info; | 27 | __le16 capab_info; |
28 | __le16 listen_interval; | 28 | __le16 listen_interval; |
29 | /* followed by SSID and Supported rates */ | 29 | /* followed by SSID and Supported rates */ |
30 | u8 variable[0]; | 30 | u8 variable[0]; |
31 | } __attribute__ ((packed)) assoc_req; | 31 | } __packed assoc_req; |
32 | struct { | 32 | struct { |
33 | __le16 capab_info; | 33 | __le16 capab_info; |
34 | __le16 status_code; | 34 | __le16 status_code; |
35 | __le16 aid; | 35 | __le16 aid; |
36 | /* followed by Supported rates */ | 36 | /* followed by Supported rates */ |
37 | u8 variable[0]; | 37 | u8 variable[0]; |
38 | } __attribute__ ((packed)) assoc_resp, reassoc_resp; | 38 | } __packed assoc_resp, reassoc_resp; |
39 | struct { | 39 | struct { |
40 | __le16 capab_info; | 40 | __le16 capab_info; |
41 | __le16 listen_interval; | 41 | __le16 listen_interval; |
42 | u8 current_ap[6]; | 42 | u8 current_ap[6]; |
43 | /* followed by SSID and Supported rates */ | 43 | /* followed by SSID and Supported rates */ |
44 | u8 variable[0]; | 44 | u8 variable[0]; |
45 | } __attribute__ ((packed)) reassoc_req; | 45 | } __packed reassoc_req; |
46 | struct { | 46 | struct { |
47 | __le16 reason_code; | 47 | __le16 reason_code; |
48 | } __attribute__ ((packed)) disassoc; | 48 | } __packed disassoc; |
49 | struct { | 49 | struct { |
50 | } __attribute__ ((packed)) probe_req; | 50 | } __packed probe_req; |
51 | struct { | 51 | struct { |
52 | u8 timestamp[8]; | 52 | u8 timestamp[8]; |
53 | __le16 beacon_int; | 53 | __le16 beacon_int; |
@@ -55,9 +55,9 @@ struct hostap_ieee80211_mgmt { | |||
55 | /* followed by some of SSID, Supported rates, | 55 | /* followed by some of SSID, Supported rates, |
56 | * FH Params, DS Params, CF Params, IBSS Params, TIM */ | 56 | * FH Params, DS Params, CF Params, IBSS Params, TIM */ |
57 | u8 variable[0]; | 57 | u8 variable[0]; |
58 | } __attribute__ ((packed)) beacon, probe_resp; | 58 | } __packed beacon, probe_resp; |
59 | } u; | 59 | } u; |
60 | } __attribute__ ((packed)); | 60 | } __packed; |
61 | 61 | ||
62 | 62 | ||
63 | #define IEEE80211_MGMT_HDR_LEN 24 | 63 | #define IEEE80211_MGMT_HDR_LEN 24 |
diff --git a/drivers/net/wireless/hostap/hostap_common.h b/drivers/net/wireless/hostap/hostap_common.h index 90b64b092007..4230102ac9e4 100644 --- a/drivers/net/wireless/hostap/hostap_common.h +++ b/drivers/net/wireless/hostap/hostap_common.h | |||
@@ -179,7 +179,7 @@ struct hfa384x_comp_ident | |||
179 | __le16 variant; | 179 | __le16 variant; |
180 | __le16 major; | 180 | __le16 major; |
181 | __le16 minor; | 181 | __le16 minor; |
182 | } __attribute__ ((packed)); | 182 | } __packed; |
183 | 183 | ||
184 | #define HFA384X_COMP_ID_PRI 0x15 | 184 | #define HFA384X_COMP_ID_PRI 0x15 |
185 | #define HFA384X_COMP_ID_STA 0x1f | 185 | #define HFA384X_COMP_ID_STA 0x1f |
@@ -192,14 +192,14 @@ struct hfa384x_sup_range | |||
192 | __le16 variant; | 192 | __le16 variant; |
193 | __le16 bottom; | 193 | __le16 bottom; |
194 | __le16 top; | 194 | __le16 top; |
195 | } __attribute__ ((packed)); | 195 | } __packed; |
196 | 196 | ||
197 | 197 | ||
198 | struct hfa384x_build_id | 198 | struct hfa384x_build_id |
199 | { | 199 | { |
200 | __le16 pri_seq; | 200 | __le16 pri_seq; |
201 | __le16 sec_seq; | 201 | __le16 sec_seq; |
202 | } __attribute__ ((packed)); | 202 | } __packed; |
203 | 203 | ||
204 | /* FD01 - Download Buffer */ | 204 | /* FD01 - Download Buffer */ |
205 | struct hfa384x_rid_download_buffer | 205 | struct hfa384x_rid_download_buffer |
@@ -207,14 +207,14 @@ struct hfa384x_rid_download_buffer | |||
207 | __le16 page; | 207 | __le16 page; |
208 | __le16 offset; | 208 | __le16 offset; |
209 | __le16 length; | 209 | __le16 length; |
210 | } __attribute__ ((packed)); | 210 | } __packed; |
211 | 211 | ||
212 | /* BSS connection quality (RID FD43 range, RID FD51 dBm-normalized) */ | 212 | /* BSS connection quality (RID FD43 range, RID FD51 dBm-normalized) */ |
213 | struct hfa384x_comms_quality { | 213 | struct hfa384x_comms_quality { |
214 | __le16 comm_qual; /* 0 .. 92 */ | 214 | __le16 comm_qual; /* 0 .. 92 */ |
215 | __le16 signal_level; /* 27 .. 154 */ | 215 | __le16 signal_level; /* 27 .. 154 */ |
216 | __le16 noise_level; /* 27 .. 154 */ | 216 | __le16 noise_level; /* 27 .. 154 */ |
217 | } __attribute__ ((packed)); | 217 | } __packed; |
218 | 218 | ||
219 | 219 | ||
220 | /* netdevice private ioctls (used, e.g., with iwpriv from user space) */ | 220 | /* netdevice private ioctls (used, e.g., with iwpriv from user space) */ |
diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h index 3d238917af07..c02f8667a7e0 100644 --- a/drivers/net/wireless/hostap/hostap_wlan.h +++ b/drivers/net/wireless/hostap/hostap_wlan.h | |||
@@ -31,14 +31,14 @@ struct linux_wlan_ng_val { | |||
31 | u32 did; | 31 | u32 did; |
32 | u16 status, len; | 32 | u16 status, len; |
33 | u32 data; | 33 | u32 data; |
34 | } __attribute__ ((packed)); | 34 | } __packed; |
35 | 35 | ||
36 | struct linux_wlan_ng_prism_hdr { | 36 | struct linux_wlan_ng_prism_hdr { |
37 | u32 msgcode, msglen; | 37 | u32 msgcode, msglen; |
38 | char devname[16]; | 38 | char devname[16]; |
39 | struct linux_wlan_ng_val hosttime, mactime, channel, rssi, sq, signal, | 39 | struct linux_wlan_ng_val hosttime, mactime, channel, rssi, sq, signal, |
40 | noise, rate, istx, frmlen; | 40 | noise, rate, istx, frmlen; |
41 | } __attribute__ ((packed)); | 41 | } __packed; |
42 | 42 | ||
43 | struct linux_wlan_ng_cap_hdr { | 43 | struct linux_wlan_ng_cap_hdr { |
44 | __be32 version; | 44 | __be32 version; |
@@ -55,7 +55,7 @@ struct linux_wlan_ng_cap_hdr { | |||
55 | __be32 ssi_noise; | 55 | __be32 ssi_noise; |
56 | __be32 preamble; | 56 | __be32 preamble; |
57 | __be32 encoding; | 57 | __be32 encoding; |
58 | } __attribute__ ((packed)); | 58 | } __packed; |
59 | 59 | ||
60 | struct hostap_radiotap_rx { | 60 | struct hostap_radiotap_rx { |
61 | struct ieee80211_radiotap_header hdr; | 61 | struct ieee80211_radiotap_header hdr; |
@@ -66,7 +66,7 @@ struct hostap_radiotap_rx { | |||
66 | __le16 chan_flags; | 66 | __le16 chan_flags; |
67 | s8 dbm_antsignal; | 67 | s8 dbm_antsignal; |
68 | s8 dbm_antnoise; | 68 | s8 dbm_antnoise; |
69 | } __attribute__ ((packed)); | 69 | } __packed; |
70 | 70 | ||
71 | #define LWNG_CAP_DID_BASE (4 | (1 << 6)) /* section 4, group 1 */ | 71 | #define LWNG_CAP_DID_BASE (4 | (1 << 6)) /* section 4, group 1 */ |
72 | #define LWNG_CAPHDR_VERSION 0x80211001 | 72 | #define LWNG_CAPHDR_VERSION 0x80211001 |
@@ -97,7 +97,7 @@ struct hfa384x_rx_frame { | |||
97 | __be16 len; | 97 | __be16 len; |
98 | 98 | ||
99 | /* followed by frame data; max 2304 bytes */ | 99 | /* followed by frame data; max 2304 bytes */ |
100 | } __attribute__ ((packed)); | 100 | } __packed; |
101 | 101 | ||
102 | 102 | ||
103 | struct hfa384x_tx_frame { | 103 | struct hfa384x_tx_frame { |
@@ -126,14 +126,14 @@ struct hfa384x_tx_frame { | |||
126 | __be16 len; | 126 | __be16 len; |
127 | 127 | ||
128 | /* followed by frame data; max 2304 bytes */ | 128 | /* followed by frame data; max 2304 bytes */ |
129 | } __attribute__ ((packed)); | 129 | } __packed; |
130 | 130 | ||
131 | 131 | ||
132 | struct hfa384x_rid_hdr | 132 | struct hfa384x_rid_hdr |
133 | { | 133 | { |
134 | __le16 len; | 134 | __le16 len; |
135 | __le16 rid; | 135 | __le16 rid; |
136 | } __attribute__ ((packed)); | 136 | } __packed; |
137 | 137 | ||
138 | 138 | ||
139 | /* Macro for converting signal levels (range 27 .. 154) to wireless ext | 139 | /* Macro for converting signal levels (range 27 .. 154) to wireless ext |
@@ -145,24 +145,24 @@ struct hfa384x_rid_hdr | |||
145 | struct hfa384x_scan_request { | 145 | struct hfa384x_scan_request { |
146 | __le16 channel_list; | 146 | __le16 channel_list; |
147 | __le16 txrate; /* HFA384X_RATES_* */ | 147 | __le16 txrate; /* HFA384X_RATES_* */ |
148 | } __attribute__ ((packed)); | 148 | } __packed; |
149 | 149 | ||
150 | struct hfa384x_hostscan_request { | 150 | struct hfa384x_hostscan_request { |
151 | __le16 channel_list; | 151 | __le16 channel_list; |
152 | __le16 txrate; | 152 | __le16 txrate; |
153 | __le16 target_ssid_len; | 153 | __le16 target_ssid_len; |
154 | u8 target_ssid[32]; | 154 | u8 target_ssid[32]; |
155 | } __attribute__ ((packed)); | 155 | } __packed; |
156 | 156 | ||
157 | struct hfa384x_join_request { | 157 | struct hfa384x_join_request { |
158 | u8 bssid[6]; | 158 | u8 bssid[6]; |
159 | __le16 channel; | 159 | __le16 channel; |
160 | } __attribute__ ((packed)); | 160 | } __packed; |
161 | 161 | ||
162 | struct hfa384x_info_frame { | 162 | struct hfa384x_info_frame { |
163 | __le16 len; | 163 | __le16 len; |
164 | __le16 type; | 164 | __le16 type; |
165 | } __attribute__ ((packed)); | 165 | } __packed; |
166 | 166 | ||
167 | struct hfa384x_comm_tallies { | 167 | struct hfa384x_comm_tallies { |
168 | __le16 tx_unicast_frames; | 168 | __le16 tx_unicast_frames; |
@@ -186,7 +186,7 @@ struct hfa384x_comm_tallies { | |||
186 | __le16 rx_discards_wep_undecryptable; | 186 | __le16 rx_discards_wep_undecryptable; |
187 | __le16 rx_message_in_msg_fragments; | 187 | __le16 rx_message_in_msg_fragments; |
188 | __le16 rx_message_in_bad_msg_fragments; | 188 | __le16 rx_message_in_bad_msg_fragments; |
189 | } __attribute__ ((packed)); | 189 | } __packed; |
190 | 190 | ||
191 | struct hfa384x_comm_tallies32 { | 191 | struct hfa384x_comm_tallies32 { |
192 | __le32 tx_unicast_frames; | 192 | __le32 tx_unicast_frames; |
@@ -210,7 +210,7 @@ struct hfa384x_comm_tallies32 { | |||
210 | __le32 rx_discards_wep_undecryptable; | 210 | __le32 rx_discards_wep_undecryptable; |
211 | __le32 rx_message_in_msg_fragments; | 211 | __le32 rx_message_in_msg_fragments; |
212 | __le32 rx_message_in_bad_msg_fragments; | 212 | __le32 rx_message_in_bad_msg_fragments; |
213 | } __attribute__ ((packed)); | 213 | } __packed; |
214 | 214 | ||
215 | struct hfa384x_scan_result_hdr { | 215 | struct hfa384x_scan_result_hdr { |
216 | __le16 reserved; | 216 | __le16 reserved; |
@@ -219,7 +219,7 @@ struct hfa384x_scan_result_hdr { | |||
219 | #define HFA384X_SCAN_HOST_INITIATED 1 | 219 | #define HFA384X_SCAN_HOST_INITIATED 1 |
220 | #define HFA384X_SCAN_FIRMWARE_INITIATED 2 | 220 | #define HFA384X_SCAN_FIRMWARE_INITIATED 2 |
221 | #define HFA384X_SCAN_INQUIRY_FROM_HOST 3 | 221 | #define HFA384X_SCAN_INQUIRY_FROM_HOST 3 |
222 | } __attribute__ ((packed)); | 222 | } __packed; |
223 | 223 | ||
224 | #define HFA384X_SCAN_MAX_RESULTS 32 | 224 | #define HFA384X_SCAN_MAX_RESULTS 32 |
225 | 225 | ||
@@ -234,7 +234,7 @@ struct hfa384x_scan_result { | |||
234 | u8 ssid[32]; | 234 | u8 ssid[32]; |
235 | u8 sup_rates[10]; | 235 | u8 sup_rates[10]; |
236 | __le16 rate; | 236 | __le16 rate; |
237 | } __attribute__ ((packed)); | 237 | } __packed; |
238 | 238 | ||
239 | struct hfa384x_hostscan_result { | 239 | struct hfa384x_hostscan_result { |
240 | __le16 chid; | 240 | __le16 chid; |
@@ -248,7 +248,7 @@ struct hfa384x_hostscan_result { | |||
248 | u8 sup_rates[10]; | 248 | u8 sup_rates[10]; |
249 | __le16 rate; | 249 | __le16 rate; |
250 | __le16 atim; | 250 | __le16 atim; |
251 | } __attribute__ ((packed)); | 251 | } __packed; |
252 | 252 | ||
253 | struct comm_tallies_sums { | 253 | struct comm_tallies_sums { |
254 | unsigned int tx_unicast_frames; | 254 | unsigned int tx_unicast_frames; |
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index 0bd4dfa59a8a..4264fc091ada 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c | |||
@@ -5233,7 +5233,7 @@ struct security_info_params { | |||
5233 | u8 auth_mode; | 5233 | u8 auth_mode; |
5234 | u8 replay_counters_number; | 5234 | u8 replay_counters_number; |
5235 | u8 unicast_using_group; | 5235 | u8 unicast_using_group; |
5236 | } __attribute__ ((packed)); | 5236 | } __packed; |
5237 | 5237 | ||
5238 | static int ipw2100_set_security_information(struct ipw2100_priv *priv, | 5238 | static int ipw2100_set_security_information(struct ipw2100_priv *priv, |
5239 | int auth_mode, | 5239 | int auth_mode, |
@@ -8475,7 +8475,7 @@ struct ipw2100_fw_header { | |||
8475 | short mode; | 8475 | short mode; |
8476 | unsigned int fw_size; | 8476 | unsigned int fw_size; |
8477 | unsigned int uc_size; | 8477 | unsigned int uc_size; |
8478 | } __attribute__ ((packed)); | 8478 | } __packed; |
8479 | 8479 | ||
8480 | static int ipw2100_mod_firmware_load(struct ipw2100_fw *fw) | 8480 | static int ipw2100_mod_firmware_load(struct ipw2100_fw *fw) |
8481 | { | 8481 | { |
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.h b/drivers/net/wireless/ipw2x00/ipw2100.h index 1eab0d698f4d..838002b4881e 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.h +++ b/drivers/net/wireless/ipw2x00/ipw2100.h | |||
@@ -164,7 +164,7 @@ struct bd_status { | |||
164 | } fields; | 164 | } fields; |
165 | u8 field; | 165 | u8 field; |
166 | } info; | 166 | } info; |
167 | } __attribute__ ((packed)); | 167 | } __packed; |
168 | 168 | ||
169 | struct ipw2100_bd { | 169 | struct ipw2100_bd { |
170 | u32 host_addr; | 170 | u32 host_addr; |
@@ -174,7 +174,7 @@ struct ipw2100_bd { | |||
174 | * 1st TBD) */ | 174 | * 1st TBD) */ |
175 | u8 num_fragments; | 175 | u8 num_fragments; |
176 | u8 reserved[6]; | 176 | u8 reserved[6]; |
177 | } __attribute__ ((packed)); | 177 | } __packed; |
178 | 178 | ||
179 | #define IPW_BD_QUEUE_LENGTH(n) (1<<n) | 179 | #define IPW_BD_QUEUE_LENGTH(n) (1<<n) |
180 | #define IPW_BD_ALIGNMENT(L) (L*sizeof(struct ipw2100_bd)) | 180 | #define IPW_BD_ALIGNMENT(L) (L*sizeof(struct ipw2100_bd)) |
@@ -232,7 +232,7 @@ struct ipw2100_status { | |||
232 | #define IPW_STATUS_FLAG_WEP_ENCRYPTED (1<<1) | 232 | #define IPW_STATUS_FLAG_WEP_ENCRYPTED (1<<1) |
233 | #define IPW_STATUS_FLAG_CRC_ERROR (1<<2) | 233 | #define IPW_STATUS_FLAG_CRC_ERROR (1<<2) |
234 | u8 rssi; | 234 | u8 rssi; |
235 | } __attribute__ ((packed)); | 235 | } __packed; |
236 | 236 | ||
237 | struct ipw2100_status_queue { | 237 | struct ipw2100_status_queue { |
238 | /* driver (virtual) pointer to queue */ | 238 | /* driver (virtual) pointer to queue */ |
@@ -293,7 +293,7 @@ struct ipw2100_cmd_header { | |||
293 | u32 reserved1[3]; | 293 | u32 reserved1[3]; |
294 | u32 *ordinal1_ptr; | 294 | u32 *ordinal1_ptr; |
295 | u32 *ordinal2_ptr; | 295 | u32 *ordinal2_ptr; |
296 | } __attribute__ ((packed)); | 296 | } __packed; |
297 | 297 | ||
298 | struct ipw2100_data_header { | 298 | struct ipw2100_data_header { |
299 | u32 host_command_reg; | 299 | u32 host_command_reg; |
@@ -307,7 +307,7 @@ struct ipw2100_data_header { | |||
307 | u8 src_addr[ETH_ALEN]; | 307 | u8 src_addr[ETH_ALEN]; |
308 | u8 dst_addr[ETH_ALEN]; | 308 | u8 dst_addr[ETH_ALEN]; |
309 | u16 fragment_size; | 309 | u16 fragment_size; |
310 | } __attribute__ ((packed)); | 310 | } __packed; |
311 | 311 | ||
312 | /* Host command data structure */ | 312 | /* Host command data structure */ |
313 | struct host_command { | 313 | struct host_command { |
@@ -316,7 +316,7 @@ struct host_command { | |||
316 | u32 host_command_sequence; // UNIQUE COMMAND NUMBER (ID) | 316 | u32 host_command_sequence; // UNIQUE COMMAND NUMBER (ID) |
317 | u32 host_command_length; // LENGTH | 317 | u32 host_command_length; // LENGTH |
318 | u32 host_command_parameters[HOST_COMMAND_PARAMS_REG_LEN]; // COMMAND PARAMETERS | 318 | u32 host_command_parameters[HOST_COMMAND_PARAMS_REG_LEN]; // COMMAND PARAMETERS |
319 | } __attribute__ ((packed)); | 319 | } __packed; |
320 | 320 | ||
321 | typedef enum { | 321 | typedef enum { |
322 | POWER_ON_RESET, | 322 | POWER_ON_RESET, |
@@ -382,7 +382,7 @@ struct ipw2100_notification { | |||
382 | u32 hnhdr_size; /* size in bytes of data | 382 | u32 hnhdr_size; /* size in bytes of data |
383 | or number of entries, if table. | 383 | or number of entries, if table. |
384 | Does NOT include header */ | 384 | Does NOT include header */ |
385 | } __attribute__ ((packed)); | 385 | } __packed; |
386 | 386 | ||
387 | #define MAX_KEY_SIZE 16 | 387 | #define MAX_KEY_SIZE 16 |
388 | #define MAX_KEYS 8 | 388 | #define MAX_KEYS 8 |
@@ -814,7 +814,7 @@ struct ipw2100_rx { | |||
814 | struct ipw2100_notification notification; | 814 | struct ipw2100_notification notification; |
815 | struct ipw2100_cmd_header command; | 815 | struct ipw2100_cmd_header command; |
816 | } rx_data; | 816 | } rx_data; |
817 | } __attribute__ ((packed)); | 817 | } __packed; |
818 | 818 | ||
819 | /* Bit 0-7 are for 802.11b tx rates - . Bit 5-7 are reserved */ | 819 | /* Bit 0-7 are for 802.11b tx rates - . Bit 5-7 are reserved */ |
820 | #define TX_RATE_1_MBIT 0x0001 | 820 | #define TX_RATE_1_MBIT 0x0001 |
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.h b/drivers/net/wireless/ipw2x00/ipw2200.h index bf0eeb2e873a..d7d049c7a4fa 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.h +++ b/drivers/net/wireless/ipw2x00/ipw2200.h | |||
@@ -388,7 +388,7 @@ struct clx2_queue { | |||
388 | dma_addr_t dma_addr; /**< physical addr for BD's */ | 388 | dma_addr_t dma_addr; /**< physical addr for BD's */ |
389 | int low_mark; /**< low watermark, resume queue if free space more than this */ | 389 | int low_mark; /**< low watermark, resume queue if free space more than this */ |
390 | int high_mark; /**< high watermark, stop queue if free space less than this */ | 390 | int high_mark; /**< high watermark, stop queue if free space less than this */ |
391 | } __attribute__ ((packed)); /* XXX */ | 391 | } __packed; /* XXX */ |
392 | 392 | ||
393 | struct machdr32 { | 393 | struct machdr32 { |
394 | __le16 frame_ctl; | 394 | __le16 frame_ctl; |
@@ -399,7 +399,7 @@ struct machdr32 { | |||
399 | __le16 seq_ctrl; // more endians! | 399 | __le16 seq_ctrl; // more endians! |
400 | u8 addr4[MACADRR_BYTE_LEN]; | 400 | u8 addr4[MACADRR_BYTE_LEN]; |
401 | __le16 qos_ctrl; | 401 | __le16 qos_ctrl; |
402 | } __attribute__ ((packed)); | 402 | } __packed; |
403 | 403 | ||
404 | struct machdr30 { | 404 | struct machdr30 { |
405 | __le16 frame_ctl; | 405 | __le16 frame_ctl; |
@@ -409,7 +409,7 @@ struct machdr30 { | |||
409 | u8 addr3[MACADRR_BYTE_LEN]; | 409 | u8 addr3[MACADRR_BYTE_LEN]; |
410 | __le16 seq_ctrl; // more endians! | 410 | __le16 seq_ctrl; // more endians! |
411 | u8 addr4[MACADRR_BYTE_LEN]; | 411 | u8 addr4[MACADRR_BYTE_LEN]; |
412 | } __attribute__ ((packed)); | 412 | } __packed; |
413 | 413 | ||
414 | struct machdr26 { | 414 | struct machdr26 { |
415 | __le16 frame_ctl; | 415 | __le16 frame_ctl; |
@@ -419,7 +419,7 @@ struct machdr26 { | |||
419 | u8 addr3[MACADRR_BYTE_LEN]; | 419 | u8 addr3[MACADRR_BYTE_LEN]; |
420 | __le16 seq_ctrl; // more endians! | 420 | __le16 seq_ctrl; // more endians! |
421 | __le16 qos_ctrl; | 421 | __le16 qos_ctrl; |
422 | } __attribute__ ((packed)); | 422 | } __packed; |
423 | 423 | ||
424 | struct machdr24 { | 424 | struct machdr24 { |
425 | __le16 frame_ctl; | 425 | __le16 frame_ctl; |
@@ -428,20 +428,20 @@ struct machdr24 { | |||
428 | u8 addr2[MACADRR_BYTE_LEN]; | 428 | u8 addr2[MACADRR_BYTE_LEN]; |
429 | u8 addr3[MACADRR_BYTE_LEN]; | 429 | u8 addr3[MACADRR_BYTE_LEN]; |
430 | __le16 seq_ctrl; // more endians! | 430 | __le16 seq_ctrl; // more endians! |
431 | } __attribute__ ((packed)); | 431 | } __packed; |
432 | 432 | ||
433 | // TX TFD with 32 byte MAC Header | 433 | // TX TFD with 32 byte MAC Header |
434 | struct tx_tfd_32 { | 434 | struct tx_tfd_32 { |
435 | struct machdr32 mchdr; // 32 | 435 | struct machdr32 mchdr; // 32 |
436 | __le32 uivplaceholder[2]; // 8 | 436 | __le32 uivplaceholder[2]; // 8 |
437 | } __attribute__ ((packed)); | 437 | } __packed; |
438 | 438 | ||
439 | // TX TFD with 30 byte MAC Header | 439 | // TX TFD with 30 byte MAC Header |
440 | struct tx_tfd_30 { | 440 | struct tx_tfd_30 { |
441 | struct machdr30 mchdr; // 30 | 441 | struct machdr30 mchdr; // 30 |
442 | u8 reserved[2]; // 2 | 442 | u8 reserved[2]; // 2 |
443 | __le32 uivplaceholder[2]; // 8 | 443 | __le32 uivplaceholder[2]; // 8 |
444 | } __attribute__ ((packed)); | 444 | } __packed; |
445 | 445 | ||
446 | // tx tfd with 26 byte mac header | 446 | // tx tfd with 26 byte mac header |
447 | struct tx_tfd_26 { | 447 | struct tx_tfd_26 { |
@@ -449,14 +449,14 @@ struct tx_tfd_26 { | |||
449 | u8 reserved1[2]; // 2 | 449 | u8 reserved1[2]; // 2 |
450 | __le32 uivplaceholder[2]; // 8 | 450 | __le32 uivplaceholder[2]; // 8 |
451 | u8 reserved2[4]; // 4 | 451 | u8 reserved2[4]; // 4 |
452 | } __attribute__ ((packed)); | 452 | } __packed; |
453 | 453 | ||
454 | // tx tfd with 24 byte mac header | 454 | // tx tfd with 24 byte mac header |
455 | struct tx_tfd_24 { | 455 | struct tx_tfd_24 { |
456 | struct machdr24 mchdr; // 24 | 456 | struct machdr24 mchdr; // 24 |
457 | __le32 uivplaceholder[2]; // 8 | 457 | __le32 uivplaceholder[2]; // 8 |
458 | u8 reserved[8]; // 8 | 458 | u8 reserved[8]; // 8 |
459 | } __attribute__ ((packed)); | 459 | } __packed; |
460 | 460 | ||
461 | #define DCT_WEP_KEY_FIELD_LENGTH 16 | 461 | #define DCT_WEP_KEY_FIELD_LENGTH 16 |
462 | 462 | ||
@@ -465,7 +465,7 @@ struct tfd_command { | |||
465 | u8 length; | 465 | u8 length; |
466 | __le16 reserved; | 466 | __le16 reserved; |
467 | u8 payload[0]; | 467 | u8 payload[0]; |
468 | } __attribute__ ((packed)); | 468 | } __packed; |
469 | 469 | ||
470 | struct tfd_data { | 470 | struct tfd_data { |
471 | /* Header */ | 471 | /* Header */ |
@@ -504,14 +504,14 @@ struct tfd_data { | |||
504 | __le32 num_chunks; | 504 | __le32 num_chunks; |
505 | __le32 chunk_ptr[NUM_TFD_CHUNKS]; | 505 | __le32 chunk_ptr[NUM_TFD_CHUNKS]; |
506 | __le16 chunk_len[NUM_TFD_CHUNKS]; | 506 | __le16 chunk_len[NUM_TFD_CHUNKS]; |
507 | } __attribute__ ((packed)); | 507 | } __packed; |
508 | 508 | ||
509 | struct txrx_control_flags { | 509 | struct txrx_control_flags { |
510 | u8 message_type; | 510 | u8 message_type; |
511 | u8 rx_seq_num; | 511 | u8 rx_seq_num; |
512 | u8 control_bits; | 512 | u8 control_bits; |
513 | u8 reserved; | 513 | u8 reserved; |
514 | } __attribute__ ((packed)); | 514 | } __packed; |
515 | 515 | ||
516 | #define TFD_SIZE 128 | 516 | #define TFD_SIZE 128 |
517 | #define TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH (TFD_SIZE - sizeof(struct txrx_control_flags)) | 517 | #define TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH (TFD_SIZE - sizeof(struct txrx_control_flags)) |
@@ -523,7 +523,7 @@ struct tfd_frame { | |||
523 | struct tfd_command cmd; | 523 | struct tfd_command cmd; |
524 | u8 raw[TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH]; | 524 | u8 raw[TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH]; |
525 | } u; | 525 | } u; |
526 | } __attribute__ ((packed)); | 526 | } __packed; |
527 | 527 | ||
528 | typedef void destructor_func(const void *); | 528 | typedef void destructor_func(const void *); |
529 | 529 | ||
@@ -559,7 +559,7 @@ struct rate_histogram { | |||
559 | __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS]; | 559 | __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS]; |
560 | __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS]; | 560 | __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS]; |
561 | } failed; | 561 | } failed; |
562 | } __attribute__ ((packed)); | 562 | } __packed; |
563 | 563 | ||
564 | /* statistics command response */ | 564 | /* statistics command response */ |
565 | struct ipw_cmd_stats { | 565 | struct ipw_cmd_stats { |
@@ -586,13 +586,13 @@ struct ipw_cmd_stats { | |||
586 | __le16 rx_autodetec_no_ofdm; | 586 | __le16 rx_autodetec_no_ofdm; |
587 | __le16 rx_autodetec_no_barker; | 587 | __le16 rx_autodetec_no_barker; |
588 | __le16 reserved; | 588 | __le16 reserved; |
589 | } __attribute__ ((packed)); | 589 | } __packed; |
590 | 590 | ||
591 | struct notif_channel_result { | 591 | struct notif_channel_result { |
592 | u8 channel_num; | 592 | u8 channel_num; |
593 | struct ipw_cmd_stats stats; | 593 | struct ipw_cmd_stats stats; |
594 | u8 uReserved; | 594 | u8 uReserved; |
595 | } __attribute__ ((packed)); | 595 | } __packed; |
596 | 596 | ||
597 | #define SCAN_COMPLETED_STATUS_COMPLETE 1 | 597 | #define SCAN_COMPLETED_STATUS_COMPLETE 1 |
598 | #define SCAN_COMPLETED_STATUS_ABORTED 2 | 598 | #define SCAN_COMPLETED_STATUS_ABORTED 2 |
@@ -602,24 +602,24 @@ struct notif_scan_complete { | |||
602 | u8 num_channels; | 602 | u8 num_channels; |
603 | u8 status; | 603 | u8 status; |
604 | u8 reserved; | 604 | u8 reserved; |
605 | } __attribute__ ((packed)); | 605 | } __packed; |
606 | 606 | ||
607 | struct notif_frag_length { | 607 | struct notif_frag_length { |
608 | __le16 frag_length; | 608 | __le16 frag_length; |
609 | __le16 reserved; | 609 | __le16 reserved; |
610 | } __attribute__ ((packed)); | 610 | } __packed; |
611 | 611 | ||
612 | struct notif_beacon_state { | 612 | struct notif_beacon_state { |
613 | __le32 state; | 613 | __le32 state; |
614 | __le32 number; | 614 | __le32 number; |
615 | } __attribute__ ((packed)); | 615 | } __packed; |
616 | 616 | ||
617 | struct notif_tgi_tx_key { | 617 | struct notif_tgi_tx_key { |
618 | u8 key_state; | 618 | u8 key_state; |
619 | u8 security_type; | 619 | u8 security_type; |
620 | u8 station_index; | 620 | u8 station_index; |
621 | u8 reserved; | 621 | u8 reserved; |
622 | } __attribute__ ((packed)); | 622 | } __packed; |
623 | 623 | ||
624 | #define SILENCE_OVER_THRESH (1) | 624 | #define SILENCE_OVER_THRESH (1) |
625 | #define SILENCE_UNDER_THRESH (2) | 625 | #define SILENCE_UNDER_THRESH (2) |
@@ -631,25 +631,25 @@ struct notif_link_deterioration { | |||
631 | struct rate_histogram histogram; | 631 | struct rate_histogram histogram; |
632 | u8 silence_notification_type; /* SILENCE_OVER/UNDER_THRESH */ | 632 | u8 silence_notification_type; /* SILENCE_OVER/UNDER_THRESH */ |
633 | __le16 silence_count; | 633 | __le16 silence_count; |
634 | } __attribute__ ((packed)); | 634 | } __packed; |
635 | 635 | ||
636 | struct notif_association { | 636 | struct notif_association { |
637 | u8 state; | 637 | u8 state; |
638 | } __attribute__ ((packed)); | 638 | } __packed; |
639 | 639 | ||
640 | struct notif_authenticate { | 640 | struct notif_authenticate { |
641 | u8 state; | 641 | u8 state; |
642 | struct machdr24 addr; | 642 | struct machdr24 addr; |
643 | __le16 status; | 643 | __le16 status; |
644 | } __attribute__ ((packed)); | 644 | } __packed; |
645 | 645 | ||
646 | struct notif_calibration { | 646 | struct notif_calibration { |
647 | u8 data[104]; | 647 | u8 data[104]; |
648 | } __attribute__ ((packed)); | 648 | } __packed; |
649 | 649 | ||
650 | struct notif_noise { | 650 | struct notif_noise { |
651 | __le32 value; | 651 | __le32 value; |
652 | } __attribute__ ((packed)); | 652 | } __packed; |
653 | 653 | ||
654 | struct ipw_rx_notification { | 654 | struct ipw_rx_notification { |
655 | u8 reserved[8]; | 655 | u8 reserved[8]; |
@@ -669,7 +669,7 @@ struct ipw_rx_notification { | |||
669 | struct notif_noise noise; | 669 | struct notif_noise noise; |
670 | u8 raw[0]; | 670 | u8 raw[0]; |
671 | } u; | 671 | } u; |
672 | } __attribute__ ((packed)); | 672 | } __packed; |
673 | 673 | ||
674 | struct ipw_rx_frame { | 674 | struct ipw_rx_frame { |
675 | __le32 reserved1; | 675 | __le32 reserved1; |
@@ -692,14 +692,14 @@ struct ipw_rx_frame { | |||
692 | u8 rtscts_seen; // 0x1 RTS seen ; 0x2 CTS seen | 692 | u8 rtscts_seen; // 0x1 RTS seen ; 0x2 CTS seen |
693 | __le16 length; | 693 | __le16 length; |
694 | u8 data[0]; | 694 | u8 data[0]; |
695 | } __attribute__ ((packed)); | 695 | } __packed; |
696 | 696 | ||
697 | struct ipw_rx_header { | 697 | struct ipw_rx_header { |
698 | u8 message_type; | 698 | u8 message_type; |
699 | u8 rx_seq_num; | 699 | u8 rx_seq_num; |
700 | u8 control_bits; | 700 | u8 control_bits; |
701 | u8 reserved; | 701 | u8 reserved; |
702 | } __attribute__ ((packed)); | 702 | } __packed; |
703 | 703 | ||
704 | struct ipw_rx_packet { | 704 | struct ipw_rx_packet { |
705 | struct ipw_rx_header header; | 705 | struct ipw_rx_header header; |
@@ -707,7 +707,7 @@ struct ipw_rx_packet { | |||
707 | struct ipw_rx_frame frame; | 707 | struct ipw_rx_frame frame; |
708 | struct ipw_rx_notification notification; | 708 | struct ipw_rx_notification notification; |
709 | } u; | 709 | } u; |
710 | } __attribute__ ((packed)); | 710 | } __packed; |
711 | 711 | ||
712 | #define IPW_RX_NOTIFICATION_SIZE sizeof(struct ipw_rx_header) + 12 | 712 | #define IPW_RX_NOTIFICATION_SIZE sizeof(struct ipw_rx_header) + 12 |
713 | #define IPW_RX_FRAME_SIZE (unsigned int)(sizeof(struct ipw_rx_header) + \ | 713 | #define IPW_RX_FRAME_SIZE (unsigned int)(sizeof(struct ipw_rx_header) + \ |
@@ -717,7 +717,7 @@ struct ipw_rx_mem_buffer { | |||
717 | dma_addr_t dma_addr; | 717 | dma_addr_t dma_addr; |
718 | struct sk_buff *skb; | 718 | struct sk_buff *skb; |
719 | struct list_head list; | 719 | struct list_head list; |
720 | }; /* Not transferred over network, so not __attribute__ ((packed)) */ | 720 | }; /* Not transferred over network, so not __packed */ |
721 | 721 | ||
722 | struct ipw_rx_queue { | 722 | struct ipw_rx_queue { |
723 | struct ipw_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; | 723 | struct ipw_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; |
@@ -730,7 +730,7 @@ struct ipw_rx_queue { | |||
730 | struct list_head rx_free; /* Own an SKBs */ | 730 | struct list_head rx_free; /* Own an SKBs */ |
731 | struct list_head rx_used; /* No SKB allocated */ | 731 | struct list_head rx_used; /* No SKB allocated */ |
732 | spinlock_t lock; | 732 | spinlock_t lock; |
733 | }; /* Not transferred over network, so not __attribute__ ((packed)) */ | 733 | }; /* Not transferred over network, so not __packed */ |
734 | 734 | ||
735 | struct alive_command_responce { | 735 | struct alive_command_responce { |
736 | u8 alive_command; | 736 | u8 alive_command; |
@@ -745,21 +745,21 @@ struct alive_command_responce { | |||
745 | __le16 reserved4; | 745 | __le16 reserved4; |
746 | u8 time_stamp[5]; /* month, day, year, hours, minutes */ | 746 | u8 time_stamp[5]; /* month, day, year, hours, minutes */ |
747 | u8 ucode_valid; | 747 | u8 ucode_valid; |
748 | } __attribute__ ((packed)); | 748 | } __packed; |
749 | 749 | ||
750 | #define IPW_MAX_RATES 12 | 750 | #define IPW_MAX_RATES 12 |
751 | 751 | ||
752 | struct ipw_rates { | 752 | struct ipw_rates { |
753 | u8 num_rates; | 753 | u8 num_rates; |
754 | u8 rates[IPW_MAX_RATES]; | 754 | u8 rates[IPW_MAX_RATES]; |
755 | } __attribute__ ((packed)); | 755 | } __packed; |
756 | 756 | ||
757 | struct command_block { | 757 | struct command_block { |
758 | unsigned int control; | 758 | unsigned int control; |
759 | u32 source_addr; | 759 | u32 source_addr; |
760 | u32 dest_addr; | 760 | u32 dest_addr; |
761 | unsigned int status; | 761 | unsigned int status; |
762 | } __attribute__ ((packed)); | 762 | } __packed; |
763 | 763 | ||
764 | #define CB_NUMBER_OF_ELEMENTS_SMALL 64 | 764 | #define CB_NUMBER_OF_ELEMENTS_SMALL 64 |
765 | struct fw_image_desc { | 765 | struct fw_image_desc { |
@@ -792,7 +792,7 @@ struct ipw_sys_config { | |||
792 | u8 accept_all_mgmt_frames; | 792 | u8 accept_all_mgmt_frames; |
793 | u8 pass_noise_stats_to_host; | 793 | u8 pass_noise_stats_to_host; |
794 | u8 reserved3; | 794 | u8 reserved3; |
795 | } __attribute__ ((packed)); | 795 | } __packed; |
796 | 796 | ||
797 | struct ipw_multicast_addr { | 797 | struct ipw_multicast_addr { |
798 | u8 num_of_multicast_addresses; | 798 | u8 num_of_multicast_addresses; |
@@ -801,7 +801,7 @@ struct ipw_multicast_addr { | |||
801 | u8 mac2[6]; | 801 | u8 mac2[6]; |
802 | u8 mac3[6]; | 802 | u8 mac3[6]; |
803 | u8 mac4[6]; | 803 | u8 mac4[6]; |
804 | } __attribute__ ((packed)); | 804 | } __packed; |
805 | 805 | ||
806 | #define DCW_WEP_KEY_INDEX_MASK 0x03 /* bits [0:1] */ | 806 | #define DCW_WEP_KEY_INDEX_MASK 0x03 /* bits [0:1] */ |
807 | #define DCW_WEP_KEY_SEC_TYPE_MASK 0x30 /* bits [4:5] */ | 807 | #define DCW_WEP_KEY_SEC_TYPE_MASK 0x30 /* bits [4:5] */ |
@@ -822,7 +822,7 @@ struct ipw_wep_key { | |||
822 | u8 key_index; | 822 | u8 key_index; |
823 | u8 key_size; | 823 | u8 key_size; |
824 | u8 key[16]; | 824 | u8 key[16]; |
825 | } __attribute__ ((packed)); | 825 | } __packed; |
826 | 826 | ||
827 | struct ipw_tgi_tx_key { | 827 | struct ipw_tgi_tx_key { |
828 | u8 key_id; | 828 | u8 key_id; |
@@ -831,7 +831,7 @@ struct ipw_tgi_tx_key { | |||
831 | u8 flags; | 831 | u8 flags; |
832 | u8 key[16]; | 832 | u8 key[16]; |
833 | __le32 tx_counter[2]; | 833 | __le32 tx_counter[2]; |
834 | } __attribute__ ((packed)); | 834 | } __packed; |
835 | 835 | ||
836 | #define IPW_SCAN_CHANNELS 54 | 836 | #define IPW_SCAN_CHANNELS 54 |
837 | 837 | ||
@@ -840,7 +840,7 @@ struct ipw_scan_request { | |||
840 | __le16 dwell_time; | 840 | __le16 dwell_time; |
841 | u8 channels_list[IPW_SCAN_CHANNELS]; | 841 | u8 channels_list[IPW_SCAN_CHANNELS]; |
842 | u8 channels_reserved[3]; | 842 | u8 channels_reserved[3]; |
843 | } __attribute__ ((packed)); | 843 | } __packed; |
844 | 844 | ||
845 | enum { | 845 | enum { |
846 | IPW_SCAN_PASSIVE_TILL_FIRST_BEACON_SCAN = 0, | 846 | IPW_SCAN_PASSIVE_TILL_FIRST_BEACON_SCAN = 0, |
@@ -857,7 +857,7 @@ struct ipw_scan_request_ext { | |||
857 | u8 scan_type[IPW_SCAN_CHANNELS / 2]; | 857 | u8 scan_type[IPW_SCAN_CHANNELS / 2]; |
858 | u8 reserved; | 858 | u8 reserved; |
859 | __le16 dwell_time[IPW_SCAN_TYPES]; | 859 | __le16 dwell_time[IPW_SCAN_TYPES]; |
860 | } __attribute__ ((packed)); | 860 | } __packed; |
861 | 861 | ||
862 | static inline u8 ipw_get_scan_type(struct ipw_scan_request_ext *scan, u8 index) | 862 | static inline u8 ipw_get_scan_type(struct ipw_scan_request_ext *scan, u8 index) |
863 | { | 863 | { |
@@ -902,7 +902,7 @@ struct ipw_associate { | |||
902 | u8 smr; | 902 | u8 smr; |
903 | u8 reserved1; | 903 | u8 reserved1; |
904 | __le16 reserved2; | 904 | __le16 reserved2; |
905 | } __attribute__ ((packed)); | 905 | } __packed; |
906 | 906 | ||
907 | struct ipw_supported_rates { | 907 | struct ipw_supported_rates { |
908 | u8 ieee_mode; | 908 | u8 ieee_mode; |
@@ -910,36 +910,36 @@ struct ipw_supported_rates { | |||
910 | u8 purpose; | 910 | u8 purpose; |
911 | u8 reserved; | 911 | u8 reserved; |
912 | u8 supported_rates[IPW_MAX_RATES]; | 912 | u8 supported_rates[IPW_MAX_RATES]; |
913 | } __attribute__ ((packed)); | 913 | } __packed; |
914 | 914 | ||
915 | struct ipw_rts_threshold { | 915 | struct ipw_rts_threshold { |
916 | __le16 rts_threshold; | 916 | __le16 rts_threshold; |
917 | __le16 reserved; | 917 | __le16 reserved; |
918 | } __attribute__ ((packed)); | 918 | } __packed; |
919 | 919 | ||
920 | struct ipw_frag_threshold { | 920 | struct ipw_frag_threshold { |
921 | __le16 frag_threshold; | 921 | __le16 frag_threshold; |
922 | __le16 reserved; | 922 | __le16 reserved; |
923 | } __attribute__ ((packed)); | 923 | } __packed; |
924 | 924 | ||
925 | struct ipw_retry_limit { | 925 | struct ipw_retry_limit { |
926 | u8 short_retry_limit; | 926 | u8 short_retry_limit; |
927 | u8 long_retry_limit; | 927 | u8 long_retry_limit; |
928 | __le16 reserved; | 928 | __le16 reserved; |
929 | } __attribute__ ((packed)); | 929 | } __packed; |
930 | 930 | ||
931 | struct ipw_dino_config { | 931 | struct ipw_dino_config { |
932 | __le32 dino_config_addr; | 932 | __le32 dino_config_addr; |
933 | __le16 dino_config_size; | 933 | __le16 dino_config_size; |
934 | u8 dino_response; | 934 | u8 dino_response; |
935 | u8 reserved; | 935 | u8 reserved; |
936 | } __attribute__ ((packed)); | 936 | } __packed; |
937 | 937 | ||
938 | struct ipw_aironet_info { | 938 | struct ipw_aironet_info { |
939 | u8 id; | 939 | u8 id; |
940 | u8 length; | 940 | u8 length; |
941 | __le16 reserved; | 941 | __le16 reserved; |
942 | } __attribute__ ((packed)); | 942 | } __packed; |
943 | 943 | ||
944 | struct ipw_rx_key { | 944 | struct ipw_rx_key { |
945 | u8 station_index; | 945 | u8 station_index; |
@@ -950,25 +950,25 @@ struct ipw_rx_key { | |||
950 | u8 station_address[6]; | 950 | u8 station_address[6]; |
951 | u8 key_index; | 951 | u8 key_index; |
952 | u8 reserved; | 952 | u8 reserved; |
953 | } __attribute__ ((packed)); | 953 | } __packed; |
954 | 954 | ||
955 | struct ipw_country_channel_info { | 955 | struct ipw_country_channel_info { |
956 | u8 first_channel; | 956 | u8 first_channel; |
957 | u8 no_channels; | 957 | u8 no_channels; |
958 | s8 max_tx_power; | 958 | s8 max_tx_power; |
959 | } __attribute__ ((packed)); | 959 | } __packed; |
960 | 960 | ||
961 | struct ipw_country_info { | 961 | struct ipw_country_info { |
962 | u8 id; | 962 | u8 id; |
963 | u8 length; | 963 | u8 length; |
964 | u8 country_str[3]; | 964 | u8 country_str[3]; |
965 | struct ipw_country_channel_info groups[7]; | 965 | struct ipw_country_channel_info groups[7]; |
966 | } __attribute__ ((packed)); | 966 | } __packed; |
967 | 967 | ||
968 | struct ipw_channel_tx_power { | 968 | struct ipw_channel_tx_power { |
969 | u8 channel_number; | 969 | u8 channel_number; |
970 | s8 tx_power; | 970 | s8 tx_power; |
971 | } __attribute__ ((packed)); | 971 | } __packed; |
972 | 972 | ||
973 | #define SCAN_ASSOCIATED_INTERVAL (HZ) | 973 | #define SCAN_ASSOCIATED_INTERVAL (HZ) |
974 | #define SCAN_INTERVAL (HZ / 10) | 974 | #define SCAN_INTERVAL (HZ / 10) |
@@ -979,18 +979,18 @@ struct ipw_tx_power { | |||
979 | u8 num_channels; | 979 | u8 num_channels; |
980 | u8 ieee_mode; | 980 | u8 ieee_mode; |
981 | struct ipw_channel_tx_power channels_tx_power[MAX_A_CHANNELS]; | 981 | struct ipw_channel_tx_power channels_tx_power[MAX_A_CHANNELS]; |
982 | } __attribute__ ((packed)); | 982 | } __packed; |
983 | 983 | ||
984 | struct ipw_rsn_capabilities { | 984 | struct ipw_rsn_capabilities { |
985 | u8 id; | 985 | u8 id; |
986 | u8 length; | 986 | u8 length; |
987 | __le16 version; | 987 | __le16 version; |
988 | } __attribute__ ((packed)); | 988 | } __packed; |
989 | 989 | ||
990 | struct ipw_sensitivity_calib { | 990 | struct ipw_sensitivity_calib { |
991 | __le16 beacon_rssi_raw; | 991 | __le16 beacon_rssi_raw; |
992 | __le16 reserved; | 992 | __le16 reserved; |
993 | } __attribute__ ((packed)); | 993 | } __packed; |
994 | 994 | ||
995 | /** | 995 | /** |
996 | * Host command structure. | 996 | * Host command structure. |
@@ -1019,7 +1019,7 @@ struct ipw_cmd { /* XXX */ | |||
1019 | * nParams=(len+3)/4+status_len | 1019 | * nParams=(len+3)/4+status_len |
1020 | */ | 1020 | */ |
1021 | u32 param[0]; | 1021 | u32 param[0]; |
1022 | } __attribute__ ((packed)); | 1022 | } __packed; |
1023 | 1023 | ||
1024 | #define STATUS_HCMD_ACTIVE (1<<0) /**< host command in progress */ | 1024 | #define STATUS_HCMD_ACTIVE (1<<0) /**< host command in progress */ |
1025 | 1025 | ||
@@ -1114,7 +1114,7 @@ struct ipw_event { /* XXX */ | |||
1114 | u32 event; | 1114 | u32 event; |
1115 | u32 time; | 1115 | u32 time; |
1116 | u32 data; | 1116 | u32 data; |
1117 | } __attribute__ ((packed)); | 1117 | } __packed; |
1118 | 1118 | ||
1119 | struct ipw_fw_error { /* XXX */ | 1119 | struct ipw_fw_error { /* XXX */ |
1120 | unsigned long jiffies; | 1120 | unsigned long jiffies; |
@@ -1125,7 +1125,7 @@ struct ipw_fw_error { /* XXX */ | |||
1125 | struct ipw_error_elem *elem; | 1125 | struct ipw_error_elem *elem; |
1126 | struct ipw_event *log; | 1126 | struct ipw_event *log; |
1127 | u8 payload[0]; | 1127 | u8 payload[0]; |
1128 | } __attribute__ ((packed)); | 1128 | } __packed; |
1129 | 1129 | ||
1130 | #ifdef CONFIG_IPW2200_PROMISCUOUS | 1130 | #ifdef CONFIG_IPW2200_PROMISCUOUS |
1131 | 1131 | ||
@@ -1170,7 +1170,7 @@ struct ipw_rt_hdr { | |||
1170 | s8 rt_dbmnoise; | 1170 | s8 rt_dbmnoise; |
1171 | u8 rt_antenna; /* antenna number */ | 1171 | u8 rt_antenna; /* antenna number */ |
1172 | u8 payload[0]; /* payload... */ | 1172 | u8 payload[0]; /* payload... */ |
1173 | } __attribute__ ((packed)); | 1173 | } __packed; |
1174 | #endif | 1174 | #endif |
1175 | 1175 | ||
1176 | struct ipw_priv { | 1176 | struct ipw_priv { |
@@ -1957,7 +1957,7 @@ enum { | |||
1957 | struct ipw_fixed_rate { | 1957 | struct ipw_fixed_rate { |
1958 | __le16 tx_rates; | 1958 | __le16 tx_rates; |
1959 | __le16 reserved; | 1959 | __le16 reserved; |
1960 | } __attribute__ ((packed)); | 1960 | } __packed; |
1961 | 1961 | ||
1962 | #define IPW_INDIRECT_ADDR_MASK (~0x3ul) | 1962 | #define IPW_INDIRECT_ADDR_MASK (~0x3ul) |
1963 | 1963 | ||
@@ -1966,14 +1966,14 @@ struct host_cmd { | |||
1966 | u8 len; | 1966 | u8 len; |
1967 | u16 reserved; | 1967 | u16 reserved; |
1968 | u32 *param; | 1968 | u32 *param; |
1969 | } __attribute__ ((packed)); /* XXX */ | 1969 | } __packed; /* XXX */ |
1970 | 1970 | ||
1971 | struct cmdlog_host_cmd { | 1971 | struct cmdlog_host_cmd { |
1972 | u8 cmd; | 1972 | u8 cmd; |
1973 | u8 len; | 1973 | u8 len; |
1974 | __le16 reserved; | 1974 | __le16 reserved; |
1975 | char param[124]; | 1975 | char param[124]; |
1976 | } __attribute__ ((packed)); | 1976 | } __packed; |
1977 | 1977 | ||
1978 | struct ipw_cmd_log { | 1978 | struct ipw_cmd_log { |
1979 | unsigned long jiffies; | 1979 | unsigned long jiffies; |
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h index 284b0e4cb815..4736861bc4f8 100644 --- a/drivers/net/wireless/ipw2x00/libipw.h +++ b/drivers/net/wireless/ipw2x00/libipw.h | |||
@@ -154,7 +154,7 @@ struct libipw_snap_hdr { | |||
154 | u8 ctrl; /* always 0x03 */ | 154 | u8 ctrl; /* always 0x03 */ |
155 | u8 oui[P80211_OUI_LEN]; /* organizational universal id */ | 155 | u8 oui[P80211_OUI_LEN]; /* organizational universal id */ |
156 | 156 | ||
157 | } __attribute__ ((packed)); | 157 | } __packed; |
158 | 158 | ||
159 | #define SNAP_SIZE sizeof(struct libipw_snap_hdr) | 159 | #define SNAP_SIZE sizeof(struct libipw_snap_hdr) |
160 | 160 | ||
@@ -323,7 +323,7 @@ struct libipw_security { | |||
323 | u8 keys[WEP_KEYS][SCM_KEY_LEN]; | 323 | u8 keys[WEP_KEYS][SCM_KEY_LEN]; |
324 | u8 level; | 324 | u8 level; |
325 | u16 flags; | 325 | u16 flags; |
326 | } __attribute__ ((packed)); | 326 | } __packed; |
327 | 327 | ||
328 | /* | 328 | /* |
329 | 329 | ||
@@ -347,7 +347,7 @@ struct libipw_hdr_1addr { | |||
347 | __le16 duration_id; | 347 | __le16 duration_id; |
348 | u8 addr1[ETH_ALEN]; | 348 | u8 addr1[ETH_ALEN]; |
349 | u8 payload[0]; | 349 | u8 payload[0]; |
350 | } __attribute__ ((packed)); | 350 | } __packed; |
351 | 351 | ||
352 | struct libipw_hdr_2addr { | 352 | struct libipw_hdr_2addr { |
353 | __le16 frame_ctl; | 353 | __le16 frame_ctl; |
@@ -355,7 +355,7 @@ struct libipw_hdr_2addr { | |||
355 | u8 addr1[ETH_ALEN]; | 355 | u8 addr1[ETH_ALEN]; |
356 | u8 addr2[ETH_ALEN]; | 356 | u8 addr2[ETH_ALEN]; |
357 | u8 payload[0]; | 357 | u8 payload[0]; |
358 | } __attribute__ ((packed)); | 358 | } __packed; |
359 | 359 | ||
360 | struct libipw_hdr_3addr { | 360 | struct libipw_hdr_3addr { |
361 | __le16 frame_ctl; | 361 | __le16 frame_ctl; |
@@ -365,7 +365,7 @@ struct libipw_hdr_3addr { | |||
365 | u8 addr3[ETH_ALEN]; | 365 | u8 addr3[ETH_ALEN]; |
366 | __le16 seq_ctl; | 366 | __le16 seq_ctl; |
367 | u8 payload[0]; | 367 | u8 payload[0]; |
368 | } __attribute__ ((packed)); | 368 | } __packed; |
369 | 369 | ||
370 | struct libipw_hdr_4addr { | 370 | struct libipw_hdr_4addr { |
371 | __le16 frame_ctl; | 371 | __le16 frame_ctl; |
@@ -376,7 +376,7 @@ struct libipw_hdr_4addr { | |||
376 | __le16 seq_ctl; | 376 | __le16 seq_ctl; |
377 | u8 addr4[ETH_ALEN]; | 377 | u8 addr4[ETH_ALEN]; |
378 | u8 payload[0]; | 378 | u8 payload[0]; |
379 | } __attribute__ ((packed)); | 379 | } __packed; |
380 | 380 | ||
381 | struct libipw_hdr_3addrqos { | 381 | struct libipw_hdr_3addrqos { |
382 | __le16 frame_ctl; | 382 | __le16 frame_ctl; |
@@ -387,13 +387,13 @@ struct libipw_hdr_3addrqos { | |||
387 | __le16 seq_ctl; | 387 | __le16 seq_ctl; |
388 | u8 payload[0]; | 388 | u8 payload[0]; |
389 | __le16 qos_ctl; | 389 | __le16 qos_ctl; |
390 | } __attribute__ ((packed)); | 390 | } __packed; |
391 | 391 | ||
392 | struct libipw_info_element { | 392 | struct libipw_info_element { |
393 | u8 id; | 393 | u8 id; |
394 | u8 len; | 394 | u8 len; |
395 | u8 data[0]; | 395 | u8 data[0]; |
396 | } __attribute__ ((packed)); | 396 | } __packed; |
397 | 397 | ||
398 | /* | 398 | /* |
399 | * These are the data types that can make up management packets | 399 | * These are the data types that can make up management packets |
@@ -406,7 +406,7 @@ struct libipw_info_element { | |||
406 | u16 listen_interval; | 406 | u16 listen_interval; |
407 | struct { | 407 | struct { |
408 | u16 association_id:14, reserved:2; | 408 | u16 association_id:14, reserved:2; |
409 | } __attribute__ ((packed)); | 409 | } __packed; |
410 | u32 time_stamp[2]; | 410 | u32 time_stamp[2]; |
411 | u16 reason; | 411 | u16 reason; |
412 | u16 status; | 412 | u16 status; |
@@ -419,7 +419,7 @@ struct libipw_auth { | |||
419 | __le16 status; | 419 | __le16 status; |
420 | /* challenge */ | 420 | /* challenge */ |
421 | struct libipw_info_element info_element[0]; | 421 | struct libipw_info_element info_element[0]; |
422 | } __attribute__ ((packed)); | 422 | } __packed; |
423 | 423 | ||
424 | struct libipw_channel_switch { | 424 | struct libipw_channel_switch { |
425 | u8 id; | 425 | u8 id; |
@@ -427,7 +427,7 @@ struct libipw_channel_switch { | |||
427 | u8 mode; | 427 | u8 mode; |
428 | u8 channel; | 428 | u8 channel; |
429 | u8 count; | 429 | u8 count; |
430 | } __attribute__ ((packed)); | 430 | } __packed; |
431 | 431 | ||
432 | struct libipw_action { | 432 | struct libipw_action { |
433 | struct libipw_hdr_3addr header; | 433 | struct libipw_hdr_3addr header; |
@@ -441,12 +441,12 @@ struct libipw_action { | |||
441 | struct libipw_channel_switch channel_switch; | 441 | struct libipw_channel_switch channel_switch; |
442 | 442 | ||
443 | } format; | 443 | } format; |
444 | } __attribute__ ((packed)); | 444 | } __packed; |
445 | 445 | ||
446 | struct libipw_disassoc { | 446 | struct libipw_disassoc { |
447 | struct libipw_hdr_3addr header; | 447 | struct libipw_hdr_3addr header; |
448 | __le16 reason; | 448 | __le16 reason; |
449 | } __attribute__ ((packed)); | 449 | } __packed; |
450 | 450 | ||
451 | /* Alias deauth for disassoc */ | 451 | /* Alias deauth for disassoc */ |
452 | #define libipw_deauth libipw_disassoc | 452 | #define libipw_deauth libipw_disassoc |
@@ -455,7 +455,7 @@ struct libipw_probe_request { | |||
455 | struct libipw_hdr_3addr header; | 455 | struct libipw_hdr_3addr header; |
456 | /* SSID, supported rates */ | 456 | /* SSID, supported rates */ |
457 | struct libipw_info_element info_element[0]; | 457 | struct libipw_info_element info_element[0]; |
458 | } __attribute__ ((packed)); | 458 | } __packed; |
459 | 459 | ||
460 | struct libipw_probe_response { | 460 | struct libipw_probe_response { |
461 | struct libipw_hdr_3addr header; | 461 | struct libipw_hdr_3addr header; |
@@ -465,7 +465,7 @@ struct libipw_probe_response { | |||
465 | /* SSID, supported rates, FH params, DS params, | 465 | /* SSID, supported rates, FH params, DS params, |
466 | * CF params, IBSS params, TIM (if beacon), RSN */ | 466 | * CF params, IBSS params, TIM (if beacon), RSN */ |
467 | struct libipw_info_element info_element[0]; | 467 | struct libipw_info_element info_element[0]; |
468 | } __attribute__ ((packed)); | 468 | } __packed; |
469 | 469 | ||
470 | /* Alias beacon for probe_response */ | 470 | /* Alias beacon for probe_response */ |
471 | #define libipw_beacon libipw_probe_response | 471 | #define libipw_beacon libipw_probe_response |
@@ -476,7 +476,7 @@ struct libipw_assoc_request { | |||
476 | __le16 listen_interval; | 476 | __le16 listen_interval; |
477 | /* SSID, supported rates, RSN */ | 477 | /* SSID, supported rates, RSN */ |
478 | struct libipw_info_element info_element[0]; | 478 | struct libipw_info_element info_element[0]; |
479 | } __attribute__ ((packed)); | 479 | } __packed; |
480 | 480 | ||
481 | struct libipw_reassoc_request { | 481 | struct libipw_reassoc_request { |
482 | struct libipw_hdr_3addr header; | 482 | struct libipw_hdr_3addr header; |
@@ -484,7 +484,7 @@ struct libipw_reassoc_request { | |||
484 | __le16 listen_interval; | 484 | __le16 listen_interval; |
485 | u8 current_ap[ETH_ALEN]; | 485 | u8 current_ap[ETH_ALEN]; |
486 | struct libipw_info_element info_element[0]; | 486 | struct libipw_info_element info_element[0]; |
487 | } __attribute__ ((packed)); | 487 | } __packed; |
488 | 488 | ||
489 | struct libipw_assoc_response { | 489 | struct libipw_assoc_response { |
490 | struct libipw_hdr_3addr header; | 490 | struct libipw_hdr_3addr header; |
@@ -493,7 +493,7 @@ struct libipw_assoc_response { | |||
493 | __le16 aid; | 493 | __le16 aid; |
494 | /* supported rates */ | 494 | /* supported rates */ |
495 | struct libipw_info_element info_element[0]; | 495 | struct libipw_info_element info_element[0]; |
496 | } __attribute__ ((packed)); | 496 | } __packed; |
497 | 497 | ||
498 | struct libipw_txb { | 498 | struct libipw_txb { |
499 | u8 nr_frags; | 499 | u8 nr_frags; |
@@ -555,19 +555,19 @@ struct libipw_qos_information_element { | |||
555 | u8 qui_subtype; | 555 | u8 qui_subtype; |
556 | u8 version; | 556 | u8 version; |
557 | u8 ac_info; | 557 | u8 ac_info; |
558 | } __attribute__ ((packed)); | 558 | } __packed; |
559 | 559 | ||
560 | struct libipw_qos_ac_parameter { | 560 | struct libipw_qos_ac_parameter { |
561 | u8 aci_aifsn; | 561 | u8 aci_aifsn; |
562 | u8 ecw_min_max; | 562 | u8 ecw_min_max; |
563 | __le16 tx_op_limit; | 563 | __le16 tx_op_limit; |
564 | } __attribute__ ((packed)); | 564 | } __packed; |
565 | 565 | ||
566 | struct libipw_qos_parameter_info { | 566 | struct libipw_qos_parameter_info { |
567 | struct libipw_qos_information_element info_element; | 567 | struct libipw_qos_information_element info_element; |
568 | u8 reserved; | 568 | u8 reserved; |
569 | struct libipw_qos_ac_parameter ac_params_record[QOS_QUEUE_NUM]; | 569 | struct libipw_qos_ac_parameter ac_params_record[QOS_QUEUE_NUM]; |
570 | } __attribute__ ((packed)); | 570 | } __packed; |
571 | 571 | ||
572 | struct libipw_qos_parameters { | 572 | struct libipw_qos_parameters { |
573 | __le16 cw_min[QOS_QUEUE_NUM]; | 573 | __le16 cw_min[QOS_QUEUE_NUM]; |
@@ -575,7 +575,7 @@ struct libipw_qos_parameters { | |||
575 | u8 aifs[QOS_QUEUE_NUM]; | 575 | u8 aifs[QOS_QUEUE_NUM]; |
576 | u8 flag[QOS_QUEUE_NUM]; | 576 | u8 flag[QOS_QUEUE_NUM]; |
577 | __le16 tx_op_limit[QOS_QUEUE_NUM]; | 577 | __le16 tx_op_limit[QOS_QUEUE_NUM]; |
578 | } __attribute__ ((packed)); | 578 | } __packed; |
579 | 579 | ||
580 | struct libipw_qos_data { | 580 | struct libipw_qos_data { |
581 | struct libipw_qos_parameters parameters; | 581 | struct libipw_qos_parameters parameters; |
@@ -588,7 +588,7 @@ struct libipw_qos_data { | |||
588 | struct libipw_tim_parameters { | 588 | struct libipw_tim_parameters { |
589 | u8 tim_count; | 589 | u8 tim_count; |
590 | u8 tim_period; | 590 | u8 tim_period; |
591 | } __attribute__ ((packed)); | 591 | } __packed; |
592 | 592 | ||
593 | /*******************************************************/ | 593 | /*******************************************************/ |
594 | 594 | ||
@@ -606,7 +606,7 @@ struct libipw_basic_report { | |||
606 | __le64 start_time; | 606 | __le64 start_time; |
607 | __le16 duration; | 607 | __le16 duration; |
608 | u8 map; | 608 | u8 map; |
609 | } __attribute__ ((packed)); | 609 | } __packed; |
610 | 610 | ||
611 | enum { /* libipw_measurement_request.mode */ | 611 | enum { /* libipw_measurement_request.mode */ |
612 | /* Bit 0 is reserved */ | 612 | /* Bit 0 is reserved */ |
@@ -627,7 +627,7 @@ struct libipw_measurement_params { | |||
627 | u8 channel; | 627 | u8 channel; |
628 | __le64 start_time; | 628 | __le64 start_time; |
629 | __le16 duration; | 629 | __le16 duration; |
630 | } __attribute__ ((packed)); | 630 | } __packed; |
631 | 631 | ||
632 | struct libipw_measurement_request { | 632 | struct libipw_measurement_request { |
633 | struct libipw_info_element ie; | 633 | struct libipw_info_element ie; |
@@ -635,7 +635,7 @@ struct libipw_measurement_request { | |||
635 | u8 mode; | 635 | u8 mode; |
636 | u8 type; | 636 | u8 type; |
637 | struct libipw_measurement_params params[0]; | 637 | struct libipw_measurement_params params[0]; |
638 | } __attribute__ ((packed)); | 638 | } __packed; |
639 | 639 | ||
640 | struct libipw_measurement_report { | 640 | struct libipw_measurement_report { |
641 | struct libipw_info_element ie; | 641 | struct libipw_info_element ie; |
@@ -645,17 +645,17 @@ struct libipw_measurement_report { | |||
645 | union { | 645 | union { |
646 | struct libipw_basic_report basic[0]; | 646 | struct libipw_basic_report basic[0]; |
647 | } u; | 647 | } u; |
648 | } __attribute__ ((packed)); | 648 | } __packed; |
649 | 649 | ||
650 | struct libipw_tpc_report { | 650 | struct libipw_tpc_report { |
651 | u8 transmit_power; | 651 | u8 transmit_power; |
652 | u8 link_margin; | 652 | u8 link_margin; |
653 | } __attribute__ ((packed)); | 653 | } __packed; |
654 | 654 | ||
655 | struct libipw_channel_map { | 655 | struct libipw_channel_map { |
656 | u8 channel; | 656 | u8 channel; |
657 | u8 map; | 657 | u8 map; |
658 | } __attribute__ ((packed)); | 658 | } __packed; |
659 | 659 | ||
660 | struct libipw_ibss_dfs { | 660 | struct libipw_ibss_dfs { |
661 | struct libipw_info_element ie; | 661 | struct libipw_info_element ie; |
@@ -668,14 +668,14 @@ struct libipw_csa { | |||
668 | u8 mode; | 668 | u8 mode; |
669 | u8 channel; | 669 | u8 channel; |
670 | u8 count; | 670 | u8 count; |
671 | } __attribute__ ((packed)); | 671 | } __packed; |
672 | 672 | ||
673 | struct libipw_quiet { | 673 | struct libipw_quiet { |
674 | u8 count; | 674 | u8 count; |
675 | u8 period; | 675 | u8 period; |
676 | u8 duration; | 676 | u8 duration; |
677 | u8 offset; | 677 | u8 offset; |
678 | } __attribute__ ((packed)); | 678 | } __packed; |
679 | 679 | ||
680 | struct libipw_network { | 680 | struct libipw_network { |
681 | /* These entries are used to identify a unique network */ | 681 | /* These entries are used to identify a unique network */ |
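Several of the libipw.h management-frame structures above end in a zero-length payload[0] member. Here is a short, self-contained sketch of how such a trailing member is typically used to reach the variable-length frame body that follows the fixed packed header; the struct and names below are local to the example rather than taken from the driver:

        /* Stand-alone illustration, modelled loosely on the packed 3-address
         * header in the hunk above. */
        #include <stdint.h>
        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>

        #define __packed __attribute__((packed))

        struct hdr_3addr {
                uint16_t frame_ctl;
                uint16_t duration_id;
                uint8_t  addr1[6], addr2[6], addr3[6];
                uint16_t seq_ctl;
                uint8_t  payload[0];    /* zero-length array: start of the frame body */
        } __packed;

        int main(void)
        {
                static const char body[] = "frame body";
                struct hdr_3addr *h = malloc(sizeof(*h) + sizeof(body));

                if (!h)
                        return 1;
                memset(h, 0, sizeof(*h));
                memcpy(h->payload, body, sizeof(body)); /* bytes right after the header */
                printf("header is %zu bytes, body reads \"%s\"\n",
                       sizeof(*h), (const char *)h->payload);
                free(h);
                return 0;
        }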
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h b/drivers/net/wireless/iwlwifi/iwl-3945-fh.h index 042f6bc0df13..2c9ed2b502a3 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h +++ b/drivers/net/wireless/iwlwifi/iwl-3945-fh.h | |||
@@ -175,13 +175,13 @@ | |||
175 | struct iwl3945_tfd_tb { | 175 | struct iwl3945_tfd_tb { |
176 | __le32 addr; | 176 | __le32 addr; |
177 | __le32 len; | 177 | __le32 len; |
178 | } __attribute__ ((packed)); | 178 | } __packed; |
179 | 179 | ||
180 | struct iwl3945_tfd { | 180 | struct iwl3945_tfd { |
181 | __le32 control_flags; | 181 | __le32 control_flags; |
182 | struct iwl3945_tfd_tb tbs[4]; | 182 | struct iwl3945_tfd_tb tbs[4]; |
183 | u8 __pad[28]; | 183 | u8 __pad[28]; |
184 | } __attribute__ ((packed)); | 184 | } __packed; |
185 | 185 | ||
186 | 186 | ||
187 | #endif /* __iwl_3945_fh_h__ */ | 187 | #endif /* __iwl_3945_fh_h__ */ |
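The iwl3945 TFD above carries a 28-byte pad so that, going by the field sizes shown, the packed descriptor comes to a fixed 64 bytes (4 + 4*8 + 28). Below is a small sketch of the kind of compile-time check that can guard such a layout; the assert macro and struct names are local to the example, not the driver's own:

        /* Stand-alone illustration; the descriptor mirrors the field sizes in
         * the hunk above, and STATIC_ASSERT is a local stand-in for the
         * kernel's BUILD_BUG_ON-style checks. */
        #include <stdint.h>

        #define __packed __attribute__((packed))
        #define STATIC_ASSERT(cond) extern char static_assert_check[(cond) ? 1 : -1]

        struct tfd_tb {
                uint32_t addr;
                uint32_t len;
        } __packed;                     /* 8 bytes per transfer-buffer entry */

        struct tfd {
                uint32_t control_flags; /* 4 bytes */
                struct tfd_tb tbs[4];   /* 32 bytes */
                uint8_t pad[28];        /* brings the descriptor to 64 bytes */
        } __packed;

        /* Refuses to compile if padding or a field change alters the layout. */
        STATIC_ASSERT(sizeof(struct tfd) == 64);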
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h index 91bcb4e3cdfb..7c731a793632 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h +++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h | |||
@@ -96,7 +96,7 @@ struct iwl3945_eeprom_txpower_sample { | |||
96 | u8 gain_index; /* index into power (gain) setup table ... */ | 96 | u8 gain_index; /* index into power (gain) setup table ... */ |
97 | s8 power; /* ... for this pwr level for this chnl group */ | 97 | s8 power; /* ... for this pwr level for this chnl group */ |
98 | u16 v_det; /* PA output voltage */ | 98 | u16 v_det; /* PA output voltage */ |
99 | } __attribute__ ((packed)); | 99 | } __packed; |
100 | 100 | ||
101 | /* | 101 | /* |
102 | * Mappings of Tx power levels -> nominal radio/DSP gain table indexes. | 102 | * Mappings of Tx power levels -> nominal radio/DSP gain table indexes. |
@@ -117,7 +117,7 @@ struct iwl3945_eeprom_txpower_group { | |||
117 | u8 group_channel; /* "representative" channel # in this band */ | 117 | u8 group_channel; /* "representative" channel # in this band */ |
118 | s16 temperature; /* h/w temperature at factory calib this band | 118 | s16 temperature; /* h/w temperature at factory calib this band |
119 | * (signed) */ | 119 | * (signed) */ |
120 | } __attribute__ ((packed)); | 120 | } __packed; |
121 | 121 | ||
122 | /* | 122 | /* |
123 | * Temperature-based Tx-power compensation data, not band-specific. | 123 | * Temperature-based Tx-power compensation data, not band-specific. |
@@ -131,7 +131,7 @@ struct iwl3945_eeprom_temperature_corr { | |||
131 | u32 Tc; | 131 | u32 Tc; |
132 | u32 Td; | 132 | u32 Td; |
133 | u32 Te; | 133 | u32 Te; |
134 | } __attribute__ ((packed)); | 134 | } __packed; |
135 | 135 | ||
136 | /* | 136 | /* |
137 | * EEPROM map | 137 | * EEPROM map |
@@ -215,7 +215,7 @@ struct iwl3945_eeprom { | |||
215 | /* abs.ofs: 512 */ | 215 | /* abs.ofs: 512 */ |
216 | struct iwl3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */ | 216 | struct iwl3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */ |
217 | u8 reserved16[172]; /* fill out to full 1024 byte block */ | 217 | u8 reserved16[172]; /* fill out to full 1024 byte block */ |
218 | } __attribute__ ((packed)); | 218 | } __packed; |
219 | 219 | ||
220 | #define IWL3945_EEPROM_IMG_SIZE 1024 | 220 | #define IWL3945_EEPROM_IMG_SIZE 1024 |
221 | 221 | ||
@@ -274,7 +274,7 @@ static inline int iwl3945_hw_valid_rtc_data_addr(u32 addr) | |||
274 | * and &iwl3945_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */ | 274 | * and &iwl3945_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */ |
275 | struct iwl3945_shared { | 275 | struct iwl3945_shared { |
276 | __le32 tx_base_ptr[8]; | 276 | __le32 tx_base_ptr[8]; |
277 | } __attribute__ ((packed)); | 277 | } __packed; |
278 | 278 | ||
279 | static inline u8 iwl3945_hw_get_rate(__le16 rate_n_flags) | 279 | static inline u8 iwl3945_hw_get_rate(__le16 rate_n_flags) |
280 | { | 280 | { |
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h index cd4b61ae25b7..9166794eda0d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h +++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h | |||
@@ -787,6 +787,6 @@ enum { | |||
787 | struct iwl4965_scd_bc_tbl { | 787 | struct iwl4965_scd_bc_tbl { |
788 | __le16 tfd_offset[TFD_QUEUE_BC_SIZE]; | 788 | __le16 tfd_offset[TFD_QUEUE_BC_SIZE]; |
789 | u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)]; | 789 | u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)]; |
790 | } __attribute__ ((packed)); | 790 | } __packed; |
791 | 791 | ||
792 | #endif /* !__iwl_4965_hw_h__ */ | 792 | #endif /* !__iwl_4965_hw_h__ */ |
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h index f9a3fbb6338f..a52b82c8e7a6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h +++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h | |||
@@ -112,7 +112,7 @@ | |||
112 | */ | 112 | */ |
113 | struct iwlagn_scd_bc_tbl { | 113 | struct iwlagn_scd_bc_tbl { |
114 | __le16 tfd_offset[TFD_QUEUE_BC_SIZE]; | 114 | __le16 tfd_offset[TFD_QUEUE_BC_SIZE]; |
115 | } __attribute__ ((packed)); | 115 | } __packed; |
116 | 116 | ||
117 | 117 | ||
118 | #endif /* __iwl_agn_hw_h__ */ | 118 | #endif /* __iwl_agn_hw_h__ */ |
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h index 9aab020c474b..73d2d59bc1df 100644 --- a/drivers/net/wireless/iwlwifi/iwl-commands.h +++ b/drivers/net/wireless/iwlwifi/iwl-commands.h | |||
@@ -227,7 +227,7 @@ struct iwl_cmd_header { | |||
227 | 227 | ||
228 | /* command or response/notification data follows immediately */ | 228 | /* command or response/notification data follows immediately */ |
229 | u8 data[0]; | 229 | u8 data[0]; |
230 | } __attribute__ ((packed)); | 230 | } __packed; |
231 | 231 | ||
232 | 232 | ||
233 | /** | 233 | /** |
@@ -247,7 +247,7 @@ struct iwl_cmd_header { | |||
247 | struct iwl3945_tx_power { | 247 | struct iwl3945_tx_power { |
248 | u8 tx_gain; /* gain for analog radio */ | 248 | u8 tx_gain; /* gain for analog radio */ |
249 | u8 dsp_atten; /* gain for DSP */ | 249 | u8 dsp_atten; /* gain for DSP */ |
250 | } __attribute__ ((packed)); | 250 | } __packed; |
251 | 251 | ||
252 | /** | 252 | /** |
253 | * struct iwl3945_power_per_rate | 253 | * struct iwl3945_power_per_rate |
@@ -258,7 +258,7 @@ struct iwl3945_power_per_rate { | |||
258 | u8 rate; /* plcp */ | 258 | u8 rate; /* plcp */ |
259 | struct iwl3945_tx_power tpc; | 259 | struct iwl3945_tx_power tpc; |
260 | u8 reserved; | 260 | u8 reserved; |
261 | } __attribute__ ((packed)); | 261 | } __packed; |
262 | 262 | ||
263 | /** | 263 | /** |
264 | * iwlagn rate_n_flags bit fields | 264 | * iwlagn rate_n_flags bit fields |
@@ -389,7 +389,7 @@ union iwl4965_tx_power_dual_stream { | |||
389 | */ | 389 | */ |
390 | struct tx_power_dual_stream { | 390 | struct tx_power_dual_stream { |
391 | __le32 dw; | 391 | __le32 dw; |
392 | } __attribute__ ((packed)); | 392 | } __packed; |
393 | 393 | ||
394 | /** | 394 | /** |
395 | * struct iwl4965_tx_power_db | 395 | * struct iwl4965_tx_power_db |
@@ -398,7 +398,7 @@ struct tx_power_dual_stream { | |||
398 | */ | 398 | */ |
399 | struct iwl4965_tx_power_db { | 399 | struct iwl4965_tx_power_db { |
400 | struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES]; | 400 | struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES]; |
401 | } __attribute__ ((packed)); | 401 | } __packed; |
402 | 402 | ||
403 | /** | 403 | /** |
404 | * Command REPLY_TX_POWER_DBM_CMD = 0x98 | 404 | * Command REPLY_TX_POWER_DBM_CMD = 0x98 |
@@ -412,7 +412,7 @@ struct iwl5000_tx_power_dbm_cmd { | |||
412 | u8 flags; | 412 | u8 flags; |
413 | s8 srv_chan_lmt; /*in half-dBm (e.g. 30 = 15 dBm) */ | 413 | s8 srv_chan_lmt; /*in half-dBm (e.g. 30 = 15 dBm) */ |
414 | u8 reserved; | 414 | u8 reserved; |
415 | } __attribute__ ((packed)); | 415 | } __packed; |
416 | 416 | ||
417 | /** | 417 | /** |
418 | * Command TX_ANT_CONFIGURATION_CMD = 0x98 | 418 | * Command TX_ANT_CONFIGURATION_CMD = 0x98 |
@@ -422,7 +422,7 @@ struct iwl5000_tx_power_dbm_cmd { | |||
422 | */ | 422 | */ |
423 | struct iwl_tx_ant_config_cmd { | 423 | struct iwl_tx_ant_config_cmd { |
424 | __le32 valid; | 424 | __le32 valid; |
425 | } __attribute__ ((packed)); | 425 | } __packed; |
426 | 426 | ||
427 | /****************************************************************************** | 427 | /****************************************************************************** |
428 | * (0a) | 428 | * (0a) |
@@ -478,7 +478,7 @@ struct iwl_init_alive_resp { | |||
478 | __le32 therm_r4[2]; /* signed */ | 478 | __le32 therm_r4[2]; /* signed */ |
479 | __le32 tx_atten[5][2]; /* signed MIMO gain comp, 5 freq groups, | 479 | __le32 tx_atten[5][2]; /* signed MIMO gain comp, 5 freq groups, |
480 | * 2 Tx chains */ | 480 | * 2 Tx chains */ |
481 | } __attribute__ ((packed)); | 481 | } __packed; |
482 | 482 | ||
483 | 483 | ||
484 | /** | 484 | /** |
@@ -570,7 +570,7 @@ struct iwl_alive_resp { | |||
570 | __le32 error_event_table_ptr; /* SRAM address for error log */ | 570 | __le32 error_event_table_ptr; /* SRAM address for error log */ |
571 | __le32 timestamp; | 571 | __le32 timestamp; |
572 | __le32 is_valid; | 572 | __le32 is_valid; |
573 | } __attribute__ ((packed)); | 573 | } __packed; |
574 | 574 | ||
575 | /* | 575 | /* |
576 | * REPLY_ERROR = 0x2 (response only, not a command) | 576 | * REPLY_ERROR = 0x2 (response only, not a command) |
@@ -582,7 +582,7 @@ struct iwl_error_resp { | |||
582 | __le16 bad_cmd_seq_num; | 582 | __le16 bad_cmd_seq_num; |
583 | __le32 error_info; | 583 | __le32 error_info; |
584 | __le64 timestamp; | 584 | __le64 timestamp; |
585 | } __attribute__ ((packed)); | 585 | } __packed; |
586 | 586 | ||
587 | /****************************************************************************** | 587 | /****************************************************************************** |
588 | * (1) | 588 | * (1) |
@@ -718,7 +718,7 @@ struct iwl3945_rxon_cmd { | |||
718 | __le32 filter_flags; | 718 | __le32 filter_flags; |
719 | __le16 channel; | 719 | __le16 channel; |
720 | __le16 reserved5; | 720 | __le16 reserved5; |
721 | } __attribute__ ((packed)); | 721 | } __packed; |
722 | 722 | ||
723 | struct iwl4965_rxon_cmd { | 723 | struct iwl4965_rxon_cmd { |
724 | u8 node_addr[6]; | 724 | u8 node_addr[6]; |
@@ -738,7 +738,7 @@ struct iwl4965_rxon_cmd { | |||
738 | __le16 channel; | 738 | __le16 channel; |
739 | u8 ofdm_ht_single_stream_basic_rates; | 739 | u8 ofdm_ht_single_stream_basic_rates; |
740 | u8 ofdm_ht_dual_stream_basic_rates; | 740 | u8 ofdm_ht_dual_stream_basic_rates; |
741 | } __attribute__ ((packed)); | 741 | } __packed; |
742 | 742 | ||
743 | /* 5000 HW just extend this command */ | 743 | /* 5000 HW just extend this command */ |
744 | struct iwl_rxon_cmd { | 744 | struct iwl_rxon_cmd { |
@@ -763,7 +763,7 @@ struct iwl_rxon_cmd { | |||
763 | u8 reserved5; | 763 | u8 reserved5; |
764 | __le16 acquisition_data; | 764 | __le16 acquisition_data; |
765 | __le16 reserved6; | 765 | __le16 reserved6; |
766 | } __attribute__ ((packed)); | 766 | } __packed; |
767 | 767 | ||
768 | /* | 768 | /* |
769 | * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response) | 769 | * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response) |
@@ -774,7 +774,7 @@ struct iwl3945_rxon_assoc_cmd { | |||
774 | u8 ofdm_basic_rates; | 774 | u8 ofdm_basic_rates; |
775 | u8 cck_basic_rates; | 775 | u8 cck_basic_rates; |
776 | __le16 reserved; | 776 | __le16 reserved; |
777 | } __attribute__ ((packed)); | 777 | } __packed; |
778 | 778 | ||
779 | struct iwl4965_rxon_assoc_cmd { | 779 | struct iwl4965_rxon_assoc_cmd { |
780 | __le32 flags; | 780 | __le32 flags; |
@@ -785,7 +785,7 @@ struct iwl4965_rxon_assoc_cmd { | |||
785 | u8 ofdm_ht_dual_stream_basic_rates; | 785 | u8 ofdm_ht_dual_stream_basic_rates; |
786 | __le16 rx_chain_select_flags; | 786 | __le16 rx_chain_select_flags; |
787 | __le16 reserved; | 787 | __le16 reserved; |
788 | } __attribute__ ((packed)); | 788 | } __packed; |
789 | 789 | ||
790 | struct iwl5000_rxon_assoc_cmd { | 790 | struct iwl5000_rxon_assoc_cmd { |
791 | __le32 flags; | 791 | __le32 flags; |
@@ -800,7 +800,7 @@ struct iwl5000_rxon_assoc_cmd { | |||
800 | __le16 rx_chain_select_flags; | 800 | __le16 rx_chain_select_flags; |
801 | __le16 acquisition_data; | 801 | __le16 acquisition_data; |
802 | __le32 reserved3; | 802 | __le32 reserved3; |
803 | } __attribute__ ((packed)); | 803 | } __packed; |
804 | 804 | ||
805 | #define IWL_CONN_MAX_LISTEN_INTERVAL 10 | 805 | #define IWL_CONN_MAX_LISTEN_INTERVAL 10 |
806 | #define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */ | 806 | #define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */ |
@@ -816,7 +816,7 @@ struct iwl_rxon_time_cmd { | |||
816 | __le32 beacon_init_val; | 816 | __le32 beacon_init_val; |
817 | __le16 listen_interval; | 817 | __le16 listen_interval; |
818 | __le16 reserved; | 818 | __le16 reserved; |
819 | } __attribute__ ((packed)); | 819 | } __packed; |
820 | 820 | ||
821 | /* | 821 | /* |
822 | * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response) | 822 | * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response) |
@@ -829,7 +829,7 @@ struct iwl3945_channel_switch_cmd { | |||
829 | __le32 rxon_filter_flags; | 829 | __le32 rxon_filter_flags; |
830 | __le32 switch_time; | 830 | __le32 switch_time; |
831 | struct iwl3945_power_per_rate power[IWL_MAX_RATES]; | 831 | struct iwl3945_power_per_rate power[IWL_MAX_RATES]; |
832 | } __attribute__ ((packed)); | 832 | } __packed; |
833 | 833 | ||
834 | struct iwl4965_channel_switch_cmd { | 834 | struct iwl4965_channel_switch_cmd { |
835 | u8 band; | 835 | u8 band; |
@@ -839,7 +839,7 @@ struct iwl4965_channel_switch_cmd { | |||
839 | __le32 rxon_filter_flags; | 839 | __le32 rxon_filter_flags; |
840 | __le32 switch_time; | 840 | __le32 switch_time; |
841 | struct iwl4965_tx_power_db tx_power; | 841 | struct iwl4965_tx_power_db tx_power; |
842 | } __attribute__ ((packed)); | 842 | } __packed; |
843 | 843 | ||
844 | /** | 844 | /** |
845 | * struct iwl5000_channel_switch_cmd | 845 | * struct iwl5000_channel_switch_cmd |
@@ -860,7 +860,7 @@ struct iwl5000_channel_switch_cmd { | |||
860 | __le32 rxon_filter_flags; | 860 | __le32 rxon_filter_flags; |
861 | __le32 switch_time; | 861 | __le32 switch_time; |
862 | __le32 reserved[2][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES]; | 862 | __le32 reserved[2][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES]; |
863 | } __attribute__ ((packed)); | 863 | } __packed; |
864 | 864 | ||
865 | /** | 865 | /** |
866 | * struct iwl6000_channel_switch_cmd | 866 | * struct iwl6000_channel_switch_cmd |
@@ -881,7 +881,7 @@ struct iwl6000_channel_switch_cmd { | |||
881 | __le32 rxon_filter_flags; | 881 | __le32 rxon_filter_flags; |
882 | __le32 switch_time; | 882 | __le32 switch_time; |
883 | __le32 reserved[3][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES]; | 883 | __le32 reserved[3][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES]; |
884 | } __attribute__ ((packed)); | 884 | } __packed; |
885 | 885 | ||
886 | /* | 886 | /* |
887 | * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command) | 887 | * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command) |
@@ -890,7 +890,7 @@ struct iwl_csa_notification { | |||
890 | __le16 band; | 890 | __le16 band; |
891 | __le16 channel; | 891 | __le16 channel; |
892 | __le32 status; /* 0 - OK, 1 - fail */ | 892 | __le32 status; /* 0 - OK, 1 - fail */ |
893 | } __attribute__ ((packed)); | 893 | } __packed; |
894 | 894 | ||
895 | /****************************************************************************** | 895 | /****************************************************************************** |
896 | * (2) | 896 | * (2) |
@@ -920,7 +920,7 @@ struct iwl_ac_qos { | |||
920 | u8 aifsn; | 920 | u8 aifsn; |
921 | u8 reserved1; | 921 | u8 reserved1; |
922 | __le16 edca_txop; | 922 | __le16 edca_txop; |
923 | } __attribute__ ((packed)); | 923 | } __packed; |
924 | 924 | ||
925 | /* QoS flags defines */ | 925 | /* QoS flags defines */ |
926 | #define QOS_PARAM_FLG_UPDATE_EDCA_MSK cpu_to_le32(0x01) | 926 | #define QOS_PARAM_FLG_UPDATE_EDCA_MSK cpu_to_le32(0x01) |
@@ -939,7 +939,7 @@ struct iwl_ac_qos { | |||
939 | struct iwl_qosparam_cmd { | 939 | struct iwl_qosparam_cmd { |
940 | __le32 qos_flags; | 940 | __le32 qos_flags; |
941 | struct iwl_ac_qos ac[AC_NUM]; | 941 | struct iwl_ac_qos ac[AC_NUM]; |
942 | } __attribute__ ((packed)); | 942 | } __packed; |
943 | 943 | ||
944 | /****************************************************************************** | 944 | /****************************************************************************** |
945 | * (3) | 945 | * (3) |
@@ -1015,7 +1015,7 @@ struct iwl4965_keyinfo { | |||
1015 | u8 key_offset; | 1015 | u8 key_offset; |
1016 | u8 reserved2; | 1016 | u8 reserved2; |
1017 | u8 key[16]; /* 16-byte unicast decryption key */ | 1017 | u8 key[16]; /* 16-byte unicast decryption key */ |
1018 | } __attribute__ ((packed)); | 1018 | } __packed; |
1019 | 1019 | ||
1020 | /* 5000 */ | 1020 | /* 5000 */ |
1021 | struct iwl_keyinfo { | 1021 | struct iwl_keyinfo { |
@@ -1029,7 +1029,7 @@ struct iwl_keyinfo { | |||
1029 | __le64 tx_secur_seq_cnt; | 1029 | __le64 tx_secur_seq_cnt; |
1030 | __le64 hw_tkip_mic_rx_key; | 1030 | __le64 hw_tkip_mic_rx_key; |
1031 | __le64 hw_tkip_mic_tx_key; | 1031 | __le64 hw_tkip_mic_tx_key; |
1032 | } __attribute__ ((packed)); | 1032 | } __packed; |
1033 | 1033 | ||
1034 | /** | 1034 | /** |
1035 | * struct sta_id_modify | 1035 | * struct sta_id_modify |
@@ -1049,7 +1049,7 @@ struct sta_id_modify { | |||
1049 | u8 sta_id; | 1049 | u8 sta_id; |
1050 | u8 modify_mask; | 1050 | u8 modify_mask; |
1051 | __le16 reserved2; | 1051 | __le16 reserved2; |
1052 | } __attribute__ ((packed)); | 1052 | } __packed; |
1053 | 1053 | ||
1054 | /* | 1054 | /* |
1055 | * REPLY_ADD_STA = 0x18 (command) | 1055 | * REPLY_ADD_STA = 0x18 (command) |
@@ -1103,7 +1103,7 @@ struct iwl3945_addsta_cmd { | |||
1103 | /* Starting Sequence Number for added block-ack support. | 1103 | /* Starting Sequence Number for added block-ack support. |
1104 | * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ | 1104 | * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ |
1105 | __le16 add_immediate_ba_ssn; | 1105 | __le16 add_immediate_ba_ssn; |
1106 | } __attribute__ ((packed)); | 1106 | } __packed; |
1107 | 1107 | ||
1108 | struct iwl4965_addsta_cmd { | 1108 | struct iwl4965_addsta_cmd { |
1109 | u8 mode; /* 1: modify existing, 0: add new station */ | 1109 | u8 mode; /* 1: modify existing, 0: add new station */ |
@@ -1140,7 +1140,7 @@ struct iwl4965_addsta_cmd { | |||
1140 | __le16 sleep_tx_count; | 1140 | __le16 sleep_tx_count; |
1141 | 1141 | ||
1142 | __le16 reserved2; | 1142 | __le16 reserved2; |
1143 | } __attribute__ ((packed)); | 1143 | } __packed; |
1144 | 1144 | ||
1145 | /* 5000 */ | 1145 | /* 5000 */ |
1146 | struct iwl_addsta_cmd { | 1146 | struct iwl_addsta_cmd { |
@@ -1178,7 +1178,7 @@ struct iwl_addsta_cmd { | |||
1178 | __le16 sleep_tx_count; | 1178 | __le16 sleep_tx_count; |
1179 | 1179 | ||
1180 | __le16 reserved2; | 1180 | __le16 reserved2; |
1181 | } __attribute__ ((packed)); | 1181 | } __packed; |
1182 | 1182 | ||
1183 | 1183 | ||
1184 | #define ADD_STA_SUCCESS_MSK 0x1 | 1184 | #define ADD_STA_SUCCESS_MSK 0x1 |
@@ -1190,7 +1190,7 @@ struct iwl_addsta_cmd { | |||
1190 | */ | 1190 | */ |
1191 | struct iwl_add_sta_resp { | 1191 | struct iwl_add_sta_resp { |
1192 | u8 status; /* ADD_STA_* */ | 1192 | u8 status; /* ADD_STA_* */ |
1193 | } __attribute__ ((packed)); | 1193 | } __packed; |
1194 | 1194 | ||
1195 | #define REM_STA_SUCCESS_MSK 0x1 | 1195 | #define REM_STA_SUCCESS_MSK 0x1 |
1196 | /* | 1196 | /* |
@@ -1198,7 +1198,7 @@ struct iwl_add_sta_resp { | |||
1198 | */ | 1198 | */ |
1199 | struct iwl_rem_sta_resp { | 1199 | struct iwl_rem_sta_resp { |
1200 | u8 status; | 1200 | u8 status; |
1201 | } __attribute__ ((packed)); | 1201 | } __packed; |
1202 | 1202 | ||
1203 | /* | 1203 | /* |
1204 | * REPLY_REM_STA = 0x19 (command) | 1204 | * REPLY_REM_STA = 0x19 (command) |
@@ -1208,7 +1208,7 @@ struct iwl_rem_sta_cmd { | |||
1208 | u8 reserved[3]; | 1208 | u8 reserved[3]; |
1209 | u8 addr[ETH_ALEN]; /* MAC addr of the first station */ | 1209 | u8 addr[ETH_ALEN]; /* MAC addr of the first station */ |
1210 | u8 reserved2[2]; | 1210 | u8 reserved2[2]; |
1211 | } __attribute__ ((packed)); | 1211 | } __packed; |
1212 | 1212 | ||
1213 | /* | 1213 | /* |
1214 | * REPLY_WEP_KEY = 0x20 | 1214 | * REPLY_WEP_KEY = 0x20 |
@@ -1220,7 +1220,7 @@ struct iwl_wep_key { | |||
1220 | u8 key_size; | 1220 | u8 key_size; |
1221 | u8 reserved2[3]; | 1221 | u8 reserved2[3]; |
1222 | u8 key[16]; | 1222 | u8 key[16]; |
1223 | } __attribute__ ((packed)); | 1223 | } __packed; |
1224 | 1224 | ||
1225 | struct iwl_wep_cmd { | 1225 | struct iwl_wep_cmd { |
1226 | u8 num_keys; | 1226 | u8 num_keys; |
@@ -1228,7 +1228,7 @@ struct iwl_wep_cmd { | |||
1228 | u8 flags; | 1228 | u8 flags; |
1229 | u8 reserved; | 1229 | u8 reserved; |
1230 | struct iwl_wep_key key[0]; | 1230 | struct iwl_wep_key key[0]; |
1231 | } __attribute__ ((packed)); | 1231 | } __packed; |
1232 | 1232 | ||
1233 | #define WEP_KEY_WEP_TYPE 1 | 1233 | #define WEP_KEY_WEP_TYPE 1 |
1234 | #define WEP_KEYS_MAX 4 | 1234 | #define WEP_KEYS_MAX 4 |
@@ -1282,7 +1282,7 @@ struct iwl3945_rx_frame_stats { | |||
1282 | __le16 sig_avg; | 1282 | __le16 sig_avg; |
1283 | __le16 noise_diff; | 1283 | __le16 noise_diff; |
1284 | u8 payload[0]; | 1284 | u8 payload[0]; |
1285 | } __attribute__ ((packed)); | 1285 | } __packed; |
1286 | 1286 | ||
1287 | struct iwl3945_rx_frame_hdr { | 1287 | struct iwl3945_rx_frame_hdr { |
1288 | __le16 channel; | 1288 | __le16 channel; |
@@ -1291,13 +1291,13 @@ struct iwl3945_rx_frame_hdr { | |||
1291 | u8 rate; | 1291 | u8 rate; |
1292 | __le16 len; | 1292 | __le16 len; |
1293 | u8 payload[0]; | 1293 | u8 payload[0]; |
1294 | } __attribute__ ((packed)); | 1294 | } __packed; |
1295 | 1295 | ||
1296 | struct iwl3945_rx_frame_end { | 1296 | struct iwl3945_rx_frame_end { |
1297 | __le32 status; | 1297 | __le32 status; |
1298 | __le64 timestamp; | 1298 | __le64 timestamp; |
1299 | __le32 beacon_timestamp; | 1299 | __le32 beacon_timestamp; |
1300 | } __attribute__ ((packed)); | 1300 | } __packed; |
1301 | 1301 | ||
1302 | /* | 1302 | /* |
1303 | * REPLY_3945_RX = 0x1b (response only, not a command) | 1303 | * REPLY_3945_RX = 0x1b (response only, not a command) |
@@ -1311,7 +1311,7 @@ struct iwl3945_rx_frame { | |||
1311 | struct iwl3945_rx_frame_stats stats; | 1311 | struct iwl3945_rx_frame_stats stats; |
1312 | struct iwl3945_rx_frame_hdr hdr; | 1312 | struct iwl3945_rx_frame_hdr hdr; |
1313 | struct iwl3945_rx_frame_end end; | 1313 | struct iwl3945_rx_frame_end end; |
1314 | } __attribute__ ((packed)); | 1314 | } __packed; |
1315 | 1315 | ||
1316 | #define IWL39_RX_FRAME_SIZE (4 + sizeof(struct iwl3945_rx_frame)) | 1316 | #define IWL39_RX_FRAME_SIZE (4 + sizeof(struct iwl3945_rx_frame)) |
1317 | 1317 | ||
@@ -1327,7 +1327,7 @@ struct iwl4965_rx_non_cfg_phy { | |||
1327 | __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */ | 1327 | __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */ |
1328 | u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */ | 1328 | u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */ |
1329 | u8 pad[0]; | 1329 | u8 pad[0]; |
1330 | } __attribute__ ((packed)); | 1330 | } __packed; |
1331 | 1331 | ||
1332 | 1332 | ||
1333 | #define IWL50_RX_RES_PHY_CNT 8 | 1333 | #define IWL50_RX_RES_PHY_CNT 8 |
@@ -1345,7 +1345,7 @@ struct iwl4965_rx_non_cfg_phy { | |||
1345 | 1345 | ||
1346 | struct iwl5000_non_cfg_phy { | 1346 | struct iwl5000_non_cfg_phy { |
1347 | __le32 non_cfg_phy[IWL50_RX_RES_PHY_CNT]; /* up to 8 phy entries */ | 1347 | __le32 non_cfg_phy[IWL50_RX_RES_PHY_CNT]; /* up to 8 phy entries */ |
1348 | } __attribute__ ((packed)); | 1348 | } __packed; |
1349 | 1349 | ||
1350 | 1350 | ||
1351 | /* | 1351 | /* |
@@ -1365,12 +1365,12 @@ struct iwl_rx_phy_res { | |||
1365 | __le32 rate_n_flags; /* RATE_MCS_* */ | 1365 | __le32 rate_n_flags; /* RATE_MCS_* */ |
1366 | __le16 byte_count; /* frame's byte-count */ | 1366 | __le16 byte_count; /* frame's byte-count */ |
1367 | __le16 reserved3; | 1367 | __le16 reserved3; |
1368 | } __attribute__ ((packed)); | 1368 | } __packed; |
1369 | 1369 | ||
1370 | struct iwl4965_rx_mpdu_res_start { | 1370 | struct iwl4965_rx_mpdu_res_start { |
1371 | __le16 byte_count; | 1371 | __le16 byte_count; |
1372 | __le16 reserved; | 1372 | __le16 reserved; |
1373 | } __attribute__ ((packed)); | 1373 | } __packed; |
1374 | 1374 | ||
1375 | 1375 | ||
1376 | /****************************************************************************** | 1376 | /****************************************************************************** |
@@ -1557,7 +1557,7 @@ struct iwl3945_tx_cmd { | |||
1557 | */ | 1557 | */ |
1558 | u8 payload[0]; | 1558 | u8 payload[0]; |
1559 | struct ieee80211_hdr hdr[0]; | 1559 | struct ieee80211_hdr hdr[0]; |
1560 | } __attribute__ ((packed)); | 1560 | } __packed; |
1561 | 1561 | ||
1562 | /* | 1562 | /* |
1563 | * REPLY_TX = 0x1c (response) | 1563 | * REPLY_TX = 0x1c (response) |
@@ -1569,7 +1569,7 @@ struct iwl3945_tx_resp { | |||
1569 | u8 rate; | 1569 | u8 rate; |
1570 | __le32 wireless_media_time; | 1570 | __le32 wireless_media_time; |
1571 | __le32 status; /* TX status */ | 1571 | __le32 status; /* TX status */ |
1572 | } __attribute__ ((packed)); | 1572 | } __packed; |
1573 | 1573 | ||
1574 | 1574 | ||
1575 | /* | 1575 | /* |
@@ -1581,7 +1581,7 @@ struct iwl_dram_scratch { | |||
1581 | u8 try_cnt; /* Tx attempts */ | 1581 | u8 try_cnt; /* Tx attempts */ |
1582 | u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */ | 1582 | u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */ |
1583 | __le16 reserved; | 1583 | __le16 reserved; |
1584 | } __attribute__ ((packed)); | 1584 | } __packed; |
1585 | 1585 | ||
1586 | struct iwl_tx_cmd { | 1586 | struct iwl_tx_cmd { |
1587 | /* | 1587 | /* |
@@ -1660,7 +1660,7 @@ struct iwl_tx_cmd { | |||
1660 | */ | 1660 | */ |
1661 | u8 payload[0]; | 1661 | u8 payload[0]; |
1662 | struct ieee80211_hdr hdr[0]; | 1662 | struct ieee80211_hdr hdr[0]; |
1663 | } __attribute__ ((packed)); | 1663 | } __packed; |
1664 | 1664 | ||
1665 | /* TX command response is sent after *3945* transmission attempts. | 1665 | /* TX command response is sent after *3945* transmission attempts. |
1666 | * | 1666 | * |
@@ -1826,7 +1826,7 @@ enum { | |||
1826 | struct agg_tx_status { | 1826 | struct agg_tx_status { |
1827 | __le16 status; | 1827 | __le16 status; |
1828 | __le16 sequence; | 1828 | __le16 sequence; |
1829 | } __attribute__ ((packed)); | 1829 | } __packed; |
1830 | 1830 | ||
1831 | struct iwl4965_tx_resp { | 1831 | struct iwl4965_tx_resp { |
1832 | u8 frame_count; /* 1 no aggregation, >1 aggregation */ | 1832 | u8 frame_count; /* 1 no aggregation, >1 aggregation */ |
@@ -1863,7 +1863,7 @@ struct iwl4965_tx_resp { | |||
1863 | __le32 status; | 1863 | __le32 status; |
1864 | struct agg_tx_status agg_status[0]; /* for each agg frame */ | 1864 | struct agg_tx_status agg_status[0]; /* for each agg frame */ |
1865 | } u; | 1865 | } u; |
1866 | } __attribute__ ((packed)); | 1866 | } __packed; |
1867 | 1867 | ||
1868 | /* | 1868 | /* |
1869 | * definitions for initial rate index field | 1869 | * definitions for initial rate index field |
@@ -1927,7 +1927,7 @@ struct iwl5000_tx_resp { | |||
1927 | */ | 1927 | */ |
1928 | struct agg_tx_status status; /* TX status (in aggregation - | 1928 | struct agg_tx_status status; /* TX status (in aggregation - |
1929 | * status of 1st frame) */ | 1929 | * status of 1st frame) */ |
1930 | } __attribute__ ((packed)); | 1930 | } __packed; |
1931 | /* | 1931 | /* |
1932 | * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command) | 1932 | * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command) |
1933 | * | 1933 | * |
@@ -1945,7 +1945,7 @@ struct iwl_compressed_ba_resp { | |||
1945 | __le64 bitmap; | 1945 | __le64 bitmap; |
1946 | __le16 scd_flow; | 1946 | __le16 scd_flow; |
1947 | __le16 scd_ssn; | 1947 | __le16 scd_ssn; |
1948 | } __attribute__ ((packed)); | 1948 | } __packed; |
1949 | 1949 | ||
1950 | /* | 1950 | /* |
1951 | * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response) | 1951 | * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response) |
@@ -1958,14 +1958,14 @@ struct iwl3945_txpowertable_cmd { | |||
1958 | u8 reserved; | 1958 | u8 reserved; |
1959 | __le16 channel; | 1959 | __le16 channel; |
1960 | struct iwl3945_power_per_rate power[IWL_MAX_RATES]; | 1960 | struct iwl3945_power_per_rate power[IWL_MAX_RATES]; |
1961 | } __attribute__ ((packed)); | 1961 | } __packed; |
1962 | 1962 | ||
1963 | struct iwl4965_txpowertable_cmd { | 1963 | struct iwl4965_txpowertable_cmd { |
1964 | u8 band; /* 0: 5 GHz, 1: 2.4 GHz */ | 1964 | u8 band; /* 0: 5 GHz, 1: 2.4 GHz */ |
1965 | u8 reserved; | 1965 | u8 reserved; |
1966 | __le16 channel; | 1966 | __le16 channel; |
1967 | struct iwl4965_tx_power_db tx_power; | 1967 | struct iwl4965_tx_power_db tx_power; |
1968 | } __attribute__ ((packed)); | 1968 | } __packed; |
1969 | 1969 | ||
1970 | 1970 | ||
1971 | /** | 1971 | /** |
@@ -1987,13 +1987,13 @@ struct iwl3945_rate_scaling_info { | |||
1987 | __le16 rate_n_flags; | 1987 | __le16 rate_n_flags; |
1988 | u8 try_cnt; | 1988 | u8 try_cnt; |
1989 | u8 next_rate_index; | 1989 | u8 next_rate_index; |
1990 | } __attribute__ ((packed)); | 1990 | } __packed; |
1991 | 1991 | ||
1992 | struct iwl3945_rate_scaling_cmd { | 1992 | struct iwl3945_rate_scaling_cmd { |
1993 | u8 table_id; | 1993 | u8 table_id; |
1994 | u8 reserved[3]; | 1994 | u8 reserved[3]; |
1995 | struct iwl3945_rate_scaling_info table[IWL_MAX_RATES]; | 1995 | struct iwl3945_rate_scaling_info table[IWL_MAX_RATES]; |
1996 | } __attribute__ ((packed)); | 1996 | } __packed; |
1997 | 1997 | ||
1998 | 1998 | ||
1999 | /*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */ | 1999 | /*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */ |
@@ -2040,7 +2040,7 @@ struct iwl_link_qual_general_params { | |||
2040 | * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3. | 2040 | * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3. |
2041 | */ | 2041 | */ |
2042 | u8 start_rate_index[LINK_QUAL_AC_NUM]; | 2042 | u8 start_rate_index[LINK_QUAL_AC_NUM]; |
2043 | } __attribute__ ((packed)); | 2043 | } __packed; |
2044 | 2044 | ||
2045 | #define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */ | 2045 | #define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */ |
2046 | #define LINK_QUAL_AGG_TIME_LIMIT_MAX (65535) | 2046 | #define LINK_QUAL_AGG_TIME_LIMIT_MAX (65535) |
@@ -2081,7 +2081,7 @@ struct iwl_link_qual_agg_params { | |||
2081 | u8 agg_frame_cnt_limit; | 2081 | u8 agg_frame_cnt_limit; |
2082 | 2082 | ||
2083 | __le32 reserved; | 2083 | __le32 reserved; |
2084 | } __attribute__ ((packed)); | 2084 | } __packed; |
2085 | 2085 | ||
2086 | /* | 2086 | /* |
2087 | * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response) | 2087 | * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response) |
@@ -2287,7 +2287,7 @@ struct iwl_link_quality_cmd { | |||
2287 | __le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */ | 2287 | __le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */ |
2288 | } rs_table[LINK_QUAL_MAX_RETRY_NUM]; | 2288 | } rs_table[LINK_QUAL_MAX_RETRY_NUM]; |
2289 | __le32 reserved2; | 2289 | __le32 reserved2; |
2290 | } __attribute__ ((packed)); | 2290 | } __packed; |
2291 | 2291 | ||
2292 | /* | 2292 | /* |
2293 | * BT configuration enable flags: | 2293 | * BT configuration enable flags: |
@@ -2328,7 +2328,7 @@ struct iwl_bt_cmd { | |||
2328 | u8 reserved; | 2328 | u8 reserved; |
2329 | __le32 kill_ack_mask; | 2329 | __le32 kill_ack_mask; |
2330 | __le32 kill_cts_mask; | 2330 | __le32 kill_cts_mask; |
2331 | } __attribute__ ((packed)); | 2331 | } __packed; |
2332 | 2332 | ||
2333 | /****************************************************************************** | 2333 | /****************************************************************************** |
2334 | * (6) | 2334 | * (6) |
@@ -2353,7 +2353,7 @@ struct iwl_measure_channel { | |||
2353 | u8 channel; /* channel to measure */ | 2353 | u8 channel; /* channel to measure */ |
2354 | u8 type; /* see enum iwl_measure_type */ | 2354 | u8 type; /* see enum iwl_measure_type */ |
2355 | __le16 reserved; | 2355 | __le16 reserved; |
2356 | } __attribute__ ((packed)); | 2356 | } __packed; |
2357 | 2357 | ||
2358 | /* | 2358 | /* |
2359 | * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command) | 2359 | * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command) |
@@ -2372,7 +2372,7 @@ struct iwl_spectrum_cmd { | |||
2372 | __le16 channel_count; /* minimum 1, maximum 10 */ | 2372 | __le16 channel_count; /* minimum 1, maximum 10 */ |
2373 | __le16 reserved3; | 2373 | __le16 reserved3; |
2374 | struct iwl_measure_channel channels[10]; | 2374 | struct iwl_measure_channel channels[10]; |
2375 | } __attribute__ ((packed)); | 2375 | } __packed; |
2376 | 2376 | ||
2377 | /* | 2377 | /* |
2378 | * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response) | 2378 | * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response) |
@@ -2383,7 +2383,7 @@ struct iwl_spectrum_resp { | |||
2383 | __le16 status; /* 0 - command will be handled | 2383 | __le16 status; /* 0 - command will be handled |
2384 | * 1 - cannot handle (conflicts with another | 2384 | * 1 - cannot handle (conflicts with another |
2385 | * measurement) */ | 2385 | * measurement) */ |
2386 | } __attribute__ ((packed)); | 2386 | } __packed; |
2387 | 2387 | ||
2388 | enum iwl_measurement_state { | 2388 | enum iwl_measurement_state { |
2389 | IWL_MEASUREMENT_START = 0, | 2389 | IWL_MEASUREMENT_START = 0, |
@@ -2406,13 +2406,13 @@ enum iwl_measurement_status { | |||
2406 | struct iwl_measurement_histogram { | 2406 | struct iwl_measurement_histogram { |
2407 | __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */ | 2407 | __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */ |
2408 | __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */ | 2408 | __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */ |
2409 | } __attribute__ ((packed)); | 2409 | } __packed; |
2410 | 2410 | ||
2411 | /* clear channel availability counters */ | 2411 | /* clear channel availability counters */ |
2412 | struct iwl_measurement_cca_counters { | 2412 | struct iwl_measurement_cca_counters { |
2413 | __le32 ofdm; | 2413 | __le32 ofdm; |
2414 | __le32 cck; | 2414 | __le32 cck; |
2415 | } __attribute__ ((packed)); | 2415 | } __packed; |
2416 | 2416 | ||
2417 | enum iwl_measure_type { | 2417 | enum iwl_measure_type { |
2418 | IWL_MEASURE_BASIC = (1 << 0), | 2418 | IWL_MEASURE_BASIC = (1 << 0), |
@@ -2448,7 +2448,7 @@ struct iwl_spectrum_notification { | |||
2448 | struct iwl_measurement_histogram histogram; | 2448 | struct iwl_measurement_histogram histogram; |
2449 | __le32 stop_time; /* lower 32-bits of TSF */ | 2449 | __le32 stop_time; /* lower 32-bits of TSF */ |
2450 | __le32 status; /* see iwl_measurement_status */ | 2450 | __le32 status; /* see iwl_measurement_status */ |
2451 | } __attribute__ ((packed)); | 2451 | } __packed; |
2452 | 2452 | ||
2453 | /****************************************************************************** | 2453 | /****************************************************************************** |
2454 | * (7) | 2454 | * (7) |
@@ -2504,7 +2504,7 @@ struct iwl3945_powertable_cmd { | |||
2504 | __le32 rx_data_timeout; | 2504 | __le32 rx_data_timeout; |
2505 | __le32 tx_data_timeout; | 2505 | __le32 tx_data_timeout; |
2506 | __le32 sleep_interval[IWL_POWER_VEC_SIZE]; | 2506 | __le32 sleep_interval[IWL_POWER_VEC_SIZE]; |
2507 | } __attribute__ ((packed)); | 2507 | } __packed; |
2508 | 2508 | ||
2509 | struct iwl_powertable_cmd { | 2509 | struct iwl_powertable_cmd { |
2510 | __le16 flags; | 2510 | __le16 flags; |
@@ -2514,7 +2514,7 @@ struct iwl_powertable_cmd { | |||
2514 | __le32 tx_data_timeout; | 2514 | __le32 tx_data_timeout; |
2515 | __le32 sleep_interval[IWL_POWER_VEC_SIZE]; | 2515 | __le32 sleep_interval[IWL_POWER_VEC_SIZE]; |
2516 | __le32 keep_alive_beacons; | 2516 | __le32 keep_alive_beacons; |
2517 | } __attribute__ ((packed)); | 2517 | } __packed; |
2518 | 2518 | ||
2519 | /* | 2519 | /* |
2520 | * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command) | 2520 | * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command) |
@@ -2527,7 +2527,7 @@ struct iwl_sleep_notification { | |||
2527 | __le32 sleep_time; | 2527 | __le32 sleep_time; |
2528 | __le32 tsf_low; | 2528 | __le32 tsf_low; |
2529 | __le32 bcon_timer; | 2529 | __le32 bcon_timer; |
2530 | } __attribute__ ((packed)); | 2530 | } __packed; |
2531 | 2531 | ||
2532 | /* Sleep states. 3945 and 4965 identical. */ | 2532 | /* Sleep states. 3945 and 4965 identical. */ |
2533 | enum { | 2533 | enum { |
@@ -2552,14 +2552,14 @@ enum { | |||
2552 | #define CARD_STATE_CMD_HALT 0x02 /* Power down permanently */ | 2552 | #define CARD_STATE_CMD_HALT 0x02 /* Power down permanently */ |
2553 | struct iwl_card_state_cmd { | 2553 | struct iwl_card_state_cmd { |
2554 | __le32 status; /* CARD_STATE_CMD_* request new power state */ | 2554 | __le32 status; /* CARD_STATE_CMD_* request new power state */ |
2555 | } __attribute__ ((packed)); | 2555 | } __packed; |
2556 | 2556 | ||
2557 | /* | 2557 | /* |
2558 | * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command) | 2558 | * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command) |
2559 | */ | 2559 | */ |
2560 | struct iwl_card_state_notif { | 2560 | struct iwl_card_state_notif { |
2561 | __le32 flags; | 2561 | __le32 flags; |
2562 | } __attribute__ ((packed)); | 2562 | } __packed; |
2563 | 2563 | ||
2564 | #define HW_CARD_DISABLED 0x01 | 2564 | #define HW_CARD_DISABLED 0x01 |
2565 | #define SW_CARD_DISABLED 0x02 | 2565 | #define SW_CARD_DISABLED 0x02 |
@@ -2570,14 +2570,14 @@ struct iwl_ct_kill_config { | |||
2570 | __le32 reserved; | 2570 | __le32 reserved; |
2571 | __le32 critical_temperature_M; | 2571 | __le32 critical_temperature_M; |
2572 | __le32 critical_temperature_R; | 2572 | __le32 critical_temperature_R; |
2573 | } __attribute__ ((packed)); | 2573 | } __packed; |
2574 | 2574 | ||
2575 | /* 1000, and 6x00 */ | 2575 | /* 1000, and 6x00 */ |
2576 | struct iwl_ct_kill_throttling_config { | 2576 | struct iwl_ct_kill_throttling_config { |
2577 | __le32 critical_temperature_exit; | 2577 | __le32 critical_temperature_exit; |
2578 | __le32 reserved; | 2578 | __le32 reserved; |
2579 | __le32 critical_temperature_enter; | 2579 | __le32 critical_temperature_enter; |
2580 | } __attribute__ ((packed)); | 2580 | } __packed; |
2581 | 2581 | ||
2582 | /****************************************************************************** | 2582 | /****************************************************************************** |
2583 | * (8) | 2583 | * (8) |
@@ -2622,7 +2622,7 @@ struct iwl3945_scan_channel { | |||
2622 | struct iwl3945_tx_power tpc; | 2622 | struct iwl3945_tx_power tpc; |
2623 | __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */ | 2623 | __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */ |
2624 | __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */ | 2624 | __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */ |
2625 | } __attribute__ ((packed)); | 2625 | } __packed; |
2626 | 2626 | ||
2627 | /* set number of direct probes u8 type */ | 2627 | /* set number of direct probes u8 type */ |
2628 | #define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1)))) | 2628 | #define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1)))) |
@@ -2641,7 +2641,7 @@ struct iwl_scan_channel { | |||
2641 | u8 dsp_atten; /* gain for DSP */ | 2641 | u8 dsp_atten; /* gain for DSP */ |
2642 | __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */ | 2642 | __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */ |
2643 | __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */ | 2643 | __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */ |
2644 | } __attribute__ ((packed)); | 2644 | } __packed; |
2645 | 2645 | ||
2646 | /* set number of direct probes __le32 type */ | 2646 | /* set number of direct probes __le32 type */ |
2647 | #define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1)))) | 2647 | #define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1)))) |
@@ -2658,7 +2658,7 @@ struct iwl_ssid_ie { | |||
2658 | u8 id; | 2658 | u8 id; |
2659 | u8 len; | 2659 | u8 len; |
2660 | u8 ssid[32]; | 2660 | u8 ssid[32]; |
2661 | } __attribute__ ((packed)); | 2661 | } __packed; |
2662 | 2662 | ||
2663 | #define PROBE_OPTION_MAX_3945 4 | 2663 | #define PROBE_OPTION_MAX_3945 4 |
2664 | #define PROBE_OPTION_MAX 20 | 2664 | #define PROBE_OPTION_MAX 20 |
@@ -2764,7 +2764,7 @@ struct iwl3945_scan_cmd { | |||
2764 | * before requesting another scan. | 2764 | * before requesting another scan. |
2765 | */ | 2765 | */ |
2766 | u8 data[0]; | 2766 | u8 data[0]; |
2767 | } __attribute__ ((packed)); | 2767 | } __packed; |
2768 | 2768 | ||
2769 | struct iwl_scan_cmd { | 2769 | struct iwl_scan_cmd { |
2770 | __le16 len; | 2770 | __le16 len; |
@@ -2808,7 +2808,7 @@ struct iwl_scan_cmd { | |||
2808 | * before requesting another scan. | 2808 | * before requesting another scan. |
2809 | */ | 2809 | */ |
2810 | u8 data[0]; | 2810 | u8 data[0]; |
2811 | } __attribute__ ((packed)); | 2811 | } __packed; |
2812 | 2812 | ||
2813 | /* Can abort will notify by complete notification with abort status. */ | 2813 | /* Can abort will notify by complete notification with abort status. */ |
2814 | #define CAN_ABORT_STATUS cpu_to_le32(0x1) | 2814 | #define CAN_ABORT_STATUS cpu_to_le32(0x1) |
@@ -2820,7 +2820,7 @@ struct iwl_scan_cmd { | |||
2820 | */ | 2820 | */ |
2821 | struct iwl_scanreq_notification { | 2821 | struct iwl_scanreq_notification { |
2822 | __le32 status; /* 1: okay, 2: cannot fulfill request */ | 2822 | __le32 status; /* 1: okay, 2: cannot fulfill request */ |
2823 | } __attribute__ ((packed)); | 2823 | } __packed; |
2824 | 2824 | ||
2825 | /* | 2825 | /* |
2826 | * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command) | 2826 | * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command) |
@@ -2833,7 +2833,7 @@ struct iwl_scanstart_notification { | |||
2833 | u8 band; | 2833 | u8 band; |
2834 | u8 reserved[2]; | 2834 | u8 reserved[2]; |
2835 | __le32 status; | 2835 | __le32 status; |
2836 | } __attribute__ ((packed)); | 2836 | } __packed; |
2837 | 2837 | ||
2838 | #define SCAN_OWNER_STATUS 0x1; | 2838 | #define SCAN_OWNER_STATUS 0x1; |
2839 | #define MEASURE_OWNER_STATUS 0x2; | 2839 | #define MEASURE_OWNER_STATUS 0x2; |
@@ -2849,7 +2849,7 @@ struct iwl_scanresults_notification { | |||
2849 | __le32 tsf_low; | 2849 | __le32 tsf_low; |
2850 | __le32 tsf_high; | 2850 | __le32 tsf_high; |
2851 | __le32 statistics[NUMBER_OF_STATISTICS]; | 2851 | __le32 statistics[NUMBER_OF_STATISTICS]; |
2852 | } __attribute__ ((packed)); | 2852 | } __packed; |
2853 | 2853 | ||
2854 | /* | 2854 | /* |
2855 | * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command) | 2855 | * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command) |
@@ -2861,7 +2861,7 @@ struct iwl_scancomplete_notification { | |||
2861 | u8 last_channel; | 2861 | u8 last_channel; |
2862 | __le32 tsf_low; | 2862 | __le32 tsf_low; |
2863 | __le32 tsf_high; | 2863 | __le32 tsf_high; |
2864 | } __attribute__ ((packed)); | 2864 | } __packed; |
2865 | 2865 | ||
2866 | 2866 | ||
2867 | /****************************************************************************** | 2867 | /****************************************************************************** |
@@ -2879,14 +2879,14 @@ struct iwl3945_beacon_notif { | |||
2879 | __le32 low_tsf; | 2879 | __le32 low_tsf; |
2880 | __le32 high_tsf; | 2880 | __le32 high_tsf; |
2881 | __le32 ibss_mgr_status; | 2881 | __le32 ibss_mgr_status; |
2882 | } __attribute__ ((packed)); | 2882 | } __packed; |
2883 | 2883 | ||
2884 | struct iwl4965_beacon_notif { | 2884 | struct iwl4965_beacon_notif { |
2885 | struct iwl4965_tx_resp beacon_notify_hdr; | 2885 | struct iwl4965_tx_resp beacon_notify_hdr; |
2886 | __le32 low_tsf; | 2886 | __le32 low_tsf; |
2887 | __le32 high_tsf; | 2887 | __le32 high_tsf; |
2888 | __le32 ibss_mgr_status; | 2888 | __le32 ibss_mgr_status; |
2889 | } __attribute__ ((packed)); | 2889 | } __packed; |
2890 | 2890 | ||
2891 | /* | 2891 | /* |
2892 | * REPLY_TX_BEACON = 0x91 (command, has simple generic response) | 2892 | * REPLY_TX_BEACON = 0x91 (command, has simple generic response) |
@@ -2898,7 +2898,7 @@ struct iwl3945_tx_beacon_cmd { | |||
2898 | u8 tim_size; | 2898 | u8 tim_size; |
2899 | u8 reserved1; | 2899 | u8 reserved1; |
2900 | struct ieee80211_hdr frame[0]; /* beacon frame */ | 2900 | struct ieee80211_hdr frame[0]; /* beacon frame */ |
2901 | } __attribute__ ((packed)); | 2901 | } __packed; |
2902 | 2902 | ||
2903 | struct iwl_tx_beacon_cmd { | 2903 | struct iwl_tx_beacon_cmd { |
2904 | struct iwl_tx_cmd tx; | 2904 | struct iwl_tx_cmd tx; |
@@ -2906,7 +2906,7 @@ struct iwl_tx_beacon_cmd { | |||
2906 | u8 tim_size; | 2906 | u8 tim_size; |
2907 | u8 reserved1; | 2907 | u8 reserved1; |
2908 | struct ieee80211_hdr frame[0]; /* beacon frame */ | 2908 | struct ieee80211_hdr frame[0]; /* beacon frame */ |
2909 | } __attribute__ ((packed)); | 2909 | } __packed; |
2910 | 2910 | ||
2911 | /****************************************************************************** | 2911 | /****************************************************************************** |
2912 | * (10) | 2912 | * (10) |
@@ -2932,7 +2932,7 @@ struct rate_histogram { | |||
2932 | __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS]; | 2932 | __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS]; |
2933 | __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS]; | 2933 | __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS]; |
2934 | } failed; | 2934 | } failed; |
2935 | } __attribute__ ((packed)); | 2935 | } __packed; |
2936 | 2936 | ||
2937 | /* statistics command response */ | 2937 | /* statistics command response */ |
2938 | 2938 | ||
@@ -2952,7 +2952,7 @@ struct iwl39_statistics_rx_phy { | |||
2952 | __le32 rxe_frame_limit_overrun; | 2952 | __le32 rxe_frame_limit_overrun; |
2953 | __le32 sent_ack_cnt; | 2953 | __le32 sent_ack_cnt; |
2954 | __le32 sent_cts_cnt; | 2954 | __le32 sent_cts_cnt; |
2955 | } __attribute__ ((packed)); | 2955 | } __packed; |
2956 | 2956 | ||
2957 | struct iwl39_statistics_rx_non_phy { | 2957 | struct iwl39_statistics_rx_non_phy { |
2958 | __le32 bogus_cts; /* CTS received when not expecting CTS */ | 2958 | __le32 bogus_cts; /* CTS received when not expecting CTS */ |
@@ -2963,13 +2963,13 @@ struct iwl39_statistics_rx_non_phy { | |||
2963 | * filtering process */ | 2963 | * filtering process */ |
2964 | __le32 non_channel_beacons; /* beacons with our bss id but not on | 2964 | __le32 non_channel_beacons; /* beacons with our bss id but not on |
2965 | * our serving channel */ | 2965 | * our serving channel */ |
2966 | } __attribute__ ((packed)); | 2966 | } __packed; |
2967 | 2967 | ||
2968 | struct iwl39_statistics_rx { | 2968 | struct iwl39_statistics_rx { |
2969 | struct iwl39_statistics_rx_phy ofdm; | 2969 | struct iwl39_statistics_rx_phy ofdm; |
2970 | struct iwl39_statistics_rx_phy cck; | 2970 | struct iwl39_statistics_rx_phy cck; |
2971 | struct iwl39_statistics_rx_non_phy general; | 2971 | struct iwl39_statistics_rx_non_phy general; |
2972 | } __attribute__ ((packed)); | 2972 | } __packed; |
2973 | 2973 | ||
2974 | struct iwl39_statistics_tx { | 2974 | struct iwl39_statistics_tx { |
2975 | __le32 preamble_cnt; | 2975 | __le32 preamble_cnt; |
@@ -2981,20 +2981,20 @@ struct iwl39_statistics_tx { | |||
2981 | __le32 ack_timeout; | 2981 | __le32 ack_timeout; |
2982 | __le32 expected_ack_cnt; | 2982 | __le32 expected_ack_cnt; |
2983 | __le32 actual_ack_cnt; | 2983 | __le32 actual_ack_cnt; |
2984 | } __attribute__ ((packed)); | 2984 | } __packed; |
2985 | 2985 | ||
2986 | struct statistics_dbg { | 2986 | struct statistics_dbg { |
2987 | __le32 burst_check; | 2987 | __le32 burst_check; |
2988 | __le32 burst_count; | 2988 | __le32 burst_count; |
2989 | __le32 reserved[4]; | 2989 | __le32 reserved[4]; |
2990 | } __attribute__ ((packed)); | 2990 | } __packed; |
2991 | 2991 | ||
2992 | struct iwl39_statistics_div { | 2992 | struct iwl39_statistics_div { |
2993 | __le32 tx_on_a; | 2993 | __le32 tx_on_a; |
2994 | __le32 tx_on_b; | 2994 | __le32 tx_on_b; |
2995 | __le32 exec_time; | 2995 | __le32 exec_time; |
2996 | __le32 probe_time; | 2996 | __le32 probe_time; |
2997 | } __attribute__ ((packed)); | 2997 | } __packed; |
2998 | 2998 | ||
2999 | struct iwl39_statistics_general { | 2999 | struct iwl39_statistics_general { |
3000 | __le32 temperature; | 3000 | __le32 temperature; |
@@ -3004,7 +3004,7 @@ struct iwl39_statistics_general { | |||
3004 | __le32 slots_idle; | 3004 | __le32 slots_idle; |
3005 | __le32 ttl_timestamp; | 3005 | __le32 ttl_timestamp; |
3006 | struct iwl39_statistics_div div; | 3006 | struct iwl39_statistics_div div; |
3007 | } __attribute__ ((packed)); | 3007 | } __packed; |
3008 | 3008 | ||
3009 | struct statistics_rx_phy { | 3009 | struct statistics_rx_phy { |
3010 | __le32 ina_cnt; | 3010 | __le32 ina_cnt; |
@@ -3027,7 +3027,7 @@ struct statistics_rx_phy { | |||
3027 | __le32 mh_format_err; | 3027 | __le32 mh_format_err; |
3028 | __le32 re_acq_main_rssi_sum; | 3028 | __le32 re_acq_main_rssi_sum; |
3029 | __le32 reserved3; | 3029 | __le32 reserved3; |
3030 | } __attribute__ ((packed)); | 3030 | } __packed; |
3031 | 3031 | ||
3032 | struct statistics_rx_ht_phy { | 3032 | struct statistics_rx_ht_phy { |
3033 | __le32 plcp_err; | 3033 | __le32 plcp_err; |
@@ -3040,7 +3040,7 @@ struct statistics_rx_ht_phy { | |||
3040 | __le32 agg_mpdu_cnt; | 3040 | __le32 agg_mpdu_cnt; |
3041 | __le32 agg_cnt; | 3041 | __le32 agg_cnt; |
3042 | __le32 unsupport_mcs; | 3042 | __le32 unsupport_mcs; |
3043 | } __attribute__ ((packed)); | 3043 | } __packed; |
3044 | 3044 | ||
3045 | #define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1) | 3045 | #define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1) |
3046 | 3046 | ||
@@ -3075,14 +3075,14 @@ struct statistics_rx_non_phy { | |||
3075 | __le32 beacon_energy_a; | 3075 | __le32 beacon_energy_a; |
3076 | __le32 beacon_energy_b; | 3076 | __le32 beacon_energy_b; |
3077 | __le32 beacon_energy_c; | 3077 | __le32 beacon_energy_c; |
3078 | } __attribute__ ((packed)); | 3078 | } __packed; |
3079 | 3079 | ||
3080 | struct statistics_rx { | 3080 | struct statistics_rx { |
3081 | struct statistics_rx_phy ofdm; | 3081 | struct statistics_rx_phy ofdm; |
3082 | struct statistics_rx_phy cck; | 3082 | struct statistics_rx_phy cck; |
3083 | struct statistics_rx_non_phy general; | 3083 | struct statistics_rx_non_phy general; |
3084 | struct statistics_rx_ht_phy ofdm_ht; | 3084 | struct statistics_rx_ht_phy ofdm_ht; |
3085 | } __attribute__ ((packed)); | 3085 | } __packed; |
3086 | 3086 | ||
3087 | /** | 3087 | /** |
3088 | * struct statistics_tx_power - current tx power | 3088 | * struct statistics_tx_power - current tx power |
@@ -3096,7 +3096,7 @@ struct statistics_tx_power { | |||
3096 | u8 ant_b; | 3096 | u8 ant_b; |
3097 | u8 ant_c; | 3097 | u8 ant_c; |
3098 | u8 reserved; | 3098 | u8 reserved; |
3099 | } __attribute__ ((packed)); | 3099 | } __packed; |
3100 | 3100 | ||
3101 | struct statistics_tx_non_phy_agg { | 3101 | struct statistics_tx_non_phy_agg { |
3102 | __le32 ba_timeout; | 3102 | __le32 ba_timeout; |
@@ -3109,7 +3109,7 @@ struct statistics_tx_non_phy_agg { | |||
3109 | __le32 underrun; | 3109 | __le32 underrun; |
3110 | __le32 bt_prio_kill; | 3110 | __le32 bt_prio_kill; |
3111 | __le32 rx_ba_rsp_cnt; | 3111 | __le32 rx_ba_rsp_cnt; |
3112 | } __attribute__ ((packed)); | 3112 | } __packed; |
3113 | 3113 | ||
3114 | struct statistics_tx { | 3114 | struct statistics_tx { |
3115 | __le32 preamble_cnt; | 3115 | __le32 preamble_cnt; |
@@ -3134,7 +3134,7 @@ struct statistics_tx { | |||
3134 | */ | 3134 | */ |
3135 | struct statistics_tx_power tx_power; | 3135 | struct statistics_tx_power tx_power; |
3136 | __le32 reserved1; | 3136 | __le32 reserved1; |
3137 | } __attribute__ ((packed)); | 3137 | } __packed; |
3138 | 3138 | ||
3139 | 3139 | ||
3140 | struct statistics_div { | 3140 | struct statistics_div { |
@@ -3144,7 +3144,7 @@ struct statistics_div { | |||
3144 | __le32 probe_time; | 3144 | __le32 probe_time; |
3145 | __le32 reserved1; | 3145 | __le32 reserved1; |
3146 | __le32 reserved2; | 3146 | __le32 reserved2; |
3147 | } __attribute__ ((packed)); | 3147 | } __packed; |
3148 | 3148 | ||
3149 | struct statistics_general { | 3149 | struct statistics_general { |
3150 | __le32 temperature; /* radio temperature */ | 3150 | __le32 temperature; /* radio temperature */ |
@@ -3164,7 +3164,7 @@ struct statistics_general { | |||
3164 | __le32 num_of_sos_states; | 3164 | __le32 num_of_sos_states; |
3165 | __le32 reserved2; | 3165 | __le32 reserved2; |
3166 | __le32 reserved3; | 3166 | __le32 reserved3; |
3167 | } __attribute__ ((packed)); | 3167 | } __packed; |
3168 | 3168 | ||
3169 | #define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0) | 3169 | #define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0) |
3170 | #define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1) | 3170 | #define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1) |
@@ -3189,7 +3189,7 @@ struct statistics_general { | |||
3189 | #define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */ | 3189 | #define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */ |
3190 | struct iwl_statistics_cmd { | 3190 | struct iwl_statistics_cmd { |
3191 | __le32 configuration_flags; /* IWL_STATS_CONF_* */ | 3191 | __le32 configuration_flags; /* IWL_STATS_CONF_* */ |
3192 | } __attribute__ ((packed)); | 3192 | } __packed; |
3193 | 3193 | ||
3194 | /* | 3194 | /* |
3195 | * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command) | 3195 | * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command) |
@@ -3214,14 +3214,14 @@ struct iwl3945_notif_statistics { | |||
3214 | struct iwl39_statistics_rx rx; | 3214 | struct iwl39_statistics_rx rx; |
3215 | struct iwl39_statistics_tx tx; | 3215 | struct iwl39_statistics_tx tx; |
3216 | struct iwl39_statistics_general general; | 3216 | struct iwl39_statistics_general general; |
3217 | } __attribute__ ((packed)); | 3217 | } __packed; |
3218 | 3218 | ||
3219 | struct iwl_notif_statistics { | 3219 | struct iwl_notif_statistics { |
3220 | __le32 flag; | 3220 | __le32 flag; |
3221 | struct statistics_rx rx; | 3221 | struct statistics_rx rx; |
3222 | struct statistics_tx tx; | 3222 | struct statistics_tx tx; |
3223 | struct statistics_general general; | 3223 | struct statistics_general general; |
3224 | } __attribute__ ((packed)); | 3224 | } __packed; |
3225 | 3225 | ||
3226 | 3226 | ||
3227 | /* | 3227 | /* |
@@ -3253,7 +3253,7 @@ struct iwl_missed_beacon_notif { | |||
3253 | __le32 total_missed_becons; | 3253 | __le32 total_missed_becons; |
3254 | __le32 num_expected_beacons; | 3254 | __le32 num_expected_beacons; |
3255 | __le32 num_recvd_beacons; | 3255 | __le32 num_recvd_beacons; |
3256 | } __attribute__ ((packed)); | 3256 | } __packed; |
3257 | 3257 | ||
3258 | 3258 | ||
3259 | /****************************************************************************** | 3259 | /****************************************************************************** |
@@ -3455,7 +3455,7 @@ struct iwl_missed_beacon_notif { | |||
3455 | struct iwl_sensitivity_cmd { | 3455 | struct iwl_sensitivity_cmd { |
3456 | __le16 control; /* always use "1" */ | 3456 | __le16 control; /* always use "1" */ |
3457 | __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */ | 3457 | __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */ |
3458 | } __attribute__ ((packed)); | 3458 | } __packed; |
3459 | 3459 | ||
3460 | 3460 | ||
3461 | /** | 3461 | /** |
@@ -3536,31 +3536,31 @@ struct iwl_calib_cfg_elmnt_s { | |||
3536 | __le32 send_res; | 3536 | __le32 send_res; |
3537 | __le32 apply_res; | 3537 | __le32 apply_res; |
3538 | __le32 reserved; | 3538 | __le32 reserved; |
3539 | } __attribute__ ((packed)); | 3539 | } __packed; |
3540 | 3540 | ||
3541 | struct iwl_calib_cfg_status_s { | 3541 | struct iwl_calib_cfg_status_s { |
3542 | struct iwl_calib_cfg_elmnt_s once; | 3542 | struct iwl_calib_cfg_elmnt_s once; |
3543 | struct iwl_calib_cfg_elmnt_s perd; | 3543 | struct iwl_calib_cfg_elmnt_s perd; |
3544 | __le32 flags; | 3544 | __le32 flags; |
3545 | } __attribute__ ((packed)); | 3545 | } __packed; |
3546 | 3546 | ||
3547 | struct iwl_calib_cfg_cmd { | 3547 | struct iwl_calib_cfg_cmd { |
3548 | struct iwl_calib_cfg_status_s ucd_calib_cfg; | 3548 | struct iwl_calib_cfg_status_s ucd_calib_cfg; |
3549 | struct iwl_calib_cfg_status_s drv_calib_cfg; | 3549 | struct iwl_calib_cfg_status_s drv_calib_cfg; |
3550 | __le32 reserved1; | 3550 | __le32 reserved1; |
3551 | } __attribute__ ((packed)); | 3551 | } __packed; |
3552 | 3552 | ||
3553 | struct iwl_calib_hdr { | 3553 | struct iwl_calib_hdr { |
3554 | u8 op_code; | 3554 | u8 op_code; |
3555 | u8 first_group; | 3555 | u8 first_group; |
3556 | u8 groups_num; | 3556 | u8 groups_num; |
3557 | u8 data_valid; | 3557 | u8 data_valid; |
3558 | } __attribute__ ((packed)); | 3558 | } __packed; |
3559 | 3559 | ||
3560 | struct iwl_calib_cmd { | 3560 | struct iwl_calib_cmd { |
3561 | struct iwl_calib_hdr hdr; | 3561 | struct iwl_calib_hdr hdr; |
3562 | u8 data[0]; | 3562 | u8 data[0]; |
3563 | } __attribute__ ((packed)); | 3563 | } __packed; |
3564 | 3564 | ||
3565 | /* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */ | 3565 | /* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */ |
3566 | struct iwl_calib_diff_gain_cmd { | 3566 | struct iwl_calib_diff_gain_cmd { |
@@ -3569,14 +3569,14 @@ struct iwl_calib_diff_gain_cmd { | |||
3569 | s8 diff_gain_b; | 3569 | s8 diff_gain_b; |
3570 | s8 diff_gain_c; | 3570 | s8 diff_gain_c; |
3571 | u8 reserved1; | 3571 | u8 reserved1; |
3572 | } __attribute__ ((packed)); | 3572 | } __packed; |
3573 | 3573 | ||
3574 | struct iwl_calib_xtal_freq_cmd { | 3574 | struct iwl_calib_xtal_freq_cmd { |
3575 | struct iwl_calib_hdr hdr; | 3575 | struct iwl_calib_hdr hdr; |
3576 | u8 cap_pin1; | 3576 | u8 cap_pin1; |
3577 | u8 cap_pin2; | 3577 | u8 cap_pin2; |
3578 | u8 pad[2]; | 3578 | u8 pad[2]; |
3579 | } __attribute__ ((packed)); | 3579 | } __packed; |
3580 | 3580 | ||
3581 | /* IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD */ | 3581 | /* IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD */ |
3582 | struct iwl_calib_chain_noise_reset_cmd { | 3582 | struct iwl_calib_chain_noise_reset_cmd { |
@@ -3590,7 +3590,7 @@ struct iwl_calib_chain_noise_gain_cmd { | |||
3590 | u8 delta_gain_1; | 3590 | u8 delta_gain_1; |
3591 | u8 delta_gain_2; | 3591 | u8 delta_gain_2; |
3592 | u8 pad[2]; | 3592 | u8 pad[2]; |
3593 | } __attribute__ ((packed)); | 3593 | } __packed; |
3594 | 3594 | ||
3595 | /****************************************************************************** | 3595 | /****************************************************************************** |
3596 | * (12) | 3596 | * (12) |
@@ -3613,7 +3613,7 @@ struct iwl_led_cmd { | |||
3613 | u8 on; /* # intervals on while blinking; | 3613 | u8 on; /* # intervals on while blinking; |
3614 | * "0", regardless of "off", turns LED off */ | 3614 | * "0", regardless of "off", turns LED off */ |
3615 | u8 reserved; | 3615 | u8 reserved; |
3616 | } __attribute__ ((packed)); | 3616 | } __packed; |
3617 | 3617 | ||
3618 | /* | 3618 | /* |
3619 | * station priority table entries | 3619 | * station priority table entries |
@@ -3749,7 +3749,7 @@ struct iwl_wimax_coex_event_entry { | |||
3749 | u8 win_medium_prio; | 3749 | u8 win_medium_prio; |
3750 | u8 reserved; | 3750 | u8 reserved; |
3751 | u8 flags; | 3751 | u8 flags; |
3752 | } __attribute__ ((packed)); | 3752 | } __packed; |
3753 | 3753 | ||
3754 | /* COEX flag masks */ | 3754 | /* COEX flag masks */ |
3755 | 3755 | ||
@@ -3766,7 +3766,7 @@ struct iwl_wimax_coex_cmd { | |||
3766 | u8 flags; | 3766 | u8 flags; |
3767 | u8 reserved[3]; | 3767 | u8 reserved[3]; |
3768 | struct iwl_wimax_coex_event_entry sta_prio[COEX_NUM_OF_EVENTS]; | 3768 | struct iwl_wimax_coex_event_entry sta_prio[COEX_NUM_OF_EVENTS]; |
3769 | } __attribute__ ((packed)); | 3769 | } __packed; |
3770 | 3770 | ||
3771 | /* | 3771 | /* |
3772 | * Coexistence MEDIUM NOTIFICATION | 3772 | * Coexistence MEDIUM NOTIFICATION |
@@ -3795,7 +3795,7 @@ struct iwl_wimax_coex_cmd { | |||
3795 | struct iwl_coex_medium_notification { | 3795 | struct iwl_coex_medium_notification { |
3796 | __le32 status; | 3796 | __le32 status; |
3797 | __le32 events; | 3797 | __le32 events; |
3798 | } __attribute__ ((packed)); | 3798 | } __packed; |
3799 | 3799 | ||
3800 | /* | 3800 | /* |
3801 | * Coexistence EVENT Command | 3801 | * Coexistence EVENT Command |
@@ -3810,11 +3810,11 @@ struct iwl_coex_event_cmd { | |||
3810 | u8 flags; | 3810 | u8 flags; |
3811 | u8 event; | 3811 | u8 event; |
3812 | __le16 reserved; | 3812 | __le16 reserved; |
3813 | } __attribute__ ((packed)); | 3813 | } __packed; |
3814 | 3814 | ||
3815 | struct iwl_coex_event_resp { | 3815 | struct iwl_coex_event_resp { |
3816 | __le32 status; | 3816 | __le32 status; |
3817 | } __attribute__ ((packed)); | 3817 | } __packed; |
3818 | 3818 | ||
3819 | 3819 | ||
3820 | /****************************************************************************** | 3820 | /****************************************************************************** |
@@ -3858,7 +3858,7 @@ struct iwl_rx_packet { | |||
3858 | __le32 status; | 3858 | __le32 status; |
3859 | u8 raw[0]; | 3859 | u8 raw[0]; |
3860 | } u; | 3860 | } u; |
3861 | } __attribute__ ((packed)); | 3861 | } __packed; |
3862 | 3862 | ||
3863 | int iwl_agn_check_rxon_cmd(struct iwl_priv *priv); | 3863 | int iwl_agn_check_rxon_cmd(struct iwl_priv *priv); |
3864 | 3864 | ||
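The hunks above swap the open-coded attribute for the kernel's __packed shorthand, which at the time of this series expands (in include/linux/compiler-gcc.h) to __attribute__((packed)), so the substitution changes spelling only, not structure layout. A minimal userspace sketch of that equivalence, with hypothetical struct names:

	/* Hedged sketch: both spellings yield the same padding-free layout
	 * once __packed carries the kernel's definition. */
	#include <stdint.h>
	#include <stdio.h>

	#define __packed __attribute__((packed))

	struct old_style { uint8_t mode; uint16_t flags; } __attribute__ ((packed));
	struct new_style { uint8_t mode; uint16_t flags; } __packed;

	int main(void)
	{
		/* Both print 3; without packing each would be 4 bytes on
		 * common ABIs because of the padding before flags. */
		printf("%zu %zu\n", sizeof(struct old_style), sizeof(struct new_style));
		return 0;
	}
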
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h index f3f3473c5c7e..a36a6ef45aae 100644 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h | |||
@@ -157,7 +157,7 @@ struct iwl_queue { | |||
157 | * space more than this */ | 157 | * space more than this */ |
158 | int high_mark; /* high watermark, stop queue if free | 158 | int high_mark; /* high watermark, stop queue if free |
159 | * space less than this */ | 159 | * space less than this */ |
160 | } __attribute__ ((packed)); | 160 | } __packed; |
161 | 161 | ||
162 | /* One for each TFD */ | 162 | /* One for each TFD */ |
163 | struct iwl_tx_info { | 163 | struct iwl_tx_info { |
@@ -343,8 +343,8 @@ struct iwl_device_cmd { | |||
343 | struct iwl_tx_cmd tx; | 343 | struct iwl_tx_cmd tx; |
344 | struct iwl6000_channel_switch_cmd chswitch; | 344 | struct iwl6000_channel_switch_cmd chswitch; |
345 | u8 payload[DEF_CMD_PAYLOAD_SIZE]; | 345 | u8 payload[DEF_CMD_PAYLOAD_SIZE]; |
346 | } __attribute__ ((packed)) cmd; | 346 | } __packed cmd; |
347 | } __attribute__ ((packed)); | 347 | } __packed; |
348 | 348 | ||
349 | #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd)) | 349 | #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd)) |
350 | 350 | ||
@@ -590,7 +590,7 @@ struct iwl_ucode_tlv { | |||
590 | __le16 alternative; /* see comment */ | 590 | __le16 alternative; /* see comment */ |
591 | __le32 length; /* not including type/length fields */ | 591 | __le32 length; /* not including type/length fields */ |
592 | u8 data[0]; | 592 | u8 data[0]; |
593 | } __attribute__ ((packed)); | 593 | } __packed; |
594 | 594 | ||
595 | #define IWL_TLV_UCODE_MAGIC 0x0a4c5749 | 595 | #define IWL_TLV_UCODE_MAGIC 0x0a4c5749 |
596 | 596 | ||
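Several of the structures above (iwl_ucode_tlv, iwl_calib_cmd, iwl_wep_cmd) end in a zero-length array such as u8 data[0], the pre-C99 GCC idiom for a variable-length trailing payload. A hedged sketch of how such a packed header is typically sized and filled; the names and values are hypothetical, and byte-swapping of the little-endian fields is omitted:

	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	#define __packed __attribute__((packed))

	struct demo_tlv {
		uint16_t type_le;
		uint32_t length_le;   /* payload bytes that follow */
		uint8_t  data[0];     /* payload starts right after the header */
	} __packed;

	static struct demo_tlv *demo_tlv_build(const void *payload, uint32_t len)
	{
		/* sizeof(*tlv) counts only the fixed 6-byte header, so the
		 * payload length is added to the allocation explicitly. */
		struct demo_tlv *tlv = malloc(sizeof(*tlv) + len);
		if (!tlv)
			return NULL;
		tlv->type_le = 0;        /* hypothetical type value */
		tlv->length_le = len;    /* byte-swap omitted in this sketch */
		memcpy(tlv->data, payload, len);
		return tlv;
	}

	int main(void)
	{
		struct demo_tlv *t = demo_tlv_build("abc", 3);
		free(t);
		return 0;
	}
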
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h index 95aa202c85e3..5488006491a2 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h | |||
@@ -118,7 +118,7 @@ enum { | |||
118 | struct iwl_eeprom_channel { | 118 | struct iwl_eeprom_channel { |
119 | u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */ | 119 | u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */ |
120 | s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */ | 120 | s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */ |
121 | } __attribute__ ((packed)); | 121 | } __packed; |
122 | 122 | ||
123 | /** | 123 | /** |
124 | * iwl_eeprom_enhanced_txpwr structure | 124 | * iwl_eeprom_enhanced_txpwr structure |
@@ -144,7 +144,7 @@ struct iwl_eeprom_enhanced_txpwr { | |||
144 | s8 reserved; | 144 | s8 reserved; |
145 | s8 mimo2_max; | 145 | s8 mimo2_max; |
146 | s8 mimo3_max; | 146 | s8 mimo3_max; |
147 | } __attribute__ ((packed)); | 147 | } __packed; |
148 | 148 | ||
149 | /* 3945 Specific */ | 149 | /* 3945 Specific */ |
150 | #define EEPROM_3945_EEPROM_VERSION (0x2f) | 150 | #define EEPROM_3945_EEPROM_VERSION (0x2f) |
@@ -312,7 +312,7 @@ struct iwl_eeprom_calib_measure { | |||
312 | u8 gain_idx; /* Index into gain table */ | 312 | u8 gain_idx; /* Index into gain table */ |
313 | u8 actual_pow; /* Measured RF output power, half-dBm */ | 313 | u8 actual_pow; /* Measured RF output power, half-dBm */ |
314 | s8 pa_det; /* Power amp detector level (not used) */ | 314 | s8 pa_det; /* Power amp detector level (not used) */ |
315 | } __attribute__ ((packed)); | 315 | } __packed; |
316 | 316 | ||
317 | 317 | ||
318 | /* | 318 | /* |
@@ -328,7 +328,7 @@ struct iwl_eeprom_calib_ch_info { | |||
328 | struct iwl_eeprom_calib_measure | 328 | struct iwl_eeprom_calib_measure |
329 | measurements[EEPROM_TX_POWER_TX_CHAINS] | 329 | measurements[EEPROM_TX_POWER_TX_CHAINS] |
330 | [EEPROM_TX_POWER_MEASUREMENTS]; | 330 | [EEPROM_TX_POWER_MEASUREMENTS]; |
331 | } __attribute__ ((packed)); | 331 | } __packed; |
332 | 332 | ||
333 | /* | 333 | /* |
334 | * txpower subband info. | 334 | * txpower subband info. |
@@ -345,7 +345,7 @@ struct iwl_eeprom_calib_subband_info { | |||
345 | u8 ch_to; /* channel number of highest channel in subband */ | 345 | u8 ch_to; /* channel number of highest channel in subband */ |
346 | struct iwl_eeprom_calib_ch_info ch1; | 346 | struct iwl_eeprom_calib_ch_info ch1; |
347 | struct iwl_eeprom_calib_ch_info ch2; | 347 | struct iwl_eeprom_calib_ch_info ch2; |
348 | } __attribute__ ((packed)); | 348 | } __packed; |
349 | 349 | ||
350 | 350 | ||
351 | /* | 351 | /* |
@@ -374,7 +374,7 @@ struct iwl_eeprom_calib_info { | |||
374 | __le16 voltage; /* signed */ | 374 | __le16 voltage; /* signed */ |
375 | struct iwl_eeprom_calib_subband_info | 375 | struct iwl_eeprom_calib_subband_info |
376 | band_info[EEPROM_TX_POWER_BANDS]; | 376 | band_info[EEPROM_TX_POWER_BANDS]; |
377 | } __attribute__ ((packed)); | 377 | } __packed; |
378 | 378 | ||
379 | 379 | ||
380 | #define ADDRESS_MSK 0x0000FFFF | 380 | #define ADDRESS_MSK 0x0000FFFF |
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h index 113c3669b9ce..a3fcbb5f2c70 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/iwlwifi/iwl-fh.h | |||
@@ -449,7 +449,7 @@ struct iwl_rb_status { | |||
449 | __le16 finished_rb_num; | 449 | __le16 finished_rb_num; |
450 | __le16 finished_fr_nam; | 450 | __le16 finished_fr_nam; |
451 | __le32 __unused; /* 3945 only */ | 451 | __le32 __unused; /* 3945 only */ |
452 | } __attribute__ ((packed)); | 452 | } __packed; |
453 | 453 | ||
454 | 454 | ||
455 | #define TFD_QUEUE_SIZE_MAX (256) | 455 | #define TFD_QUEUE_SIZE_MAX (256) |
@@ -475,7 +475,7 @@ static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr) | |||
475 | struct iwl_tfd_tb { | 475 | struct iwl_tfd_tb { |
476 | __le32 lo; | 476 | __le32 lo; |
477 | __le16 hi_n_len; | 477 | __le16 hi_n_len; |
478 | } __attribute__((packed)); | 478 | } __packed; |
479 | 479 | ||
480 | /** | 480 | /** |
481 | * struct iwl_tfd | 481 | * struct iwl_tfd |
@@ -510,7 +510,7 @@ struct iwl_tfd { | |||
510 | u8 num_tbs; | 510 | u8 num_tbs; |
511 | struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS]; | 511 | struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS]; |
512 | __le32 __pad; | 512 | __le32 __pad; |
513 | } __attribute__ ((packed)); | 513 | } __packed; |
514 | 514 | ||
515 | /* Keep Warm Size */ | 515 | /* Keep Warm Size */ |
516 | #define IWL_KW_SIZE 0x1000 /* 4k */ | 516 | #define IWL_KW_SIZE 0x1000 /* 4k */ |
diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.h b/drivers/net/wireless/iwlwifi/iwl-spectrum.h index af6babee2891..c4ca0b5d77da 100644 --- a/drivers/net/wireless/iwlwifi/iwl-spectrum.h +++ b/drivers/net/wireless/iwlwifi/iwl-spectrum.h | |||
@@ -42,7 +42,7 @@ struct ieee80211_basic_report { | |||
42 | __le64 start_time; | 42 | __le64 start_time; |
43 | __le16 duration; | 43 | __le16 duration; |
44 | u8 map; | 44 | u8 map; |
45 | } __attribute__ ((packed)); | 45 | } __packed; |
46 | 46 | ||
47 | enum { /* ieee80211_measurement_request.mode */ | 47 | enum { /* ieee80211_measurement_request.mode */ |
48 | /* Bit 0 is reserved */ | 48 | /* Bit 0 is reserved */ |
@@ -63,13 +63,13 @@ struct ieee80211_measurement_params { | |||
63 | u8 channel; | 63 | u8 channel; |
64 | __le64 start_time; | 64 | __le64 start_time; |
65 | __le16 duration; | 65 | __le16 duration; |
66 | } __attribute__ ((packed)); | 66 | } __packed; |
67 | 67 | ||
68 | struct ieee80211_info_element { | 68 | struct ieee80211_info_element { |
69 | u8 id; | 69 | u8 id; |
70 | u8 len; | 70 | u8 len; |
71 | u8 data[0]; | 71 | u8 data[0]; |
72 | } __attribute__ ((packed)); | 72 | } __packed; |
73 | 73 | ||
74 | struct ieee80211_measurement_request { | 74 | struct ieee80211_measurement_request { |
75 | struct ieee80211_info_element ie; | 75 | struct ieee80211_info_element ie; |
@@ -77,7 +77,7 @@ struct ieee80211_measurement_request { | |||
77 | u8 mode; | 77 | u8 mode; |
78 | u8 type; | 78 | u8 type; |
79 | struct ieee80211_measurement_params params[0]; | 79 | struct ieee80211_measurement_params params[0]; |
80 | } __attribute__ ((packed)); | 80 | } __packed; |
81 | 81 | ||
82 | struct ieee80211_measurement_report { | 82 | struct ieee80211_measurement_report { |
83 | struct ieee80211_info_element ie; | 83 | struct ieee80211_info_element ie; |
@@ -87,6 +87,6 @@ struct ieee80211_measurement_report { | |||
87 | union { | 87 | union { |
88 | struct ieee80211_basic_report basic[0]; | 88 | struct ieee80211_basic_report basic[0]; |
89 | } u; | 89 | } u; |
90 | } __attribute__ ((packed)); | 90 | } __packed; |
91 | 91 | ||
92 | #endif | 92 | #endif |
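The packed attribute matters for these headers because the structs mirror byte layouts fixed by the firmware or by the 802.11 spec: fixed-width __le16/__le32 fields plus packing keep member offsets equal to wire offsets. A hedged sketch with a hypothetical command layout, checked with offsetof:

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	#define __packed __attribute__((packed))

	struct demo_cmd {
		uint8_t  op_code;      /* wire byte 0 */
		uint32_t address_le;   /* wire bytes 1-4, little-endian */
		uint16_t length_le;    /* wire bytes 5-6, little-endian */
	} __packed;

	int main(void)
	{
		/* Packed: offsets 0, 1, 5 and total size 7, matching the wire
		 * layout byte for byte. Without __packed, address_le would move
		 * to offset 4 and the struct would grow to 12 bytes. */
		assert(offsetof(struct demo_cmd, address_le) == 1);
		assert(offsetof(struct demo_cmd, length_le) == 5);
		assert(sizeof(struct demo_cmd) == 7);
		return 0;
	}
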
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.h b/drivers/net/wireless/iwmc3200wifi/commands.h index 7e16bcf59978..6421689f5e8e 100644 --- a/drivers/net/wireless/iwmc3200wifi/commands.h +++ b/drivers/net/wireless/iwmc3200wifi/commands.h | |||
@@ -56,7 +56,7 @@ | |||
56 | 56 | ||
57 | struct iwm_umac_cmd_reset { | 57 | struct iwm_umac_cmd_reset { |
58 | __le32 flags; | 58 | __le32 flags; |
59 | } __attribute__ ((packed)); | 59 | } __packed; |
60 | 60 | ||
61 | #define UMAC_PARAM_TBL_ORD_FIX 0x0 | 61 | #define UMAC_PARAM_TBL_ORD_FIX 0x0 |
62 | #define UMAC_PARAM_TBL_ORD_VAR 0x1 | 62 | #define UMAC_PARAM_TBL_ORD_VAR 0x1 |
@@ -220,37 +220,37 @@ struct iwm_umac_cmd_set_param_fix { | |||
220 | __le16 tbl; | 220 | __le16 tbl; |
221 | __le16 key; | 221 | __le16 key; |
222 | __le32 value; | 222 | __le32 value; |
223 | } __attribute__ ((packed)); | 223 | } __packed; |
224 | 224 | ||
225 | struct iwm_umac_cmd_set_param_var { | 225 | struct iwm_umac_cmd_set_param_var { |
226 | __le16 tbl; | 226 | __le16 tbl; |
227 | __le16 key; | 227 | __le16 key; |
228 | __le16 len; | 228 | __le16 len; |
229 | __le16 reserved; | 229 | __le16 reserved; |
230 | } __attribute__ ((packed)); | 230 | } __packed; |
231 | 231 | ||
232 | struct iwm_umac_cmd_get_param { | 232 | struct iwm_umac_cmd_get_param { |
233 | __le16 tbl; | 233 | __le16 tbl; |
234 | __le16 key; | 234 | __le16 key; |
235 | } __attribute__ ((packed)); | 235 | } __packed; |
236 | 236 | ||
237 | struct iwm_umac_cmd_get_param_resp { | 237 | struct iwm_umac_cmd_get_param_resp { |
238 | __le16 tbl; | 238 | __le16 tbl; |
239 | __le16 key; | 239 | __le16 key; |
240 | __le16 len; | 240 | __le16 len; |
241 | __le16 reserved; | 241 | __le16 reserved; |
242 | } __attribute__ ((packed)); | 242 | } __packed; |
243 | 243 | ||
244 | struct iwm_umac_cmd_eeprom_proxy_hdr { | 244 | struct iwm_umac_cmd_eeprom_proxy_hdr { |
245 | __le32 type; | 245 | __le32 type; |
246 | __le32 offset; | 246 | __le32 offset; |
247 | __le32 len; | 247 | __le32 len; |
248 | } __attribute__ ((packed)); | 248 | } __packed; |
249 | 249 | ||
250 | struct iwm_umac_cmd_eeprom_proxy { | 250 | struct iwm_umac_cmd_eeprom_proxy { |
251 | struct iwm_umac_cmd_eeprom_proxy_hdr hdr; | 251 | struct iwm_umac_cmd_eeprom_proxy_hdr hdr; |
252 | u8 buf[0]; | 252 | u8 buf[0]; |
253 | } __attribute__ ((packed)); | 253 | } __packed; |
254 | 254 | ||
255 | #define IWM_UMAC_CMD_EEPROM_TYPE_READ 0x1 | 255 | #define IWM_UMAC_CMD_EEPROM_TYPE_READ 0x1 |
256 | #define IWM_UMAC_CMD_EEPROM_TYPE_WRITE 0x2 | 256 | #define IWM_UMAC_CMD_EEPROM_TYPE_WRITE 0x2 |
@@ -267,13 +267,13 @@ struct iwm_umac_channel_info { | |||
267 | u8 reserved; | 267 | u8 reserved; |
268 | u8 flags; | 268 | u8 flags; |
269 | __le32 channels_mask; | 269 | __le32 channels_mask; |
270 | } __attribute__ ((packed)); | 270 | } __packed; |
271 | 271 | ||
272 | struct iwm_umac_cmd_get_channel_list { | 272 | struct iwm_umac_cmd_get_channel_list { |
273 | __le16 count; | 273 | __le16 count; |
274 | __le16 reserved; | 274 | __le16 reserved; |
275 | struct iwm_umac_channel_info ch[0]; | 275 | struct iwm_umac_channel_info ch[0]; |
276 | } __attribute__ ((packed)); | 276 | } __packed; |
277 | 277 | ||
278 | 278 | ||
279 | /* UMAC WiFi interface commands */ | 279 | /* UMAC WiFi interface commands */ |
@@ -304,7 +304,7 @@ struct iwm_umac_ssid { | |||
304 | u8 ssid_len; | 304 | u8 ssid_len; |
305 | u8 ssid[IEEE80211_MAX_SSID_LEN]; | 305 | u8 ssid[IEEE80211_MAX_SSID_LEN]; |
306 | u8 reserved[3]; | 306 | u8 reserved[3]; |
307 | } __attribute__ ((packed)); | 307 | } __packed; |
308 | 308 | ||
309 | struct iwm_umac_cmd_scan_request { | 309 | struct iwm_umac_cmd_scan_request { |
310 | struct iwm_umac_wifi_if hdr; | 310 | struct iwm_umac_wifi_if hdr; |
@@ -314,7 +314,7 @@ struct iwm_umac_cmd_scan_request { | |||
314 | u8 timeout; /* In seconds */ | 314 | u8 timeout; /* In seconds */ |
315 | u8 reserved; | 315 | u8 reserved; |
316 | struct iwm_umac_ssid ssids[UMAC_WIFI_IF_PROBE_OPTION_MAX]; | 316 | struct iwm_umac_ssid ssids[UMAC_WIFI_IF_PROBE_OPTION_MAX]; |
317 | } __attribute__ ((packed)); | 317 | } __packed; |
318 | 318 | ||
319 | #define UMAC_CIPHER_TYPE_NONE 0xFF | 319 | #define UMAC_CIPHER_TYPE_NONE 0xFF |
320 | #define UMAC_CIPHER_TYPE_USE_GROUPCAST 0x00 | 320 | #define UMAC_CIPHER_TYPE_USE_GROUPCAST 0x00 |
@@ -357,7 +357,7 @@ struct iwm_umac_security { | |||
357 | u8 ucast_cipher; | 357 | u8 ucast_cipher; |
358 | u8 mcast_cipher; | 358 | u8 mcast_cipher; |
359 | u8 flags; | 359 | u8 flags; |
360 | } __attribute__ ((packed)); | 360 | } __packed; |
361 | 361 | ||
362 | struct iwm_umac_ibss { | 362 | struct iwm_umac_ibss { |
363 | u8 beacon_interval; /* in millisecond */ | 363 | u8 beacon_interval; /* in millisecond */ |
@@ -366,7 +366,7 @@ struct iwm_umac_ibss { | |||
366 | u8 band; | 366 | u8 band; |
367 | u8 channel; | 367 | u8 channel; |
368 | u8 reserved[3]; | 368 | u8 reserved[3]; |
369 | } __attribute__ ((packed)); | 369 | } __packed; |
370 | 370 | ||
371 | #define UMAC_MODE_BSS 0 | 371 | #define UMAC_MODE_BSS 0 |
372 | #define UMAC_MODE_IBSS 1 | 372 | #define UMAC_MODE_IBSS 1 |
@@ -385,13 +385,13 @@ struct iwm_umac_profile { | |||
385 | __le16 flags; | 385 | __le16 flags; |
386 | u8 wireless_mode; | 386 | u8 wireless_mode; |
387 | u8 bss_num; | 387 | u8 bss_num; |
388 | } __attribute__ ((packed)); | 388 | } __packed; |
389 | 389 | ||
390 | struct iwm_umac_invalidate_profile { | 390 | struct iwm_umac_invalidate_profile { |
391 | struct iwm_umac_wifi_if hdr; | 391 | struct iwm_umac_wifi_if hdr; |
392 | u8 reason; | 392 | u8 reason; |
393 | u8 reserved[3]; | 393 | u8 reserved[3]; |
394 | } __attribute__ ((packed)); | 394 | } __packed; |
395 | 395 | ||
396 | /* Encryption key commands */ | 396 | /* Encryption key commands */ |
397 | struct iwm_umac_key_wep40 { | 397 | struct iwm_umac_key_wep40 { |
@@ -400,7 +400,7 @@ struct iwm_umac_key_wep40 { | |||
400 | u8 key[WLAN_KEY_LEN_WEP40]; | 400 | u8 key[WLAN_KEY_LEN_WEP40]; |
401 | u8 static_key; | 401 | u8 static_key; |
402 | u8 reserved[2]; | 402 | u8 reserved[2]; |
403 | } __attribute__ ((packed)); | 403 | } __packed; |
404 | 404 | ||
405 | struct iwm_umac_key_wep104 { | 405 | struct iwm_umac_key_wep104 { |
406 | struct iwm_umac_wifi_if hdr; | 406 | struct iwm_umac_wifi_if hdr; |
@@ -408,7 +408,7 @@ struct iwm_umac_key_wep104 { | |||
408 | u8 key[WLAN_KEY_LEN_WEP104]; | 408 | u8 key[WLAN_KEY_LEN_WEP104]; |
409 | u8 static_key; | 409 | u8 static_key; |
410 | u8 reserved[2]; | 410 | u8 reserved[2]; |
411 | } __attribute__ ((packed)); | 411 | } __packed; |
412 | 412 | ||
413 | #define IWM_TKIP_KEY_SIZE 16 | 413 | #define IWM_TKIP_KEY_SIZE 16 |
414 | #define IWM_TKIP_MIC_SIZE 8 | 414 | #define IWM_TKIP_MIC_SIZE 8 |
@@ -420,7 +420,7 @@ struct iwm_umac_key_tkip { | |||
420 | u8 tkip_key[IWM_TKIP_KEY_SIZE]; | 420 | u8 tkip_key[IWM_TKIP_KEY_SIZE]; |
421 | u8 mic_rx_key[IWM_TKIP_MIC_SIZE]; | 421 | u8 mic_rx_key[IWM_TKIP_MIC_SIZE]; |
422 | u8 mic_tx_key[IWM_TKIP_MIC_SIZE]; | 422 | u8 mic_tx_key[IWM_TKIP_MIC_SIZE]; |
423 | } __attribute__ ((packed)); | 423 | } __packed; |
424 | 424 | ||
425 | struct iwm_umac_key_ccmp { | 425 | struct iwm_umac_key_ccmp { |
426 | struct iwm_umac_wifi_if hdr; | 426 | struct iwm_umac_wifi_if hdr; |
@@ -428,27 +428,27 @@ struct iwm_umac_key_ccmp { | |||
428 | u8 iv_count[6]; | 428 | u8 iv_count[6]; |
429 | u8 reserved[2]; | 429 | u8 reserved[2]; |
430 | u8 key[WLAN_KEY_LEN_CCMP]; | 430 | u8 key[WLAN_KEY_LEN_CCMP]; |
431 | } __attribute__ ((packed)); | 431 | } __packed; |
432 | 432 | ||
433 | struct iwm_umac_key_remove { | 433 | struct iwm_umac_key_remove { |
434 | struct iwm_umac_wifi_if hdr; | 434 | struct iwm_umac_wifi_if hdr; |
435 | struct iwm_umac_key_hdr key_hdr; | 435 | struct iwm_umac_key_hdr key_hdr; |
436 | } __attribute__ ((packed)); | 436 | } __packed; |
437 | 437 | ||
438 | struct iwm_umac_tx_key_id { | 438 | struct iwm_umac_tx_key_id { |
439 | struct iwm_umac_wifi_if hdr; | 439 | struct iwm_umac_wifi_if hdr; |
440 | u8 key_idx; | 440 | u8 key_idx; |
441 | u8 reserved[3]; | 441 | u8 reserved[3]; |
442 | } __attribute__ ((packed)); | 442 | } __packed; |
443 | 443 | ||
444 | struct iwm_umac_pwr_trigger { | 444 | struct iwm_umac_pwr_trigger { |
445 | struct iwm_umac_wifi_if hdr; | 445 | struct iwm_umac_wifi_if hdr; |
446 | __le32 reseved; | 446 | __le32 reseved; |
447 | } __attribute__ ((packed)); | 447 | } __packed; |
448 | 448 | ||
449 | struct iwm_umac_cmd_stats_req { | 449 | struct iwm_umac_cmd_stats_req { |
450 | __le32 flags; | 450 | __le32 flags; |
451 | } __attribute__ ((packed)); | 451 | } __packed; |
452 | 452 | ||
453 | struct iwm_umac_cmd_stop_resume_tx { | 453 | struct iwm_umac_cmd_stop_resume_tx { |
454 | u8 flags; | 454 | u8 flags; |
@@ -456,7 +456,7 @@ struct iwm_umac_cmd_stop_resume_tx { | |||
456 | __le16 stop_resume_tid_msk; | 456 | __le16 stop_resume_tid_msk; |
457 | __le16 last_seq_num[IWM_UMAC_TID_NR]; | 457 | __le16 last_seq_num[IWM_UMAC_TID_NR]; |
458 | u16 reserved; | 458 | u16 reserved; |
459 | } __attribute__ ((packed)); | 459 | } __packed; |
460 | 460 | ||
461 | #define IWM_CMD_PMKID_ADD 1 | 461 | #define IWM_CMD_PMKID_ADD 1 |
462 | #define IWM_CMD_PMKID_DEL 2 | 462 | #define IWM_CMD_PMKID_DEL 2 |
@@ -468,7 +468,7 @@ struct iwm_umac_pmkid_update { | |||
468 | u8 bssid[ETH_ALEN]; | 468 | u8 bssid[ETH_ALEN]; |
469 | __le16 reserved; | 469 | __le16 reserved; |
470 | u8 pmkid[WLAN_PMKID_LEN]; | 470 | u8 pmkid[WLAN_PMKID_LEN]; |
471 | } __attribute__ ((packed)); | 471 | } __packed; |
472 | 472 | ||
473 | /* LMAC commands */ | 473 | /* LMAC commands */ |
474 | int iwm_read_mac(struct iwm_priv *iwm, u8 *mac); | 474 | int iwm_read_mac(struct iwm_priv *iwm, u8 *mac); |
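For reference, the hunks in this series are a mechanical substitution: __packed is assumed to carry the usual compiler-gcc.h definition, i.e. the same GCC attribute spelled out above, so the byte-exact layout of these firmware structures is unchanged. A minimal, self-contained sketch of what the attribute does (all names below are illustrative only, not part of the patch):

#include <stdio.h>

/* assumed to match the kernel's compiler-gcc.h shorthand */
#define __packed __attribute__((packed))

struct demo_unpacked {
	unsigned char cmd;
	unsigned int  len;	/* the compiler may insert padding before this member */
};

struct demo_packed {
	unsigned char cmd;
	unsigned int  len;
} __packed;			/* no padding: layout matches a byte-exact wire format */

int main(void)
{
	/* typically prints "unpacked=8 packed=5" on common ABIs */
	printf("unpacked=%zu packed=%zu\n",
	       sizeof(struct demo_unpacked), sizeof(struct demo_packed));
	return 0;
}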
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h index 13266c3842f8..51d7efa15ae6 100644 --- a/drivers/net/wireless/iwmc3200wifi/iwm.h +++ b/drivers/net/wireless/iwmc3200wifi/iwm.h | |||
@@ -162,7 +162,7 @@ struct iwm_umac_key_hdr { | |||
162 | u8 mac[ETH_ALEN]; | 162 | u8 mac[ETH_ALEN]; |
163 | u8 key_idx; | 163 | u8 key_idx; |
164 | u8 multicast; /* BCast encrypt & BCast decrypt of frames FROM mac */ | 164 | u8 multicast; /* BCast encrypt & BCast decrypt of frames FROM mac */ |
165 | } __attribute__ ((packed)); | 165 | } __packed; |
166 | 166 | ||
167 | struct iwm_key { | 167 | struct iwm_key { |
168 | struct iwm_umac_key_hdr hdr; | 168 | struct iwm_umac_key_hdr hdr; |
diff --git a/drivers/net/wireless/iwmc3200wifi/lmac.h b/drivers/net/wireless/iwmc3200wifi/lmac.h index a855a99e49b8..5ddcdf8c70c0 100644 --- a/drivers/net/wireless/iwmc3200wifi/lmac.h +++ b/drivers/net/wireless/iwmc3200wifi/lmac.h | |||
@@ -43,7 +43,7 @@ struct iwm_lmac_hdr { | |||
43 | u8 id; | 43 | u8 id; |
44 | u8 flags; | 44 | u8 flags; |
45 | __le16 seq_num; | 45 | __le16 seq_num; |
46 | } __attribute__ ((packed)); | 46 | } __packed; |
47 | 47 | ||
48 | /* LMAC commands */ | 48 | /* LMAC commands */ |
49 | #define CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK 0x1 | 49 | #define CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK 0x1 |
@@ -54,23 +54,23 @@ struct iwm_lmac_cal_cfg_elt { | |||
54 | __le32 send_res; /* 1 for sending back results */ | 54 | __le32 send_res; /* 1 for sending back results */ |
55 | __le32 apply_res; /* 1 for applying calibration results to HW */ | 55 | __le32 apply_res; /* 1 for applying calibration results to HW */ |
56 | __le32 reserved; | 56 | __le32 reserved; |
57 | } __attribute__ ((packed)); | 57 | } __packed; |
58 | 58 | ||
59 | struct iwm_lmac_cal_cfg_status { | 59 | struct iwm_lmac_cal_cfg_status { |
60 | struct iwm_lmac_cal_cfg_elt init; | 60 | struct iwm_lmac_cal_cfg_elt init; |
61 | struct iwm_lmac_cal_cfg_elt periodic; | 61 | struct iwm_lmac_cal_cfg_elt periodic; |
62 | __le32 flags; /* CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK */ | 62 | __le32 flags; /* CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK */ |
63 | } __attribute__ ((packed)); | 63 | } __packed; |
64 | 64 | ||
65 | struct iwm_lmac_cal_cfg_cmd { | 65 | struct iwm_lmac_cal_cfg_cmd { |
66 | struct iwm_lmac_cal_cfg_status ucode_cfg; | 66 | struct iwm_lmac_cal_cfg_status ucode_cfg; |
67 | struct iwm_lmac_cal_cfg_status driver_cfg; | 67 | struct iwm_lmac_cal_cfg_status driver_cfg; |
68 | __le32 reserved; | 68 | __le32 reserved; |
69 | } __attribute__ ((packed)); | 69 | } __packed; |
70 | 70 | ||
71 | struct iwm_lmac_cal_cfg_resp { | 71 | struct iwm_lmac_cal_cfg_resp { |
72 | __le32 status; | 72 | __le32 status; |
73 | } __attribute__ ((packed)); | 73 | } __packed; |
74 | 74 | ||
75 | #define IWM_CARD_STATE_SW_HW_ENABLED 0x00 | 75 | #define IWM_CARD_STATE_SW_HW_ENABLED 0x00 |
76 | #define IWM_CARD_STATE_HW_DISABLED 0x01 | 76 | #define IWM_CARD_STATE_HW_DISABLED 0x01 |
@@ -80,7 +80,7 @@ struct iwm_lmac_cal_cfg_resp { | |||
80 | 80 | ||
81 | struct iwm_lmac_card_state { | 81 | struct iwm_lmac_card_state { |
82 | __le32 flags; | 82 | __le32 flags; |
83 | } __attribute__ ((packed)); | 83 | } __packed; |
84 | 84 | ||
85 | /** | 85 | /** |
86 | * COEX_PRIORITY_TABLE_CMD | 86 | * COEX_PRIORITY_TABLE_CMD |
@@ -131,7 +131,7 @@ struct coex_event { | |||
131 | u8 win_med_prio; | 131 | u8 win_med_prio; |
132 | u8 reserved; | 132 | u8 reserved; |
133 | u8 flags; | 133 | u8 flags; |
134 | } __attribute__ ((packed)); | 134 | } __packed; |
135 | 135 | ||
136 | #define COEX_FLAGS_STA_TABLE_VALID_MSK 0x1 | 136 | #define COEX_FLAGS_STA_TABLE_VALID_MSK 0x1 |
137 | #define COEX_FLAGS_UNASSOC_WAKEUP_UMASK_MSK 0x4 | 137 | #define COEX_FLAGS_UNASSOC_WAKEUP_UMASK_MSK 0x4 |
@@ -142,7 +142,7 @@ struct iwm_coex_prio_table_cmd { | |||
142 | u8 flags; | 142 | u8 flags; |
143 | u8 reserved[3]; | 143 | u8 reserved[3]; |
144 | struct coex_event sta_prio[COEX_EVENTS_NUM]; | 144 | struct coex_event sta_prio[COEX_EVENTS_NUM]; |
145 | } __attribute__ ((packed)); | 145 | } __packed; |
146 | 146 | ||
147 | /* Coexistence definitions | 147 | /* Coexistence definitions |
148 | * | 148 | * |
@@ -192,7 +192,7 @@ struct iwm_ct_kill_cfg_cmd { | |||
192 | u32 exit_threshold; | 192 | u32 exit_threshold; |
193 | u32 reserved; | 193 | u32 reserved; |
194 | u32 entry_threshold; | 194 | u32 entry_threshold; |
195 | } __attribute__ ((packed)); | 195 | } __packed; |
196 | 196 | ||
197 | 197 | ||
198 | /* LMAC OP CODES */ | 198 | /* LMAC OP CODES */ |
@@ -428,7 +428,7 @@ struct iwm_lmac_calib_hdr { | |||
428 | u8 first_grp; | 428 | u8 first_grp; |
429 | u8 grp_num; | 429 | u8 grp_num; |
430 | u8 all_data_valid; | 430 | u8 all_data_valid; |
431 | } __attribute__ ((packed)); | 431 | } __packed; |
432 | 432 | ||
433 | #define IWM_LMAC_CALIB_FREQ_GROUPS_NR 7 | 433 | #define IWM_LMAC_CALIB_FREQ_GROUPS_NR 7 |
434 | #define IWM_CALIB_FREQ_GROUPS_NR 5 | 434 | #define IWM_CALIB_FREQ_GROUPS_NR 5 |
@@ -437,20 +437,20 @@ struct iwm_lmac_calib_hdr { | |||
437 | struct iwm_calib_rxiq_entry { | 437 | struct iwm_calib_rxiq_entry { |
438 | u16 ptam_postdist_ars; | 438 | u16 ptam_postdist_ars; |
439 | u16 ptam_postdist_arc; | 439 | u16 ptam_postdist_arc; |
440 | } __attribute__ ((packed)); | 440 | } __packed; |
441 | 441 | ||
442 | struct iwm_calib_rxiq_group { | 442 | struct iwm_calib_rxiq_group { |
443 | struct iwm_calib_rxiq_entry mode[IWM_CALIB_DC_MODES_NR]; | 443 | struct iwm_calib_rxiq_entry mode[IWM_CALIB_DC_MODES_NR]; |
444 | } __attribute__ ((packed)); | 444 | } __packed; |
445 | 445 | ||
446 | struct iwm_lmac_calib_rxiq { | 446 | struct iwm_lmac_calib_rxiq { |
447 | struct iwm_calib_rxiq_group group[IWM_LMAC_CALIB_FREQ_GROUPS_NR]; | 447 | struct iwm_calib_rxiq_group group[IWM_LMAC_CALIB_FREQ_GROUPS_NR]; |
448 | } __attribute__ ((packed)); | 448 | } __packed; |
449 | 449 | ||
450 | struct iwm_calib_rxiq { | 450 | struct iwm_calib_rxiq { |
451 | struct iwm_lmac_calib_hdr hdr; | 451 | struct iwm_lmac_calib_hdr hdr; |
452 | struct iwm_calib_rxiq_group group[IWM_CALIB_FREQ_GROUPS_NR]; | 452 | struct iwm_calib_rxiq_group group[IWM_CALIB_FREQ_GROUPS_NR]; |
453 | } __attribute__ ((packed)); | 453 | } __packed; |
454 | 454 | ||
455 | #define LMAC_STA_ID_SEED 0x0f | 455 | #define LMAC_STA_ID_SEED 0x0f |
456 | #define LMAC_STA_ID_POS 0 | 456 | #define LMAC_STA_ID_POS 0 |
@@ -463,7 +463,7 @@ struct iwm_lmac_power_report { | |||
463 | u8 pa_integ_res_A[3]; | 463 | u8 pa_integ_res_A[3]; |
464 | u8 pa_integ_res_B[3]; | 464 | u8 pa_integ_res_B[3]; |
465 | u8 pa_integ_res_C[3]; | 465 | u8 pa_integ_res_C[3]; |
466 | } __attribute__ ((packed)); | 466 | } __packed; |
467 | 467 | ||
468 | struct iwm_lmac_tx_resp { | 468 | struct iwm_lmac_tx_resp { |
469 | u8 frame_cnt; /* 1-no aggregation, greater then 1 - aggregation */ | 469 | u8 frame_cnt; /* 1-no aggregation, greater then 1 - aggregation */ |
@@ -479,6 +479,6 @@ struct iwm_lmac_tx_resp { | |||
479 | u8 ra_tid; | 479 | u8 ra_tid; |
480 | __le16 frame_ctl; | 480 | __le16 frame_ctl; |
481 | __le32 status; | 481 | __le32 status; |
482 | } __attribute__ ((packed)); | 482 | } __packed; |
483 | 483 | ||
484 | #endif | 484 | #endif |
diff --git a/drivers/net/wireless/iwmc3200wifi/umac.h b/drivers/net/wireless/iwmc3200wifi/umac.h index 0cbba3ecc813..4a137d334a42 100644 --- a/drivers/net/wireless/iwmc3200wifi/umac.h +++ b/drivers/net/wireless/iwmc3200wifi/umac.h | |||
@@ -42,19 +42,19 @@ | |||
42 | struct iwm_udma_in_hdr { | 42 | struct iwm_udma_in_hdr { |
43 | __le32 cmd; | 43 | __le32 cmd; |
44 | __le32 size; | 44 | __le32 size; |
45 | } __attribute__ ((packed)); | 45 | } __packed; |
46 | 46 | ||
47 | struct iwm_udma_out_nonwifi_hdr { | 47 | struct iwm_udma_out_nonwifi_hdr { |
48 | __le32 cmd; | 48 | __le32 cmd; |
49 | __le32 addr; | 49 | __le32 addr; |
50 | __le32 op1_sz; | 50 | __le32 op1_sz; |
51 | __le32 op2; | 51 | __le32 op2; |
52 | } __attribute__ ((packed)); | 52 | } __packed; |
53 | 53 | ||
54 | struct iwm_udma_out_wifi_hdr { | 54 | struct iwm_udma_out_wifi_hdr { |
55 | __le32 cmd; | 55 | __le32 cmd; |
56 | __le32 meta_data; | 56 | __le32 meta_data; |
57 | } __attribute__ ((packed)); | 57 | } __packed; |
58 | 58 | ||
59 | /* Sequence numbering */ | 59 | /* Sequence numbering */ |
60 | #define UMAC_WIFI_SEQ_NUM_BASE 1 | 60 | #define UMAC_WIFI_SEQ_NUM_BASE 1 |
@@ -408,12 +408,12 @@ struct iwm_rx_ticket { | |||
408 | __le16 flags; | 408 | __le16 flags; |
409 | u8 payload_offset; /* includes: MAC header, pad, IV */ | 409 | u8 payload_offset; /* includes: MAC header, pad, IV */ |
410 | u8 tail_len; /* includes: MIC, ICV, CRC (w/o STATUS) */ | 410 | u8 tail_len; /* includes: MIC, ICV, CRC (w/o STATUS) */ |
411 | } __attribute__ ((packed)); | 411 | } __packed; |
412 | 412 | ||
413 | struct iwm_rx_mpdu_hdr { | 413 | struct iwm_rx_mpdu_hdr { |
414 | __le16 len; | 414 | __le16 len; |
415 | __le16 reserved; | 415 | __le16 reserved; |
416 | } __attribute__ ((packed)); | 416 | } __packed; |
417 | 417 | ||
418 | /* UMAC SW WIFI API */ | 418 | /* UMAC SW WIFI API */ |
419 | 419 | ||
@@ -421,31 +421,31 @@ struct iwm_dev_cmd_hdr { | |||
421 | u8 cmd; | 421 | u8 cmd; |
422 | u8 flags; | 422 | u8 flags; |
423 | __le16 seq_num; | 423 | __le16 seq_num; |
424 | } __attribute__ ((packed)); | 424 | } __packed; |
425 | 425 | ||
426 | struct iwm_umac_fw_cmd_hdr { | 426 | struct iwm_umac_fw_cmd_hdr { |
427 | __le32 meta_data; | 427 | __le32 meta_data; |
428 | struct iwm_dev_cmd_hdr cmd; | 428 | struct iwm_dev_cmd_hdr cmd; |
429 | } __attribute__ ((packed)); | 429 | } __packed; |
430 | 430 | ||
431 | struct iwm_umac_wifi_out_hdr { | 431 | struct iwm_umac_wifi_out_hdr { |
432 | struct iwm_udma_out_wifi_hdr hw_hdr; | 432 | struct iwm_udma_out_wifi_hdr hw_hdr; |
433 | struct iwm_umac_fw_cmd_hdr sw_hdr; | 433 | struct iwm_umac_fw_cmd_hdr sw_hdr; |
434 | } __attribute__ ((packed)); | 434 | } __packed; |
435 | 435 | ||
436 | struct iwm_umac_nonwifi_out_hdr { | 436 | struct iwm_umac_nonwifi_out_hdr { |
437 | struct iwm_udma_out_nonwifi_hdr hw_hdr; | 437 | struct iwm_udma_out_nonwifi_hdr hw_hdr; |
438 | } __attribute__ ((packed)); | 438 | } __packed; |
439 | 439 | ||
440 | struct iwm_umac_wifi_in_hdr { | 440 | struct iwm_umac_wifi_in_hdr { |
441 | struct iwm_udma_in_hdr hw_hdr; | 441 | struct iwm_udma_in_hdr hw_hdr; |
442 | struct iwm_umac_fw_cmd_hdr sw_hdr; | 442 | struct iwm_umac_fw_cmd_hdr sw_hdr; |
443 | } __attribute__ ((packed)); | 443 | } __packed; |
444 | 444 | ||
445 | struct iwm_umac_nonwifi_in_hdr { | 445 | struct iwm_umac_nonwifi_in_hdr { |
446 | struct iwm_udma_in_hdr hw_hdr; | 446 | struct iwm_udma_in_hdr hw_hdr; |
447 | __le32 time_stamp; | 447 | __le32 time_stamp; |
448 | } __attribute__ ((packed)); | 448 | } __packed; |
449 | 449 | ||
450 | #define IWM_UMAC_PAGE_SIZE 0x200 | 450 | #define IWM_UMAC_PAGE_SIZE 0x200 |
451 | 451 | ||
@@ -521,7 +521,7 @@ struct iwm_umac_notif_wifi_if { | |||
521 | u8 status; | 521 | u8 status; |
522 | u8 flags; | 522 | u8 flags; |
523 | __le16 buf_size; | 523 | __le16 buf_size; |
524 | } __attribute__ ((packed)); | 524 | } __packed; |
525 | 525 | ||
526 | #define UMAC_ROAM_REASON_FIRST_SELECTION 0x1 | 526 | #define UMAC_ROAM_REASON_FIRST_SELECTION 0x1 |
527 | #define UMAC_ROAM_REASON_AP_DEAUTH 0x2 | 527 | #define UMAC_ROAM_REASON_AP_DEAUTH 0x2 |
@@ -535,7 +535,7 @@ struct iwm_umac_notif_assoc_start { | |||
535 | __le32 roam_reason; | 535 | __le32 roam_reason; |
536 | u8 bssid[ETH_ALEN]; | 536 | u8 bssid[ETH_ALEN]; |
537 | u8 reserved[2]; | 537 | u8 reserved[2]; |
538 | } __attribute__ ((packed)); | 538 | } __packed; |
539 | 539 | ||
540 | #define UMAC_ASSOC_COMPLETE_SUCCESS 0x0 | 540 | #define UMAC_ASSOC_COMPLETE_SUCCESS 0x0 |
541 | #define UMAC_ASSOC_COMPLETE_FAILURE 0x1 | 541 | #define UMAC_ASSOC_COMPLETE_FAILURE 0x1 |
@@ -546,7 +546,7 @@ struct iwm_umac_notif_assoc_complete { | |||
546 | u8 bssid[ETH_ALEN]; | 546 | u8 bssid[ETH_ALEN]; |
547 | u8 band; | 547 | u8 band; |
548 | u8 channel; | 548 | u8 channel; |
549 | } __attribute__ ((packed)); | 549 | } __packed; |
550 | 550 | ||
551 | #define UMAC_PROFILE_INVALID_ASSOC_TIMEOUT 0x0 | 551 | #define UMAC_PROFILE_INVALID_ASSOC_TIMEOUT 0x0 |
552 | #define UMAC_PROFILE_INVALID_ROAM_TIMEOUT 0x1 | 552 | #define UMAC_PROFILE_INVALID_ROAM_TIMEOUT 0x1 |
@@ -556,7 +556,7 @@ struct iwm_umac_notif_assoc_complete { | |||
556 | struct iwm_umac_notif_profile_invalidate { | 556 | struct iwm_umac_notif_profile_invalidate { |
557 | struct iwm_umac_notif_wifi_if mlme_hdr; | 557 | struct iwm_umac_notif_wifi_if mlme_hdr; |
558 | __le32 reason; | 558 | __le32 reason; |
559 | } __attribute__ ((packed)); | 559 | } __packed; |
560 | 560 | ||
561 | #define UMAC_SCAN_RESULT_SUCCESS 0x0 | 561 | #define UMAC_SCAN_RESULT_SUCCESS 0x0 |
562 | #define UMAC_SCAN_RESULT_ABORTED 0x1 | 562 | #define UMAC_SCAN_RESULT_ABORTED 0x1 |
@@ -568,7 +568,7 @@ struct iwm_umac_notif_scan_complete { | |||
568 | __le32 type; | 568 | __le32 type; |
569 | __le32 result; | 569 | __le32 result; |
570 | u8 seq_num; | 570 | u8 seq_num; |
571 | } __attribute__ ((packed)); | 571 | } __packed; |
572 | 572 | ||
573 | #define UMAC_OPCODE_ADD_MODIFY 0x0 | 573 | #define UMAC_OPCODE_ADD_MODIFY 0x0 |
574 | #define UMAC_OPCODE_REMOVE 0x1 | 574 | #define UMAC_OPCODE_REMOVE 0x1 |
@@ -582,7 +582,7 @@ struct iwm_umac_notif_sta_info { | |||
582 | u8 mac_addr[ETH_ALEN]; | 582 | u8 mac_addr[ETH_ALEN]; |
583 | u8 sta_id; /* bits 0-3: station ID, bits 4-7: station color */ | 583 | u8 sta_id; /* bits 0-3: station ID, bits 4-7: station color */ |
584 | u8 flags; | 584 | u8 flags; |
585 | } __attribute__ ((packed)); | 585 | } __packed; |
586 | 586 | ||
587 | #define UMAC_BAND_2GHZ 0 | 587 | #define UMAC_BAND_2GHZ 0 |
588 | #define UMAC_BAND_5GHZ 1 | 588 | #define UMAC_BAND_5GHZ 1 |
@@ -601,7 +601,7 @@ struct iwm_umac_notif_bss_info { | |||
601 | s8 rssi; | 601 | s8 rssi; |
602 | u8 reserved; | 602 | u8 reserved; |
603 | u8 frame_buf[1]; | 603 | u8 frame_buf[1]; |
604 | } __attribute__ ((packed)); | 604 | } __packed; |
605 | 605 | ||
606 | #define IWM_BSS_REMOVE_INDEX_MSK 0x0fff | 606 | #define IWM_BSS_REMOVE_INDEX_MSK 0x0fff |
607 | #define IWM_BSS_REMOVE_FLAGS_MSK 0xfc00 | 607 | #define IWM_BSS_REMOVE_FLAGS_MSK 0xfc00 |
@@ -614,13 +614,13 @@ struct iwm_umac_notif_bss_removed { | |||
614 | struct iwm_umac_notif_wifi_if mlme_hdr; | 614 | struct iwm_umac_notif_wifi_if mlme_hdr; |
615 | __le32 count; | 615 | __le32 count; |
616 | __le16 entries[0]; | 616 | __le16 entries[0]; |
617 | } __attribute__ ((packed)); | 617 | } __packed; |
618 | 618 | ||
619 | struct iwm_umac_notif_mgt_frame { | 619 | struct iwm_umac_notif_mgt_frame { |
620 | struct iwm_umac_notif_wifi_if mlme_hdr; | 620 | struct iwm_umac_notif_wifi_if mlme_hdr; |
621 | __le16 len; | 621 | __le16 len; |
622 | u8 frame[1]; | 622 | u8 frame[1]; |
623 | } __attribute__ ((packed)); | 623 | } __packed; |
624 | 624 | ||
625 | struct iwm_umac_notif_alive { | 625 | struct iwm_umac_notif_alive { |
626 | struct iwm_umac_wifi_in_hdr hdr; | 626 | struct iwm_umac_wifi_in_hdr hdr; |
@@ -630,13 +630,13 @@ struct iwm_umac_notif_alive { | |||
630 | __le16 reserved2; | 630 | __le16 reserved2; |
631 | __le16 page_grp_count; | 631 | __le16 page_grp_count; |
632 | __le32 page_grp_state[IWM_MACS_OUT_GROUPS]; | 632 | __le32 page_grp_state[IWM_MACS_OUT_GROUPS]; |
633 | } __attribute__ ((packed)); | 633 | } __packed; |
634 | 634 | ||
635 | struct iwm_umac_notif_init_complete { | 635 | struct iwm_umac_notif_init_complete { |
636 | struct iwm_umac_wifi_in_hdr hdr; | 636 | struct iwm_umac_wifi_in_hdr hdr; |
637 | __le16 status; | 637 | __le16 status; |
638 | __le16 reserved; | 638 | __le16 reserved; |
639 | } __attribute__ ((packed)); | 639 | } __packed; |
640 | 640 | ||
641 | /* error categories */ | 641 | /* error categories */ |
642 | enum { | 642 | enum { |
@@ -667,12 +667,12 @@ struct iwm_fw_error_hdr { | |||
667 | __le32 dbm_buf_end; | 667 | __le32 dbm_buf_end; |
668 | __le32 dbm_buf_write_ptr; | 668 | __le32 dbm_buf_write_ptr; |
669 | __le32 dbm_buf_cycle_cnt; | 669 | __le32 dbm_buf_cycle_cnt; |
670 | } __attribute__ ((packed)); | 670 | } __packed; |
671 | 671 | ||
672 | struct iwm_umac_notif_error { | 672 | struct iwm_umac_notif_error { |
673 | struct iwm_umac_wifi_in_hdr hdr; | 673 | struct iwm_umac_wifi_in_hdr hdr; |
674 | struct iwm_fw_error_hdr err; | 674 | struct iwm_fw_error_hdr err; |
675 | } __attribute__ ((packed)); | 675 | } __packed; |
676 | 676 | ||
677 | #define UMAC_DEALLOC_NTFY_CHANGES_CNT_POS 0 | 677 | #define UMAC_DEALLOC_NTFY_CHANGES_CNT_POS 0 |
678 | #define UMAC_DEALLOC_NTFY_CHANGES_CNT_SEED 0xff | 678 | #define UMAC_DEALLOC_NTFY_CHANGES_CNT_SEED 0xff |
@@ -687,20 +687,20 @@ struct iwm_umac_notif_page_dealloc { | |||
687 | struct iwm_umac_wifi_in_hdr hdr; | 687 | struct iwm_umac_wifi_in_hdr hdr; |
688 | __le32 changes; | 688 | __le32 changes; |
689 | __le32 grp_info[IWM_MACS_OUT_GROUPS]; | 689 | __le32 grp_info[IWM_MACS_OUT_GROUPS]; |
690 | } __attribute__ ((packed)); | 690 | } __packed; |
691 | 691 | ||
692 | struct iwm_umac_notif_wifi_status { | 692 | struct iwm_umac_notif_wifi_status { |
693 | struct iwm_umac_wifi_in_hdr hdr; | 693 | struct iwm_umac_wifi_in_hdr hdr; |
694 | __le16 status; | 694 | __le16 status; |
695 | __le16 reserved; | 695 | __le16 reserved; |
696 | } __attribute__ ((packed)); | 696 | } __packed; |
697 | 697 | ||
698 | struct iwm_umac_notif_rx_ticket { | 698 | struct iwm_umac_notif_rx_ticket { |
699 | struct iwm_umac_wifi_in_hdr hdr; | 699 | struct iwm_umac_wifi_in_hdr hdr; |
700 | u8 num_tickets; | 700 | u8 num_tickets; |
701 | u8 reserved[3]; | 701 | u8 reserved[3]; |
702 | struct iwm_rx_ticket tickets[1]; | 702 | struct iwm_rx_ticket tickets[1]; |
703 | } __attribute__ ((packed)); | 703 | } __packed; |
704 | 704 | ||
705 | /* Tx/Rx rates window (number of max of last update window per second) */ | 705 | /* Tx/Rx rates window (number of max of last update window per second) */ |
706 | #define UMAC_NTF_RATE_SAMPLE_NR 4 | 706 | #define UMAC_NTF_RATE_SAMPLE_NR 4 |
@@ -758,7 +758,7 @@ struct iwm_umac_notif_stats { | |||
758 | __le32 roam_unassoc; | 758 | __le32 roam_unassoc; |
759 | __le32 roam_deauth; | 759 | __le32 roam_deauth; |
760 | __le32 roam_ap_loadblance; | 760 | __le32 roam_ap_loadblance; |
761 | } __attribute__ ((packed)); | 761 | } __packed; |
762 | 762 | ||
763 | #define UMAC_STOP_TX_FLAG 0x1 | 763 | #define UMAC_STOP_TX_FLAG 0x1 |
764 | #define UMAC_RESUME_TX_FLAG 0x2 | 764 | #define UMAC_RESUME_TX_FLAG 0x2 |
@@ -770,7 +770,7 @@ struct iwm_umac_notif_stop_resume_tx { | |||
770 | u8 flags; /* UMAC_*_TX_FLAG_* */ | 770 | u8 flags; /* UMAC_*_TX_FLAG_* */ |
771 | u8 sta_id; | 771 | u8 sta_id; |
772 | __le16 stop_resume_tid_msk; /* tid bitmask */ | 772 | __le16 stop_resume_tid_msk; /* tid bitmask */ |
773 | } __attribute__ ((packed)); | 773 | } __packed; |
774 | 774 | ||
775 | #define UMAC_MAX_NUM_PMKIDS 4 | 775 | #define UMAC_MAX_NUM_PMKIDS 4 |
776 | 776 | ||
@@ -779,7 +779,7 @@ struct iwm_umac_wifi_if { | |||
779 | u8 oid; | 779 | u8 oid; |
780 | u8 flags; | 780 | u8 flags; |
781 | __le16 buf_size; | 781 | __le16 buf_size; |
782 | } __attribute__ ((packed)); | 782 | } __packed; |
783 | 783 | ||
784 | #define IWM_SEQ_NUM_HOST_MSK 0x0000 | 784 | #define IWM_SEQ_NUM_HOST_MSK 0x0000 |
785 | #define IWM_SEQ_NUM_UMAC_MSK 0x4000 | 785 | #define IWM_SEQ_NUM_UMAC_MSK 0x4000 |
diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h index 3809c0b49464..3bd5d3b6037a 100644 --- a/drivers/net/wireless/libertas/host.h +++ b/drivers/net/wireless/libertas/host.h | |||
@@ -326,7 +326,7 @@ struct txpd { | |||
326 | u8 pktdelay_2ms; | 326 | u8 pktdelay_2ms; |
327 | /* reserved */ | 327 | /* reserved */ |
328 | u8 reserved1; | 328 | u8 reserved1; |
329 | } __attribute__ ((packed)); | 329 | } __packed; |
330 | 330 | ||
331 | /* RxPD Descriptor */ | 331 | /* RxPD Descriptor */ |
332 | struct rxpd { | 332 | struct rxpd { |
@@ -339,8 +339,8 @@ struct rxpd { | |||
339 | u8 bss_type; | 339 | u8 bss_type; |
340 | /* BSS number */ | 340 | /* BSS number */ |
341 | u8 bss_num; | 341 | u8 bss_num; |
342 | } __attribute__ ((packed)) bss; | 342 | } __packed bss; |
343 | } __attribute__ ((packed)) u; | 343 | } __packed u; |
344 | 344 | ||
345 | /* SNR */ | 345 | /* SNR */ |
346 | u8 snr; | 346 | u8 snr; |
@@ -366,14 +366,14 @@ struct rxpd { | |||
366 | /* Pkt Priority */ | 366 | /* Pkt Priority */ |
367 | u8 priority; | 367 | u8 priority; |
368 | u8 reserved[3]; | 368 | u8 reserved[3]; |
369 | } __attribute__ ((packed)); | 369 | } __packed; |
370 | 370 | ||
371 | struct cmd_header { | 371 | struct cmd_header { |
372 | __le16 command; | 372 | __le16 command; |
373 | __le16 size; | 373 | __le16 size; |
374 | __le16 seqnum; | 374 | __le16 seqnum; |
375 | __le16 result; | 375 | __le16 result; |
376 | } __attribute__ ((packed)); | 376 | } __packed; |
377 | 377 | ||
378 | /* Generic structure to hold all key types. */ | 378 | /* Generic structure to hold all key types. */ |
379 | struct enc_key { | 379 | struct enc_key { |
@@ -387,7 +387,7 @@ struct enc_key { | |||
387 | struct lbs_offset_value { | 387 | struct lbs_offset_value { |
388 | u32 offset; | 388 | u32 offset; |
389 | u32 value; | 389 | u32 value; |
390 | } __attribute__ ((packed)); | 390 | } __packed; |
391 | 391 | ||
392 | /* | 392 | /* |
393 | * Define data structure for CMD_GET_HW_SPEC | 393 | * Define data structure for CMD_GET_HW_SPEC |
@@ -426,7 +426,7 @@ struct cmd_ds_get_hw_spec { | |||
426 | 426 | ||
427 | /*FW/HW capability */ | 427 | /*FW/HW capability */ |
428 | __le32 fwcapinfo; | 428 | __le32 fwcapinfo; |
429 | } __attribute__ ((packed)); | 429 | } __packed; |
430 | 430 | ||
431 | struct cmd_ds_802_11_subscribe_event { | 431 | struct cmd_ds_802_11_subscribe_event { |
432 | struct cmd_header hdr; | 432 | struct cmd_header hdr; |
@@ -440,7 +440,7 @@ struct cmd_ds_802_11_subscribe_event { | |||
440 | * bump this up a bit. | 440 | * bump this up a bit. |
441 | */ | 441 | */ |
442 | uint8_t tlv[128]; | 442 | uint8_t tlv[128]; |
443 | } __attribute__ ((packed)); | 443 | } __packed; |
444 | 444 | ||
445 | /* | 445 | /* |
446 | * This scan handle Country Information IE(802.11d compliant) | 446 | * This scan handle Country Information IE(802.11d compliant) |
@@ -452,7 +452,7 @@ struct cmd_ds_802_11_scan { | |||
452 | uint8_t bsstype; | 452 | uint8_t bsstype; |
453 | uint8_t bssid[ETH_ALEN]; | 453 | uint8_t bssid[ETH_ALEN]; |
454 | uint8_t tlvbuffer[0]; | 454 | uint8_t tlvbuffer[0]; |
455 | } __attribute__ ((packed)); | 455 | } __packed; |
456 | 456 | ||
457 | struct cmd_ds_802_11_scan_rsp { | 457 | struct cmd_ds_802_11_scan_rsp { |
458 | struct cmd_header hdr; | 458 | struct cmd_header hdr; |
@@ -460,7 +460,7 @@ struct cmd_ds_802_11_scan_rsp { | |||
460 | __le16 bssdescriptsize; | 460 | __le16 bssdescriptsize; |
461 | uint8_t nr_sets; | 461 | uint8_t nr_sets; |
462 | uint8_t bssdesc_and_tlvbuffer[0]; | 462 | uint8_t bssdesc_and_tlvbuffer[0]; |
463 | } __attribute__ ((packed)); | 463 | } __packed; |
464 | 464 | ||
465 | struct cmd_ds_802_11_get_log { | 465 | struct cmd_ds_802_11_get_log { |
466 | struct cmd_header hdr; | 466 | struct cmd_header hdr; |
@@ -478,20 +478,20 @@ struct cmd_ds_802_11_get_log { | |||
478 | __le32 fcserror; | 478 | __le32 fcserror; |
479 | __le32 txframe; | 479 | __le32 txframe; |
480 | __le32 wepundecryptable; | 480 | __le32 wepundecryptable; |
481 | } __attribute__ ((packed)); | 481 | } __packed; |
482 | 482 | ||
483 | struct cmd_ds_mac_control { | 483 | struct cmd_ds_mac_control { |
484 | struct cmd_header hdr; | 484 | struct cmd_header hdr; |
485 | __le16 action; | 485 | __le16 action; |
486 | u16 reserved; | 486 | u16 reserved; |
487 | } __attribute__ ((packed)); | 487 | } __packed; |
488 | 488 | ||
489 | struct cmd_ds_mac_multicast_adr { | 489 | struct cmd_ds_mac_multicast_adr { |
490 | struct cmd_header hdr; | 490 | struct cmd_header hdr; |
491 | __le16 action; | 491 | __le16 action; |
492 | __le16 nr_of_adrs; | 492 | __le16 nr_of_adrs; |
493 | u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE]; | 493 | u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE]; |
494 | } __attribute__ ((packed)); | 494 | } __packed; |
495 | 495 | ||
496 | struct cmd_ds_802_11_authenticate { | 496 | struct cmd_ds_802_11_authenticate { |
497 | struct cmd_header hdr; | 497 | struct cmd_header hdr; |
@@ -499,14 +499,14 @@ struct cmd_ds_802_11_authenticate { | |||
499 | u8 bssid[ETH_ALEN]; | 499 | u8 bssid[ETH_ALEN]; |
500 | u8 authtype; | 500 | u8 authtype; |
501 | u8 reserved[10]; | 501 | u8 reserved[10]; |
502 | } __attribute__ ((packed)); | 502 | } __packed; |
503 | 503 | ||
504 | struct cmd_ds_802_11_deauthenticate { | 504 | struct cmd_ds_802_11_deauthenticate { |
505 | struct cmd_header hdr; | 505 | struct cmd_header hdr; |
506 | 506 | ||
507 | u8 macaddr[ETH_ALEN]; | 507 | u8 macaddr[ETH_ALEN]; |
508 | __le16 reasoncode; | 508 | __le16 reasoncode; |
509 | } __attribute__ ((packed)); | 509 | } __packed; |
510 | 510 | ||
511 | struct cmd_ds_802_11_associate { | 511 | struct cmd_ds_802_11_associate { |
512 | struct cmd_header hdr; | 512 | struct cmd_header hdr; |
@@ -517,7 +517,7 @@ struct cmd_ds_802_11_associate { | |||
517 | __le16 bcnperiod; | 517 | __le16 bcnperiod; |
518 | u8 dtimperiod; | 518 | u8 dtimperiod; |
519 | u8 iebuf[512]; /* Enough for required and most optional IEs */ | 519 | u8 iebuf[512]; /* Enough for required and most optional IEs */ |
520 | } __attribute__ ((packed)); | 520 | } __packed; |
521 | 521 | ||
522 | struct cmd_ds_802_11_associate_response { | 522 | struct cmd_ds_802_11_associate_response { |
523 | struct cmd_header hdr; | 523 | struct cmd_header hdr; |
@@ -526,7 +526,7 @@ struct cmd_ds_802_11_associate_response { | |||
526 | __le16 statuscode; | 526 | __le16 statuscode; |
527 | __le16 aid; | 527 | __le16 aid; |
528 | u8 iebuf[512]; | 528 | u8 iebuf[512]; |
529 | } __attribute__ ((packed)); | 529 | } __packed; |
530 | 530 | ||
531 | struct cmd_ds_802_11_set_wep { | 531 | struct cmd_ds_802_11_set_wep { |
532 | struct cmd_header hdr; | 532 | struct cmd_header hdr; |
@@ -540,7 +540,7 @@ struct cmd_ds_802_11_set_wep { | |||
540 | /* 40, 128bit or TXWEP */ | 540 | /* 40, 128bit or TXWEP */ |
541 | uint8_t keytype[4]; | 541 | uint8_t keytype[4]; |
542 | uint8_t keymaterial[4][16]; | 542 | uint8_t keymaterial[4][16]; |
543 | } __attribute__ ((packed)); | 543 | } __packed; |
544 | 544 | ||
545 | struct cmd_ds_802_11_snmp_mib { | 545 | struct cmd_ds_802_11_snmp_mib { |
546 | struct cmd_header hdr; | 546 | struct cmd_header hdr; |
@@ -549,40 +549,40 @@ struct cmd_ds_802_11_snmp_mib { | |||
549 | __le16 oid; | 549 | __le16 oid; |
550 | __le16 bufsize; | 550 | __le16 bufsize; |
551 | u8 value[128]; | 551 | u8 value[128]; |
552 | } __attribute__ ((packed)); | 552 | } __packed; |
553 | 553 | ||
554 | struct cmd_ds_mac_reg_access { | 554 | struct cmd_ds_mac_reg_access { |
555 | __le16 action; | 555 | __le16 action; |
556 | __le16 offset; | 556 | __le16 offset; |
557 | __le32 value; | 557 | __le32 value; |
558 | } __attribute__ ((packed)); | 558 | } __packed; |
559 | 559 | ||
560 | struct cmd_ds_bbp_reg_access { | 560 | struct cmd_ds_bbp_reg_access { |
561 | __le16 action; | 561 | __le16 action; |
562 | __le16 offset; | 562 | __le16 offset; |
563 | u8 value; | 563 | u8 value; |
564 | u8 reserved[3]; | 564 | u8 reserved[3]; |
565 | } __attribute__ ((packed)); | 565 | } __packed; |
566 | 566 | ||
567 | struct cmd_ds_rf_reg_access { | 567 | struct cmd_ds_rf_reg_access { |
568 | __le16 action; | 568 | __le16 action; |
569 | __le16 offset; | 569 | __le16 offset; |
570 | u8 value; | 570 | u8 value; |
571 | u8 reserved[3]; | 571 | u8 reserved[3]; |
572 | } __attribute__ ((packed)); | 572 | } __packed; |
573 | 573 | ||
574 | struct cmd_ds_802_11_radio_control { | 574 | struct cmd_ds_802_11_radio_control { |
575 | struct cmd_header hdr; | 575 | struct cmd_header hdr; |
576 | 576 | ||
577 | __le16 action; | 577 | __le16 action; |
578 | __le16 control; | 578 | __le16 control; |
579 | } __attribute__ ((packed)); | 579 | } __packed; |
580 | 580 | ||
581 | struct cmd_ds_802_11_beacon_control { | 581 | struct cmd_ds_802_11_beacon_control { |
582 | __le16 action; | 582 | __le16 action; |
583 | __le16 beacon_enable; | 583 | __le16 beacon_enable; |
584 | __le16 beacon_period; | 584 | __le16 beacon_period; |
585 | } __attribute__ ((packed)); | 585 | } __packed; |
586 | 586 | ||
587 | struct cmd_ds_802_11_sleep_params { | 587 | struct cmd_ds_802_11_sleep_params { |
588 | struct cmd_header hdr; | 588 | struct cmd_header hdr; |
@@ -607,7 +607,7 @@ struct cmd_ds_802_11_sleep_params { | |||
607 | 607 | ||
608 | /* reserved field, should be set to zero */ | 608 | /* reserved field, should be set to zero */ |
609 | __le16 reserved; | 609 | __le16 reserved; |
610 | } __attribute__ ((packed)); | 610 | } __packed; |
611 | 611 | ||
612 | struct cmd_ds_802_11_rf_channel { | 612 | struct cmd_ds_802_11_rf_channel { |
613 | struct cmd_header hdr; | 613 | struct cmd_header hdr; |
@@ -617,7 +617,7 @@ struct cmd_ds_802_11_rf_channel { | |||
617 | __le16 rftype; /* unused */ | 617 | __le16 rftype; /* unused */ |
618 | __le16 reserved; /* unused */ | 618 | __le16 reserved; /* unused */ |
619 | u8 channellist[32]; /* unused */ | 619 | u8 channellist[32]; /* unused */ |
620 | } __attribute__ ((packed)); | 620 | } __packed; |
621 | 621 | ||
622 | struct cmd_ds_802_11_rssi { | 622 | struct cmd_ds_802_11_rssi { |
623 | /* weighting factor */ | 623 | /* weighting factor */ |
@@ -626,21 +626,21 @@ struct cmd_ds_802_11_rssi { | |||
626 | __le16 reserved_0; | 626 | __le16 reserved_0; |
627 | __le16 reserved_1; | 627 | __le16 reserved_1; |
628 | __le16 reserved_2; | 628 | __le16 reserved_2; |
629 | } __attribute__ ((packed)); | 629 | } __packed; |
630 | 630 | ||
631 | struct cmd_ds_802_11_rssi_rsp { | 631 | struct cmd_ds_802_11_rssi_rsp { |
632 | __le16 SNR; | 632 | __le16 SNR; |
633 | __le16 noisefloor; | 633 | __le16 noisefloor; |
634 | __le16 avgSNR; | 634 | __le16 avgSNR; |
635 | __le16 avgnoisefloor; | 635 | __le16 avgnoisefloor; |
636 | } __attribute__ ((packed)); | 636 | } __packed; |
637 | 637 | ||
638 | struct cmd_ds_802_11_mac_address { | 638 | struct cmd_ds_802_11_mac_address { |
639 | struct cmd_header hdr; | 639 | struct cmd_header hdr; |
640 | 640 | ||
641 | __le16 action; | 641 | __le16 action; |
642 | u8 macadd[ETH_ALEN]; | 642 | u8 macadd[ETH_ALEN]; |
643 | } __attribute__ ((packed)); | 643 | } __packed; |
644 | 644 | ||
645 | struct cmd_ds_802_11_rf_tx_power { | 645 | struct cmd_ds_802_11_rf_tx_power { |
646 | struct cmd_header hdr; | 646 | struct cmd_header hdr; |
@@ -649,26 +649,26 @@ struct cmd_ds_802_11_rf_tx_power { | |||
649 | __le16 curlevel; | 649 | __le16 curlevel; |
650 | s8 maxlevel; | 650 | s8 maxlevel; |
651 | s8 minlevel; | 651 | s8 minlevel; |
652 | } __attribute__ ((packed)); | 652 | } __packed; |
653 | 653 | ||
654 | struct cmd_ds_802_11_monitor_mode { | 654 | struct cmd_ds_802_11_monitor_mode { |
655 | __le16 action; | 655 | __le16 action; |
656 | __le16 mode; | 656 | __le16 mode; |
657 | } __attribute__ ((packed)); | 657 | } __packed; |
658 | 658 | ||
659 | struct cmd_ds_set_boot2_ver { | 659 | struct cmd_ds_set_boot2_ver { |
660 | struct cmd_header hdr; | 660 | struct cmd_header hdr; |
661 | 661 | ||
662 | __le16 action; | 662 | __le16 action; |
663 | __le16 version; | 663 | __le16 version; |
664 | } __attribute__ ((packed)); | 664 | } __packed; |
665 | 665 | ||
666 | struct cmd_ds_802_11_fw_wake_method { | 666 | struct cmd_ds_802_11_fw_wake_method { |
667 | struct cmd_header hdr; | 667 | struct cmd_header hdr; |
668 | 668 | ||
669 | __le16 action; | 669 | __le16 action; |
670 | __le16 method; | 670 | __le16 method; |
671 | } __attribute__ ((packed)); | 671 | } __packed; |
672 | 672 | ||
673 | struct cmd_ds_802_11_ps_mode { | 673 | struct cmd_ds_802_11_ps_mode { |
674 | __le16 action; | 674 | __le16 action; |
@@ -676,7 +676,7 @@ struct cmd_ds_802_11_ps_mode { | |||
676 | __le16 multipledtim; | 676 | __le16 multipledtim; |
677 | __le16 reserved; | 677 | __le16 reserved; |
678 | __le16 locallisteninterval; | 678 | __le16 locallisteninterval; |
679 | } __attribute__ ((packed)); | 679 | } __packed; |
680 | 680 | ||
681 | struct cmd_confirm_sleep { | 681 | struct cmd_confirm_sleep { |
682 | struct cmd_header hdr; | 682 | struct cmd_header hdr; |
@@ -686,7 +686,7 @@ struct cmd_confirm_sleep { | |||
686 | __le16 multipledtim; | 686 | __le16 multipledtim; |
687 | __le16 reserved; | 687 | __le16 reserved; |
688 | __le16 locallisteninterval; | 688 | __le16 locallisteninterval; |
689 | } __attribute__ ((packed)); | 689 | } __packed; |
690 | 690 | ||
691 | struct cmd_ds_802_11_data_rate { | 691 | struct cmd_ds_802_11_data_rate { |
692 | struct cmd_header hdr; | 692 | struct cmd_header hdr; |
@@ -694,14 +694,14 @@ struct cmd_ds_802_11_data_rate { | |||
694 | __le16 action; | 694 | __le16 action; |
695 | __le16 reserved; | 695 | __le16 reserved; |
696 | u8 rates[MAX_RATES]; | 696 | u8 rates[MAX_RATES]; |
697 | } __attribute__ ((packed)); | 697 | } __packed; |
698 | 698 | ||
699 | struct cmd_ds_802_11_rate_adapt_rateset { | 699 | struct cmd_ds_802_11_rate_adapt_rateset { |
700 | struct cmd_header hdr; | 700 | struct cmd_header hdr; |
701 | __le16 action; | 701 | __le16 action; |
702 | __le16 enablehwauto; | 702 | __le16 enablehwauto; |
703 | __le16 bitmap; | 703 | __le16 bitmap; |
704 | } __attribute__ ((packed)); | 704 | } __packed; |
705 | 705 | ||
706 | struct cmd_ds_802_11_ad_hoc_start { | 706 | struct cmd_ds_802_11_ad_hoc_start { |
707 | struct cmd_header hdr; | 707 | struct cmd_header hdr; |
@@ -718,14 +718,14 @@ struct cmd_ds_802_11_ad_hoc_start { | |||
718 | __le16 capability; | 718 | __le16 capability; |
719 | u8 rates[MAX_RATES]; | 719 | u8 rates[MAX_RATES]; |
720 | u8 tlv_memory_size_pad[100]; | 720 | u8 tlv_memory_size_pad[100]; |
721 | } __attribute__ ((packed)); | 721 | } __packed; |
722 | 722 | ||
723 | struct cmd_ds_802_11_ad_hoc_result { | 723 | struct cmd_ds_802_11_ad_hoc_result { |
724 | struct cmd_header hdr; | 724 | struct cmd_header hdr; |
725 | 725 | ||
726 | u8 pad[3]; | 726 | u8 pad[3]; |
727 | u8 bssid[ETH_ALEN]; | 727 | u8 bssid[ETH_ALEN]; |
728 | } __attribute__ ((packed)); | 728 | } __packed; |
729 | 729 | ||
730 | struct adhoc_bssdesc { | 730 | struct adhoc_bssdesc { |
731 | u8 bssid[ETH_ALEN]; | 731 | u8 bssid[ETH_ALEN]; |
@@ -746,7 +746,7 @@ struct adhoc_bssdesc { | |||
746 | * Adhoc join command and will cause a binary layout mismatch with | 746 | * Adhoc join command and will cause a binary layout mismatch with |
747 | * the firmware | 747 | * the firmware |
748 | */ | 748 | */ |
749 | } __attribute__ ((packed)); | 749 | } __packed; |
750 | 750 | ||
751 | struct cmd_ds_802_11_ad_hoc_join { | 751 | struct cmd_ds_802_11_ad_hoc_join { |
752 | struct cmd_header hdr; | 752 | struct cmd_header hdr; |
@@ -754,18 +754,18 @@ struct cmd_ds_802_11_ad_hoc_join { | |||
754 | struct adhoc_bssdesc bss; | 754 | struct adhoc_bssdesc bss; |
755 | __le16 failtimeout; /* Reserved on v9 and later */ | 755 | __le16 failtimeout; /* Reserved on v9 and later */ |
756 | __le16 probedelay; /* Reserved on v9 and later */ | 756 | __le16 probedelay; /* Reserved on v9 and later */ |
757 | } __attribute__ ((packed)); | 757 | } __packed; |
758 | 758 | ||
759 | struct cmd_ds_802_11_ad_hoc_stop { | 759 | struct cmd_ds_802_11_ad_hoc_stop { |
760 | struct cmd_header hdr; | 760 | struct cmd_header hdr; |
761 | } __attribute__ ((packed)); | 761 | } __packed; |
762 | 762 | ||
763 | struct cmd_ds_802_11_enable_rsn { | 763 | struct cmd_ds_802_11_enable_rsn { |
764 | struct cmd_header hdr; | 764 | struct cmd_header hdr; |
765 | 765 | ||
766 | __le16 action; | 766 | __le16 action; |
767 | __le16 enable; | 767 | __le16 enable; |
768 | } __attribute__ ((packed)); | 768 | } __packed; |
769 | 769 | ||
770 | struct MrvlIEtype_keyParamSet { | 770 | struct MrvlIEtype_keyParamSet { |
771 | /* type ID */ | 771 | /* type ID */ |
@@ -785,7 +785,7 @@ struct MrvlIEtype_keyParamSet { | |||
785 | 785 | ||
786 | /* key material of size keylen */ | 786 | /* key material of size keylen */ |
787 | u8 key[32]; | 787 | u8 key[32]; |
788 | } __attribute__ ((packed)); | 788 | } __packed; |
789 | 789 | ||
790 | #define MAX_WOL_RULES 16 | 790 | #define MAX_WOL_RULES 16 |
791 | 791 | ||
@@ -797,7 +797,7 @@ struct host_wol_rule { | |||
797 | __le16 reserve; | 797 | __le16 reserve; |
798 | __be32 sig_mask; | 798 | __be32 sig_mask; |
799 | __be32 signature; | 799 | __be32 signature; |
800 | } __attribute__ ((packed)); | 800 | } __packed; |
801 | 801 | ||
802 | struct wol_config { | 802 | struct wol_config { |
803 | uint8_t action; | 803 | uint8_t action; |
@@ -805,7 +805,7 @@ struct wol_config { | |||
805 | uint8_t no_rules_in_cmd; | 805 | uint8_t no_rules_in_cmd; |
806 | uint8_t result; | 806 | uint8_t result; |
807 | struct host_wol_rule rule[MAX_WOL_RULES]; | 807 | struct host_wol_rule rule[MAX_WOL_RULES]; |
808 | } __attribute__ ((packed)); | 808 | } __packed; |
809 | 809 | ||
810 | struct cmd_ds_host_sleep { | 810 | struct cmd_ds_host_sleep { |
811 | struct cmd_header hdr; | 811 | struct cmd_header hdr; |
@@ -813,7 +813,7 @@ struct cmd_ds_host_sleep { | |||
813 | uint8_t gpio; | 813 | uint8_t gpio; |
814 | uint16_t gap; | 814 | uint16_t gap; |
815 | struct wol_config wol_conf; | 815 | struct wol_config wol_conf; |
816 | } __attribute__ ((packed)); | 816 | } __packed; |
817 | 817 | ||
818 | 818 | ||
819 | 819 | ||
@@ -822,7 +822,7 @@ struct cmd_ds_802_11_key_material { | |||
822 | 822 | ||
823 | __le16 action; | 823 | __le16 action; |
824 | struct MrvlIEtype_keyParamSet keyParamSet[2]; | 824 | struct MrvlIEtype_keyParamSet keyParamSet[2]; |
825 | } __attribute__ ((packed)); | 825 | } __packed; |
826 | 826 | ||
827 | struct cmd_ds_802_11_eeprom_access { | 827 | struct cmd_ds_802_11_eeprom_access { |
828 | struct cmd_header hdr; | 828 | struct cmd_header hdr; |
@@ -832,7 +832,7 @@ struct cmd_ds_802_11_eeprom_access { | |||
832 | /* firmware says it returns a maximum of 20 bytes */ | 832 | /* firmware says it returns a maximum of 20 bytes */ |
833 | #define LBS_EEPROM_READ_LEN 20 | 833 | #define LBS_EEPROM_READ_LEN 20 |
834 | u8 value[LBS_EEPROM_READ_LEN]; | 834 | u8 value[LBS_EEPROM_READ_LEN]; |
835 | } __attribute__ ((packed)); | 835 | } __packed; |
836 | 836 | ||
837 | struct cmd_ds_802_11_tpc_cfg { | 837 | struct cmd_ds_802_11_tpc_cfg { |
838 | struct cmd_header hdr; | 838 | struct cmd_header hdr; |
@@ -843,7 +843,7 @@ struct cmd_ds_802_11_tpc_cfg { | |||
843 | int8_t P1; | 843 | int8_t P1; |
844 | int8_t P2; | 844 | int8_t P2; |
845 | uint8_t usesnr; | 845 | uint8_t usesnr; |
846 | } __attribute__ ((packed)); | 846 | } __packed; |
847 | 847 | ||
848 | 848 | ||
849 | struct cmd_ds_802_11_pa_cfg { | 849 | struct cmd_ds_802_11_pa_cfg { |
@@ -854,14 +854,14 @@ struct cmd_ds_802_11_pa_cfg { | |||
854 | int8_t P0; | 854 | int8_t P0; |
855 | int8_t P1; | 855 | int8_t P1; |
856 | int8_t P2; | 856 | int8_t P2; |
857 | } __attribute__ ((packed)); | 857 | } __packed; |
858 | 858 | ||
859 | 859 | ||
860 | struct cmd_ds_802_11_led_ctrl { | 860 | struct cmd_ds_802_11_led_ctrl { |
861 | __le16 action; | 861 | __le16 action; |
862 | __le16 numled; | 862 | __le16 numled; |
863 | u8 data[256]; | 863 | u8 data[256]; |
864 | } __attribute__ ((packed)); | 864 | } __packed; |
865 | 865 | ||
866 | struct cmd_ds_802_11_afc { | 866 | struct cmd_ds_802_11_afc { |
867 | __le16 afc_auto; | 867 | __le16 afc_auto; |
@@ -875,22 +875,22 @@ struct cmd_ds_802_11_afc { | |||
875 | __le16 carrier_offset; /* signed */ | 875 | __le16 carrier_offset; /* signed */ |
876 | }; | 876 | }; |
877 | }; | 877 | }; |
878 | } __attribute__ ((packed)); | 878 | } __packed; |
879 | 879 | ||
880 | struct cmd_tx_rate_query { | 880 | struct cmd_tx_rate_query { |
881 | __le16 txrate; | 881 | __le16 txrate; |
882 | } __attribute__ ((packed)); | 882 | } __packed; |
883 | 883 | ||
884 | struct cmd_ds_get_tsf { | 884 | struct cmd_ds_get_tsf { |
885 | __le64 tsfvalue; | 885 | __le64 tsfvalue; |
886 | } __attribute__ ((packed)); | 886 | } __packed; |
887 | 887 | ||
888 | struct cmd_ds_bt_access { | 888 | struct cmd_ds_bt_access { |
889 | __le16 action; | 889 | __le16 action; |
890 | __le32 id; | 890 | __le32 id; |
891 | u8 addr1[ETH_ALEN]; | 891 | u8 addr1[ETH_ALEN]; |
892 | u8 addr2[ETH_ALEN]; | 892 | u8 addr2[ETH_ALEN]; |
893 | } __attribute__ ((packed)); | 893 | } __packed; |
894 | 894 | ||
895 | struct cmd_ds_fwt_access { | 895 | struct cmd_ds_fwt_access { |
896 | __le16 action; | 896 | __le16 action; |
@@ -910,7 +910,7 @@ struct cmd_ds_fwt_access { | |||
910 | __le32 snr; | 910 | __le32 snr; |
911 | __le32 references; | 911 | __le32 references; |
912 | u8 prec[ETH_ALEN]; | 912 | u8 prec[ETH_ALEN]; |
913 | } __attribute__ ((packed)); | 913 | } __packed; |
914 | 914 | ||
915 | struct cmd_ds_mesh_config { | 915 | struct cmd_ds_mesh_config { |
916 | struct cmd_header hdr; | 916 | struct cmd_header hdr; |
@@ -920,14 +920,14 @@ struct cmd_ds_mesh_config { | |||
920 | __le16 type; | 920 | __le16 type; |
921 | __le16 length; | 921 | __le16 length; |
922 | u8 data[128]; /* last position reserved */ | 922 | u8 data[128]; /* last position reserved */ |
923 | } __attribute__ ((packed)); | 923 | } __packed; |
924 | 924 | ||
925 | struct cmd_ds_mesh_access { | 925 | struct cmd_ds_mesh_access { |
926 | struct cmd_header hdr; | 926 | struct cmd_header hdr; |
927 | 927 | ||
928 | __le16 action; | 928 | __le16 action; |
929 | __le32 data[32]; /* last position reserved */ | 929 | __le32 data[32]; /* last position reserved */ |
930 | } __attribute__ ((packed)); | 930 | } __packed; |
931 | 931 | ||
932 | /* Number of stats counters returned by the firmware */ | 932 | /* Number of stats counters returned by the firmware */ |
933 | #define MESH_STATS_NUM 8 | 933 | #define MESH_STATS_NUM 8 |
@@ -957,6 +957,6 @@ struct cmd_ds_command { | |||
957 | struct cmd_ds_fwt_access fwt; | 957 | struct cmd_ds_fwt_access fwt; |
958 | struct cmd_ds_802_11_beacon_control bcn_ctrl; | 958 | struct cmd_ds_802_11_beacon_control bcn_ctrl; |
959 | } params; | 959 | } params; |
960 | } __attribute__ ((packed)); | 960 | } __packed; |
961 | 961 | ||
962 | #endif | 962 | #endif |
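For reference, the __le16/__le32 annotations on the command structures above mark fields the firmware expects in little-endian byte order regardless of host endianness, which is why the packed, padding-free layout matters. A minimal, self-contained sketch (illustrative only, not kernel code) of filling and reading such a field portably:

#include <stdint.h>
#include <stdio.h>

static void put_le16(uint8_t *dst, uint16_t v)
{
	dst[0] = (uint8_t)(v & 0xff);		/* least-significant byte first */
	dst[1] = (uint8_t)(v >> 8);
}

static uint16_t get_le16(const uint8_t *src)
{
	return (uint16_t)(src[0] | ((uint16_t)src[1] << 8));
}

int main(void)
{
	uint8_t wire[2];

	put_le16(wire, 0x1234);
	/* prints "wire: 34 12 -> 0x1234" on any host byte order */
	printf("wire: %02x %02x -> 0x%04x\n", wire[0], wire[1], get_le16(wire));
	return 0;
}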
diff --git a/drivers/net/wireless/libertas/radiotap.h b/drivers/net/wireless/libertas/radiotap.h index d16b26416e82..b3c8ea6d610e 100644 --- a/drivers/net/wireless/libertas/radiotap.h +++ b/drivers/net/wireless/libertas/radiotap.h | |||
@@ -6,7 +6,7 @@ struct tx_radiotap_hdr { | |||
6 | u8 txpower; | 6 | u8 txpower; |
7 | u8 rts_retries; | 7 | u8 rts_retries; |
8 | u8 data_retries; | 8 | u8 data_retries; |
9 | } __attribute__ ((packed)); | 9 | } __packed; |
10 | 10 | ||
11 | #define TX_RADIOTAP_PRESENT ( \ | 11 | #define TX_RADIOTAP_PRESENT ( \ |
12 | (1 << IEEE80211_RADIOTAP_RATE) | \ | 12 | (1 << IEEE80211_RADIOTAP_RATE) | \ |
@@ -34,7 +34,7 @@ struct rx_radiotap_hdr { | |||
34 | u8 flags; | 34 | u8 flags; |
35 | u8 rate; | 35 | u8 rate; |
36 | u8 antsignal; | 36 | u8 antsignal; |
37 | } __attribute__ ((packed)); | 37 | } __packed; |
38 | 38 | ||
39 | #define RX_RADIOTAP_PRESENT ( \ | 39 | #define RX_RADIOTAP_PRESENT ( \ |
40 | (1 << IEEE80211_RADIOTAP_FLAGS) | \ | 40 | (1 << IEEE80211_RADIOTAP_FLAGS) | \ |
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c index 7a377f5b7662..1c63f8ce7349 100644 --- a/drivers/net/wireless/libertas/rx.c +++ b/drivers/net/wireless/libertas/rx.c | |||
@@ -15,7 +15,7 @@ struct eth803hdr { | |||
15 | u8 dest_addr[6]; | 15 | u8 dest_addr[6]; |
16 | u8 src_addr[6]; | 16 | u8 src_addr[6]; |
17 | u16 h803_len; | 17 | u16 h803_len; |
18 | } __attribute__ ((packed)); | 18 | } __packed; |
19 | 19 | ||
20 | struct rfc1042hdr { | 20 | struct rfc1042hdr { |
21 | u8 llc_dsap; | 21 | u8 llc_dsap; |
@@ -23,17 +23,17 @@ struct rfc1042hdr { | |||
23 | u8 llc_ctrl; | 23 | u8 llc_ctrl; |
24 | u8 snap_oui[3]; | 24 | u8 snap_oui[3]; |
25 | u16 snap_type; | 25 | u16 snap_type; |
26 | } __attribute__ ((packed)); | 26 | } __packed; |
27 | 27 | ||
28 | struct rxpackethdr { | 28 | struct rxpackethdr { |
29 | struct eth803hdr eth803_hdr; | 29 | struct eth803hdr eth803_hdr; |
30 | struct rfc1042hdr rfc1042_hdr; | 30 | struct rfc1042hdr rfc1042_hdr; |
31 | } __attribute__ ((packed)); | 31 | } __packed; |
32 | 32 | ||
33 | struct rx80211packethdr { | 33 | struct rx80211packethdr { |
34 | struct rxpd rx_pd; | 34 | struct rxpd rx_pd; |
35 | void *eth80211_hdr; | 35 | void *eth80211_hdr; |
36 | } __attribute__ ((packed)); | 36 | } __packed; |
37 | 37 | ||
38 | static int process_rxed_802_11_packet(struct lbs_private *priv, | 38 | static int process_rxed_802_11_packet(struct lbs_private *priv, |
39 | struct sk_buff *skb); | 39 | struct sk_buff *skb); |
diff --git a/drivers/net/wireless/libertas/types.h b/drivers/net/wireless/libertas/types.h index 3e72c86ceca8..462fbb4cb743 100644 --- a/drivers/net/wireless/libertas/types.h +++ b/drivers/net/wireless/libertas/types.h | |||
@@ -11,7 +11,7 @@ | |||
11 | struct ieee_ie_header { | 11 | struct ieee_ie_header { |
12 | u8 id; | 12 | u8 id; |
13 | u8 len; | 13 | u8 len; |
14 | } __attribute__ ((packed)); | 14 | } __packed; |
15 | 15 | ||
16 | struct ieee_ie_cf_param_set { | 16 | struct ieee_ie_cf_param_set { |
17 | struct ieee_ie_header header; | 17 | struct ieee_ie_header header; |
@@ -20,19 +20,19 @@ struct ieee_ie_cf_param_set { | |||
20 | u8 cfpperiod; | 20 | u8 cfpperiod; |
21 | __le16 cfpmaxduration; | 21 | __le16 cfpmaxduration; |
22 | __le16 cfpdurationremaining; | 22 | __le16 cfpdurationremaining; |
23 | } __attribute__ ((packed)); | 23 | } __packed; |
24 | 24 | ||
25 | 25 | ||
26 | struct ieee_ie_ibss_param_set { | 26 | struct ieee_ie_ibss_param_set { |
27 | struct ieee_ie_header header; | 27 | struct ieee_ie_header header; |
28 | 28 | ||
29 | __le16 atimwindow; | 29 | __le16 atimwindow; |
30 | } __attribute__ ((packed)); | 30 | } __packed; |
31 | 31 | ||
32 | union ieee_ss_param_set { | 32 | union ieee_ss_param_set { |
33 | struct ieee_ie_cf_param_set cf; | 33 | struct ieee_ie_cf_param_set cf; |
34 | struct ieee_ie_ibss_param_set ibss; | 34 | struct ieee_ie_ibss_param_set ibss; |
35 | } __attribute__ ((packed)); | 35 | } __packed; |
36 | 36 | ||
37 | struct ieee_ie_fh_param_set { | 37 | struct ieee_ie_fh_param_set { |
38 | struct ieee_ie_header header; | 38 | struct ieee_ie_header header; |
@@ -41,18 +41,18 @@ struct ieee_ie_fh_param_set { | |||
41 | u8 hopset; | 41 | u8 hopset; |
42 | u8 hoppattern; | 42 | u8 hoppattern; |
43 | u8 hopindex; | 43 | u8 hopindex; |
44 | } __attribute__ ((packed)); | 44 | } __packed; |
45 | 45 | ||
46 | struct ieee_ie_ds_param_set { | 46 | struct ieee_ie_ds_param_set { |
47 | struct ieee_ie_header header; | 47 | struct ieee_ie_header header; |
48 | 48 | ||
49 | u8 channel; | 49 | u8 channel; |
50 | } __attribute__ ((packed)); | 50 | } __packed; |
51 | 51 | ||
52 | union ieee_phy_param_set { | 52 | union ieee_phy_param_set { |
53 | struct ieee_ie_fh_param_set fh; | 53 | struct ieee_ie_fh_param_set fh; |
54 | struct ieee_ie_ds_param_set ds; | 54 | struct ieee_ie_ds_param_set ds; |
55 | } __attribute__ ((packed)); | 55 | } __packed; |
56 | 56 | ||
57 | /** TLV type ID definition */ | 57 | /** TLV type ID definition */ |
58 | #define PROPRIETARY_TLV_BASE_ID 0x0100 | 58 | #define PROPRIETARY_TLV_BASE_ID 0x0100 |
@@ -100,28 +100,28 @@ union ieee_phy_param_set { | |||
100 | struct mrvl_ie_header { | 100 | struct mrvl_ie_header { |
101 | __le16 type; | 101 | __le16 type; |
102 | __le16 len; | 102 | __le16 len; |
103 | } __attribute__ ((packed)); | 103 | } __packed; |
104 | 104 | ||
105 | struct mrvl_ie_data { | 105 | struct mrvl_ie_data { |
106 | struct mrvl_ie_header header; | 106 | struct mrvl_ie_header header; |
107 | u8 Data[1]; | 107 | u8 Data[1]; |
108 | } __attribute__ ((packed)); | 108 | } __packed; |
109 | 109 | ||
110 | struct mrvl_ie_rates_param_set { | 110 | struct mrvl_ie_rates_param_set { |
111 | struct mrvl_ie_header header; | 111 | struct mrvl_ie_header header; |
112 | u8 rates[1]; | 112 | u8 rates[1]; |
113 | } __attribute__ ((packed)); | 113 | } __packed; |
114 | 114 | ||
115 | struct mrvl_ie_ssid_param_set { | 115 | struct mrvl_ie_ssid_param_set { |
116 | struct mrvl_ie_header header; | 116 | struct mrvl_ie_header header; |
117 | u8 ssid[1]; | 117 | u8 ssid[1]; |
118 | } __attribute__ ((packed)); | 118 | } __packed; |
119 | 119 | ||
120 | struct mrvl_ie_wildcard_ssid_param_set { | 120 | struct mrvl_ie_wildcard_ssid_param_set { |
121 | struct mrvl_ie_header header; | 121 | struct mrvl_ie_header header; |
122 | u8 MaxSsidlength; | 122 | u8 MaxSsidlength; |
123 | u8 ssid[1]; | 123 | u8 ssid[1]; |
124 | } __attribute__ ((packed)); | 124 | } __packed; |
125 | 125 | ||
126 | struct chanscanmode { | 126 | struct chanscanmode { |
127 | #ifdef __BIG_ENDIAN_BITFIELD | 127 | #ifdef __BIG_ENDIAN_BITFIELD |
@@ -133,7 +133,7 @@ struct chanscanmode { | |||
133 | u8 disablechanfilt:1; | 133 | u8 disablechanfilt:1; |
134 | u8 reserved_2_7:6; | 134 | u8 reserved_2_7:6; |
135 | #endif | 135 | #endif |
136 | } __attribute__ ((packed)); | 136 | } __packed; |
137 | 137 | ||
138 | struct chanscanparamset { | 138 | struct chanscanparamset { |
139 | u8 radiotype; | 139 | u8 radiotype; |
@@ -141,12 +141,12 @@ struct chanscanparamset { | |||
141 | struct chanscanmode chanscanmode; | 141 | struct chanscanmode chanscanmode; |
142 | __le16 minscantime; | 142 | __le16 minscantime; |
143 | __le16 maxscantime; | 143 | __le16 maxscantime; |
144 | } __attribute__ ((packed)); | 144 | } __packed; |
145 | 145 | ||
146 | struct mrvl_ie_chanlist_param_set { | 146 | struct mrvl_ie_chanlist_param_set { |
147 | struct mrvl_ie_header header; | 147 | struct mrvl_ie_header header; |
148 | struct chanscanparamset chanscanparam[1]; | 148 | struct chanscanparamset chanscanparam[1]; |
149 | } __attribute__ ((packed)); | 149 | } __packed; |
150 | 150 | ||
151 | struct mrvl_ie_cf_param_set { | 151 | struct mrvl_ie_cf_param_set { |
152 | struct mrvl_ie_header header; | 152 | struct mrvl_ie_header header; |
@@ -154,86 +154,86 @@ struct mrvl_ie_cf_param_set { | |||
154 | u8 cfpperiod; | 154 | u8 cfpperiod; |
155 | __le16 cfpmaxduration; | 155 | __le16 cfpmaxduration; |
156 | __le16 cfpdurationremaining; | 156 | __le16 cfpdurationremaining; |
157 | } __attribute__ ((packed)); | 157 | } __packed; |
158 | 158 | ||
159 | struct mrvl_ie_ds_param_set { | 159 | struct mrvl_ie_ds_param_set { |
160 | struct mrvl_ie_header header; | 160 | struct mrvl_ie_header header; |
161 | u8 channel; | 161 | u8 channel; |
162 | } __attribute__ ((packed)); | 162 | } __packed; |
163 | 163 | ||
164 | struct mrvl_ie_rsn_param_set { | 164 | struct mrvl_ie_rsn_param_set { |
165 | struct mrvl_ie_header header; | 165 | struct mrvl_ie_header header; |
166 | u8 rsnie[1]; | 166 | u8 rsnie[1]; |
167 | } __attribute__ ((packed)); | 167 | } __packed; |
168 | 168 | ||
169 | struct mrvl_ie_tsf_timestamp { | 169 | struct mrvl_ie_tsf_timestamp { |
170 | struct mrvl_ie_header header; | 170 | struct mrvl_ie_header header; |
171 | __le64 tsftable[1]; | 171 | __le64 tsftable[1]; |
172 | } __attribute__ ((packed)); | 172 | } __packed; |
173 | 173 | ||
174 | /* v9 and later firmware only */ | 174 | /* v9 and later firmware only */ |
175 | struct mrvl_ie_auth_type { | 175 | struct mrvl_ie_auth_type { |
176 | struct mrvl_ie_header header; | 176 | struct mrvl_ie_header header; |
177 | __le16 auth; | 177 | __le16 auth; |
178 | } __attribute__ ((packed)); | 178 | } __packed; |
179 | 179 | ||
180 | /** Local Power capability */ | 180 | /** Local Power capability */ |
181 | struct mrvl_ie_power_capability { | 181 | struct mrvl_ie_power_capability { |
182 | struct mrvl_ie_header header; | 182 | struct mrvl_ie_header header; |
183 | s8 minpower; | 183 | s8 minpower; |
184 | s8 maxpower; | 184 | s8 maxpower; |
185 | } __attribute__ ((packed)); | 185 | } __packed; |
186 | 186 | ||
187 | /* used in CMD_802_11_SUBSCRIBE_EVENT for SNR, RSSI and Failure */ | 187 | /* used in CMD_802_11_SUBSCRIBE_EVENT for SNR, RSSI and Failure */ |
188 | struct mrvl_ie_thresholds { | 188 | struct mrvl_ie_thresholds { |
189 | struct mrvl_ie_header header; | 189 | struct mrvl_ie_header header; |
190 | u8 value; | 190 | u8 value; |
191 | u8 freq; | 191 | u8 freq; |
192 | } __attribute__ ((packed)); | 192 | } __packed; |
193 | 193 | ||
194 | struct mrvl_ie_beacons_missed { | 194 | struct mrvl_ie_beacons_missed { |
195 | struct mrvl_ie_header header; | 195 | struct mrvl_ie_header header; |
196 | u8 beaconmissed; | 196 | u8 beaconmissed; |
197 | u8 reserved; | 197 | u8 reserved; |
198 | } __attribute__ ((packed)); | 198 | } __packed; |
199 | 199 | ||
200 | struct mrvl_ie_num_probes { | 200 | struct mrvl_ie_num_probes { |
201 | struct mrvl_ie_header header; | 201 | struct mrvl_ie_header header; |
202 | __le16 numprobes; | 202 | __le16 numprobes; |
203 | } __attribute__ ((packed)); | 203 | } __packed; |
204 | 204 | ||
205 | struct mrvl_ie_bcast_probe { | 205 | struct mrvl_ie_bcast_probe { |
206 | struct mrvl_ie_header header; | 206 | struct mrvl_ie_header header; |
207 | __le16 bcastprobe; | 207 | __le16 bcastprobe; |
208 | } __attribute__ ((packed)); | 208 | } __packed; |
209 | 209 | ||
210 | struct mrvl_ie_num_ssid_probe { | 210 | struct mrvl_ie_num_ssid_probe { |
211 | struct mrvl_ie_header header; | 211 | struct mrvl_ie_header header; |
212 | __le16 numssidprobe; | 212 | __le16 numssidprobe; |
213 | } __attribute__ ((packed)); | 213 | } __packed; |
214 | 214 | ||
215 | struct led_pin { | 215 | struct led_pin { |
216 | u8 led; | 216 | u8 led; |
217 | u8 pin; | 217 | u8 pin; |
218 | } __attribute__ ((packed)); | 218 | } __packed; |
219 | 219 | ||
220 | struct mrvl_ie_ledgpio { | 220 | struct mrvl_ie_ledgpio { |
221 | struct mrvl_ie_header header; | 221 | struct mrvl_ie_header header; |
222 | struct led_pin ledpin[1]; | 222 | struct led_pin ledpin[1]; |
223 | } __attribute__ ((packed)); | 223 | } __packed; |
224 | 224 | ||
225 | struct led_bhv { | 225 | struct led_bhv { |
226 | uint8_t firmwarestate; | 226 | uint8_t firmwarestate; |
227 | uint8_t led; | 227 | uint8_t led; |
228 | uint8_t ledstate; | 228 | uint8_t ledstate; |
229 | uint8_t ledarg; | 229 | uint8_t ledarg; |
230 | } __attribute__ ((packed)); | 230 | } __packed; |
231 | 231 | ||
232 | 232 | ||
233 | struct mrvl_ie_ledbhv { | 233 | struct mrvl_ie_ledbhv { |
234 | struct mrvl_ie_header header; | 234 | struct mrvl_ie_header header; |
235 | struct led_bhv ledbhv[1]; | 235 | struct led_bhv ledbhv[1]; |
236 | } __attribute__ ((packed)); | 236 | } __packed; |
237 | 237 | ||
238 | /* Meant to be packed as the value member of a struct ieee80211_info_element. | 238 | /* Meant to be packed as the value member of a struct ieee80211_info_element. |
239 | * Note that the len member of the ieee80211_info_element varies depending on | 239 | * Note that the len member of the ieee80211_info_element varies depending on |
@@ -248,12 +248,12 @@ struct mrvl_meshie_val { | |||
248 | uint8_t mesh_capability; | 248 | uint8_t mesh_capability; |
249 | uint8_t mesh_id_len; | 249 | uint8_t mesh_id_len; |
250 | uint8_t mesh_id[IEEE80211_MAX_SSID_LEN]; | 250 | uint8_t mesh_id[IEEE80211_MAX_SSID_LEN]; |
251 | } __attribute__ ((packed)); | 251 | } __packed; |
252 | 252 | ||
253 | struct mrvl_meshie { | 253 | struct mrvl_meshie { |
254 | u8 id, len; | 254 | u8 id, len; |
255 | struct mrvl_meshie_val val; | 255 | struct mrvl_meshie_val val; |
256 | } __attribute__ ((packed)); | 256 | } __packed; |
257 | 257 | ||
258 | struct mrvl_mesh_defaults { | 258 | struct mrvl_mesh_defaults { |
259 | __le32 bootflag; | 259 | __le32 bootflag; |
@@ -261,6 +261,6 @@ struct mrvl_mesh_defaults { | |||
261 | uint8_t reserved; | 261 | uint8_t reserved; |
262 | __le16 channel; | 262 | __le16 channel; |
263 | struct mrvl_meshie meshie; | 263 | struct mrvl_meshie meshie; |
264 | } __attribute__ ((packed)); | 264 | } __packed; |
265 | 265 | ||
266 | #endif | 266 | #endif |
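Every hunk in this patch makes the same one-for-one substitution, so it is worth spelling out what the new spelling expands to. In the kernel headers of this era, __packed is simply a macro wrapping the GCC packed attribute (its definition lives in include/linux/compiler-gcc.h), so the conversion changes no structure layout. The standalone sketch below, with stdint types standing in for the kernel's u8/__le16/__le32 and an illustrative struct that is not taken from any driver above, shows the two spellings producing identical sizes:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the kernel's shorthand; the real definition is in
 * include/linux/compiler-gcc.h. */
#define __packed __attribute__((packed))

/* Illustrative wire-format header written both ways. */
struct example_hdr_new {
        uint16_t type;
        uint32_t len;
        uint8_t flags;
} __packed;

struct example_hdr_old {
        uint16_t type;
        uint32_t len;
        uint8_t flags;
} __attribute__ ((packed));

int main(void)
{
        /* Both spellings suppress padding, so both print 7 here. */
        printf("new: %zu, old: %zu\n",
               sizeof(struct example_hdr_new),
               sizeof(struct example_hdr_old));
        return 0;
}

The usual rationale for the macro is that it keeps compiler-specific attribute syntax in one header instead of repeating it in every driver.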
diff --git a/drivers/net/wireless/libertas_tf/libertas_tf.h b/drivers/net/wireless/libertas_tf/libertas_tf.h index fbbaaae7a1ae..737eac92ef72 100644 --- a/drivers/net/wireless/libertas_tf/libertas_tf.h +++ b/drivers/net/wireless/libertas_tf/libertas_tf.h | |||
@@ -316,7 +316,7 @@ struct cmd_header { | |||
316 | __le16 size; | 316 | __le16 size; |
317 | __le16 seqnum; | 317 | __le16 seqnum; |
318 | __le16 result; | 318 | __le16 result; |
319 | } __attribute__ ((packed)); | 319 | } __packed; |
320 | 320 | ||
321 | struct cmd_ctrl_node { | 321 | struct cmd_ctrl_node { |
322 | struct list_head list; | 322 | struct list_head list; |
@@ -369,7 +369,7 @@ struct cmd_ds_get_hw_spec { | |||
369 | 369 | ||
370 | /*FW/HW capability */ | 370 | /*FW/HW capability */ |
371 | __le32 fwcapinfo; | 371 | __le32 fwcapinfo; |
372 | } __attribute__ ((packed)); | 372 | } __packed; |
373 | 373 | ||
374 | struct cmd_ds_mac_control { | 374 | struct cmd_ds_mac_control { |
375 | struct cmd_header hdr; | 375 | struct cmd_header hdr; |
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 6f8cb3ee6fed..49a7dfb4809a 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
@@ -317,7 +317,7 @@ struct hwsim_radiotap_hdr { | |||
317 | u8 rt_rate; | 317 | u8 rt_rate; |
318 | __le16 rt_channel; | 318 | __le16 rt_channel; |
319 | __le16 rt_chbitmask; | 319 | __le16 rt_chbitmask; |
320 | } __attribute__ ((packed)); | 320 | } __packed; |
321 | 321 | ||
322 | 322 | ||
323 | static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb, | 323 | static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb, |
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index 808adb909095..60a819107a8c 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c | |||
@@ -426,7 +426,7 @@ struct mwl8k_cmd_pkt { | |||
426 | __u8 macid; | 426 | __u8 macid; |
427 | __le16 result; | 427 | __le16 result; |
428 | char payload[0]; | 428 | char payload[0]; |
429 | } __attribute__((packed)); | 429 | } __packed; |
430 | 430 | ||
431 | /* | 431 | /* |
432 | * Firmware loading. | 432 | * Firmware loading. |
@@ -632,7 +632,7 @@ struct mwl8k_dma_data { | |||
632 | __le16 fwlen; | 632 | __le16 fwlen; |
633 | struct ieee80211_hdr wh; | 633 | struct ieee80211_hdr wh; |
634 | char data[0]; | 634 | char data[0]; |
635 | } __attribute__((packed)); | 635 | } __packed; |
636 | 636 | ||
637 | /* Routines to add/remove DMA header from skb. */ | 637 | /* Routines to add/remove DMA header from skb. */ |
638 | static inline void mwl8k_remove_dma_header(struct sk_buff *skb, __le16 qos) | 638 | static inline void mwl8k_remove_dma_header(struct sk_buff *skb, __le16 qos) |
@@ -711,7 +711,7 @@ struct mwl8k_rxd_8366_ap { | |||
711 | __u8 rx_status; | 711 | __u8 rx_status; |
712 | __u8 channel; | 712 | __u8 channel; |
713 | __u8 rx_ctrl; | 713 | __u8 rx_ctrl; |
714 | } __attribute__((packed)); | 714 | } __packed; |
715 | 715 | ||
716 | #define MWL8K_8366_AP_RATE_INFO_MCS_FORMAT 0x80 | 716 | #define MWL8K_8366_AP_RATE_INFO_MCS_FORMAT 0x80 |
717 | #define MWL8K_8366_AP_RATE_INFO_40MHZ 0x40 | 717 | #define MWL8K_8366_AP_RATE_INFO_40MHZ 0x40 |
@@ -806,7 +806,7 @@ struct mwl8k_rxd_sta { | |||
806 | __u8 rx_ctrl; | 806 | __u8 rx_ctrl; |
807 | __u8 rx_status; | 807 | __u8 rx_status; |
808 | __u8 pad2[2]; | 808 | __u8 pad2[2]; |
809 | } __attribute__((packed)); | 809 | } __packed; |
810 | 810 | ||
811 | #define MWL8K_STA_RATE_INFO_SHORTPRE 0x8000 | 811 | #define MWL8K_STA_RATE_INFO_SHORTPRE 0x8000 |
812 | #define MWL8K_STA_RATE_INFO_ANTSELECT(x) (((x) >> 11) & 0x3) | 812 | #define MWL8K_STA_RATE_INFO_ANTSELECT(x) (((x) >> 11) & 0x3) |
@@ -1120,7 +1120,7 @@ struct mwl8k_tx_desc { | |||
1120 | __le16 rate_info; | 1120 | __le16 rate_info; |
1121 | __u8 peer_id; | 1121 | __u8 peer_id; |
1122 | __u8 tx_frag_cnt; | 1122 | __u8 tx_frag_cnt; |
1123 | } __attribute__((packed)); | 1123 | } __packed; |
1124 | 1124 | ||
1125 | #define MWL8K_TX_DESCS 128 | 1125 | #define MWL8K_TX_DESCS 128 |
1126 | 1126 | ||
@@ -1666,7 +1666,7 @@ struct mwl8k_cmd_get_hw_spec_sta { | |||
1666 | __le32 caps2; | 1666 | __le32 caps2; |
1667 | __le32 num_tx_desc_per_queue; | 1667 | __le32 num_tx_desc_per_queue; |
1668 | __le32 total_rxd; | 1668 | __le32 total_rxd; |
1669 | } __attribute__((packed)); | 1669 | } __packed; |
1670 | 1670 | ||
1671 | #define MWL8K_CAP_MAX_AMSDU 0x20000000 | 1671 | #define MWL8K_CAP_MAX_AMSDU 0x20000000 |
1672 | #define MWL8K_CAP_GREENFIELD 0x08000000 | 1672 | #define MWL8K_CAP_GREENFIELD 0x08000000 |
@@ -1810,7 +1810,7 @@ struct mwl8k_cmd_get_hw_spec_ap { | |||
1810 | __le32 wcbbase1; | 1810 | __le32 wcbbase1; |
1811 | __le32 wcbbase2; | 1811 | __le32 wcbbase2; |
1812 | __le32 wcbbase3; | 1812 | __le32 wcbbase3; |
1813 | } __attribute__((packed)); | 1813 | } __packed; |
1814 | 1814 | ||
1815 | static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw) | 1815 | static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw) |
1816 | { | 1816 | { |
@@ -1883,7 +1883,7 @@ struct mwl8k_cmd_set_hw_spec { | |||
1883 | __le32 flags; | 1883 | __le32 flags; |
1884 | __le32 num_tx_desc_per_queue; | 1884 | __le32 num_tx_desc_per_queue; |
1885 | __le32 total_rxd; | 1885 | __le32 total_rxd; |
1886 | } __attribute__((packed)); | 1886 | } __packed; |
1887 | 1887 | ||
1888 | #define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080 | 1888 | #define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080 |
1889 | #define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP 0x00000020 | 1889 | #define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP 0x00000020 |
@@ -1985,7 +1985,7 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti, | |||
1985 | struct mwl8k_cmd_get_stat { | 1985 | struct mwl8k_cmd_get_stat { |
1986 | struct mwl8k_cmd_pkt header; | 1986 | struct mwl8k_cmd_pkt header; |
1987 | __le32 stats[64]; | 1987 | __le32 stats[64]; |
1988 | } __attribute__((packed)); | 1988 | } __packed; |
1989 | 1989 | ||
1990 | #define MWL8K_STAT_ACK_FAILURE 9 | 1990 | #define MWL8K_STAT_ACK_FAILURE 9 |
1991 | #define MWL8K_STAT_RTS_FAILURE 12 | 1991 | #define MWL8K_STAT_RTS_FAILURE 12 |
@@ -2029,7 +2029,7 @@ struct mwl8k_cmd_radio_control { | |||
2029 | __le16 action; | 2029 | __le16 action; |
2030 | __le16 control; | 2030 | __le16 control; |
2031 | __le16 radio_on; | 2031 | __le16 radio_on; |
2032 | } __attribute__((packed)); | 2032 | } __packed; |
2033 | 2033 | ||
2034 | static int | 2034 | static int |
2035 | mwl8k_cmd_radio_control(struct ieee80211_hw *hw, bool enable, bool force) | 2035 | mwl8k_cmd_radio_control(struct ieee80211_hw *hw, bool enable, bool force) |
@@ -2092,7 +2092,7 @@ struct mwl8k_cmd_rf_tx_power { | |||
2092 | __le16 current_level; | 2092 | __le16 current_level; |
2093 | __le16 reserved; | 2093 | __le16 reserved; |
2094 | __le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL]; | 2094 | __le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL]; |
2095 | } __attribute__((packed)); | 2095 | } __packed; |
2096 | 2096 | ||
2097 | static int mwl8k_cmd_rf_tx_power(struct ieee80211_hw *hw, int dBm) | 2097 | static int mwl8k_cmd_rf_tx_power(struct ieee80211_hw *hw, int dBm) |
2098 | { | 2098 | { |
@@ -2121,7 +2121,7 @@ struct mwl8k_cmd_rf_antenna { | |||
2121 | struct mwl8k_cmd_pkt header; | 2121 | struct mwl8k_cmd_pkt header; |
2122 | __le16 antenna; | 2122 | __le16 antenna; |
2123 | __le16 mode; | 2123 | __le16 mode; |
2124 | } __attribute__((packed)); | 2124 | } __packed; |
2125 | 2125 | ||
2126 | #define MWL8K_RF_ANTENNA_RX 1 | 2126 | #define MWL8K_RF_ANTENNA_RX 1 |
2127 | #define MWL8K_RF_ANTENNA_TX 2 | 2127 | #define MWL8K_RF_ANTENNA_TX 2 |
@@ -2182,7 +2182,7 @@ static int mwl8k_cmd_set_beacon(struct ieee80211_hw *hw, | |||
2182 | */ | 2182 | */ |
2183 | struct mwl8k_cmd_set_pre_scan { | 2183 | struct mwl8k_cmd_set_pre_scan { |
2184 | struct mwl8k_cmd_pkt header; | 2184 | struct mwl8k_cmd_pkt header; |
2185 | } __attribute__((packed)); | 2185 | } __packed; |
2186 | 2186 | ||
2187 | static int mwl8k_cmd_set_pre_scan(struct ieee80211_hw *hw) | 2187 | static int mwl8k_cmd_set_pre_scan(struct ieee80211_hw *hw) |
2188 | { | 2188 | { |
@@ -2209,7 +2209,7 @@ struct mwl8k_cmd_set_post_scan { | |||
2209 | struct mwl8k_cmd_pkt header; | 2209 | struct mwl8k_cmd_pkt header; |
2210 | __le32 isibss; | 2210 | __le32 isibss; |
2211 | __u8 bssid[ETH_ALEN]; | 2211 | __u8 bssid[ETH_ALEN]; |
2212 | } __attribute__((packed)); | 2212 | } __packed; |
2213 | 2213 | ||
2214 | static int | 2214 | static int |
2215 | mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, const __u8 *mac) | 2215 | mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, const __u8 *mac) |
@@ -2240,7 +2240,7 @@ struct mwl8k_cmd_set_rf_channel { | |||
2240 | __le16 action; | 2240 | __le16 action; |
2241 | __u8 current_channel; | 2241 | __u8 current_channel; |
2242 | __le32 channel_flags; | 2242 | __le32 channel_flags; |
2243 | } __attribute__((packed)); | 2243 | } __packed; |
2244 | 2244 | ||
2245 | static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw, | 2245 | static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw, |
2246 | struct ieee80211_conf *conf) | 2246 | struct ieee80211_conf *conf) |
@@ -2293,7 +2293,7 @@ struct mwl8k_cmd_update_set_aid { | |||
2293 | __u8 bssid[ETH_ALEN]; | 2293 | __u8 bssid[ETH_ALEN]; |
2294 | __le16 protection_mode; | 2294 | __le16 protection_mode; |
2295 | __u8 supp_rates[14]; | 2295 | __u8 supp_rates[14]; |
2296 | } __attribute__((packed)); | 2296 | } __packed; |
2297 | 2297 | ||
2298 | static void legacy_rate_mask_to_array(u8 *rates, u32 mask) | 2298 | static void legacy_rate_mask_to_array(u8 *rates, u32 mask) |
2299 | { | 2299 | { |
@@ -2364,7 +2364,7 @@ struct mwl8k_cmd_set_rate { | |||
2364 | /* Bitmap for supported MCS codes. */ | 2364 | /* Bitmap for supported MCS codes. */ |
2365 | __u8 mcs_set[16]; | 2365 | __u8 mcs_set[16]; |
2366 | __u8 reserved[16]; | 2366 | __u8 reserved[16]; |
2367 | } __attribute__((packed)); | 2367 | } __packed; |
2368 | 2368 | ||
2369 | static int | 2369 | static int |
2370 | mwl8k_cmd_set_rate(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | 2370 | mwl8k_cmd_set_rate(struct ieee80211_hw *hw, struct ieee80211_vif *vif, |
@@ -2397,7 +2397,7 @@ struct mwl8k_cmd_finalize_join { | |||
2397 | struct mwl8k_cmd_pkt header; | 2397 | struct mwl8k_cmd_pkt header; |
2398 | __le32 sleep_interval; /* Number of beacon periods to sleep */ | 2398 | __le32 sleep_interval; /* Number of beacon periods to sleep */ |
2399 | __u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN]; | 2399 | __u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN]; |
2400 | } __attribute__((packed)); | 2400 | } __packed; |
2401 | 2401 | ||
2402 | static int mwl8k_cmd_finalize_join(struct ieee80211_hw *hw, void *frame, | 2402 | static int mwl8k_cmd_finalize_join(struct ieee80211_hw *hw, void *frame, |
2403 | int framelen, int dtim) | 2403 | int framelen, int dtim) |
@@ -2436,7 +2436,7 @@ struct mwl8k_cmd_set_rts_threshold { | |||
2436 | struct mwl8k_cmd_pkt header; | 2436 | struct mwl8k_cmd_pkt header; |
2437 | __le16 action; | 2437 | __le16 action; |
2438 | __le16 threshold; | 2438 | __le16 threshold; |
2439 | } __attribute__((packed)); | 2439 | } __packed; |
2440 | 2440 | ||
2441 | static int | 2441 | static int |
2442 | mwl8k_cmd_set_rts_threshold(struct ieee80211_hw *hw, int rts_thresh) | 2442 | mwl8k_cmd_set_rts_threshold(struct ieee80211_hw *hw, int rts_thresh) |
@@ -2466,7 +2466,7 @@ struct mwl8k_cmd_set_slot { | |||
2466 | struct mwl8k_cmd_pkt header; | 2466 | struct mwl8k_cmd_pkt header; |
2467 | __le16 action; | 2467 | __le16 action; |
2468 | __u8 short_slot; | 2468 | __u8 short_slot; |
2469 | } __attribute__((packed)); | 2469 | } __packed; |
2470 | 2470 | ||
2471 | static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time) | 2471 | static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time) |
2472 | { | 2472 | { |
@@ -2528,7 +2528,7 @@ struct mwl8k_cmd_set_edca_params { | |||
2528 | __u8 txq; | 2528 | __u8 txq; |
2529 | } sta; | 2529 | } sta; |
2530 | }; | 2530 | }; |
2531 | } __attribute__((packed)); | 2531 | } __packed; |
2532 | 2532 | ||
2533 | #define MWL8K_SET_EDCA_CW 0x01 | 2533 | #define MWL8K_SET_EDCA_CW 0x01 |
2534 | #define MWL8K_SET_EDCA_TXOP 0x02 | 2534 | #define MWL8K_SET_EDCA_TXOP 0x02 |
@@ -2579,7 +2579,7 @@ mwl8k_cmd_set_edca_params(struct ieee80211_hw *hw, __u8 qnum, | |||
2579 | struct mwl8k_cmd_set_wmm_mode { | 2579 | struct mwl8k_cmd_set_wmm_mode { |
2580 | struct mwl8k_cmd_pkt header; | 2580 | struct mwl8k_cmd_pkt header; |
2581 | __le16 action; | 2581 | __le16 action; |
2582 | } __attribute__((packed)); | 2582 | } __packed; |
2583 | 2583 | ||
2584 | static int mwl8k_cmd_set_wmm_mode(struct ieee80211_hw *hw, bool enable) | 2584 | static int mwl8k_cmd_set_wmm_mode(struct ieee80211_hw *hw, bool enable) |
2585 | { | 2585 | { |
@@ -2612,7 +2612,7 @@ struct mwl8k_cmd_mimo_config { | |||
2612 | __le32 action; | 2612 | __le32 action; |
2613 | __u8 rx_antenna_map; | 2613 | __u8 rx_antenna_map; |
2614 | __u8 tx_antenna_map; | 2614 | __u8 tx_antenna_map; |
2615 | } __attribute__((packed)); | 2615 | } __packed; |
2616 | 2616 | ||
2617 | static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx) | 2617 | static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx) |
2618 | { | 2618 | { |
@@ -2652,7 +2652,7 @@ struct mwl8k_cmd_use_fixed_rate_sta { | |||
2652 | __le32 rate_type; | 2652 | __le32 rate_type; |
2653 | __le32 reserved1; | 2653 | __le32 reserved1; |
2654 | __le32 reserved2; | 2654 | __le32 reserved2; |
2655 | } __attribute__((packed)); | 2655 | } __packed; |
2656 | 2656 | ||
2657 | #define MWL8K_USE_AUTO_RATE 0x0002 | 2657 | #define MWL8K_USE_AUTO_RATE 0x0002 |
2658 | #define MWL8K_UCAST_RATE 0 | 2658 | #define MWL8K_UCAST_RATE 0 |
@@ -2694,7 +2694,7 @@ struct mwl8k_cmd_use_fixed_rate_ap { | |||
2694 | u8 multicast_rate; | 2694 | u8 multicast_rate; |
2695 | u8 multicast_rate_type; | 2695 | u8 multicast_rate_type; |
2696 | u8 management_rate; | 2696 | u8 management_rate; |
2697 | } __attribute__((packed)); | 2697 | } __packed; |
2698 | 2698 | ||
2699 | static int | 2699 | static int |
2700 | mwl8k_cmd_use_fixed_rate_ap(struct ieee80211_hw *hw, int mcast, int mgmt) | 2700 | mwl8k_cmd_use_fixed_rate_ap(struct ieee80211_hw *hw, int mcast, int mgmt) |
@@ -2724,7 +2724,7 @@ mwl8k_cmd_use_fixed_rate_ap(struct ieee80211_hw *hw, int mcast, int mgmt) | |||
2724 | struct mwl8k_cmd_enable_sniffer { | 2724 | struct mwl8k_cmd_enable_sniffer { |
2725 | struct mwl8k_cmd_pkt header; | 2725 | struct mwl8k_cmd_pkt header; |
2726 | __le32 action; | 2726 | __le32 action; |
2727 | } __attribute__((packed)); | 2727 | } __packed; |
2728 | 2728 | ||
2729 | static int mwl8k_cmd_enable_sniffer(struct ieee80211_hw *hw, bool enable) | 2729 | static int mwl8k_cmd_enable_sniffer(struct ieee80211_hw *hw, bool enable) |
2730 | { | 2730 | { |
@@ -2757,7 +2757,7 @@ struct mwl8k_cmd_set_mac_addr { | |||
2757 | } mbss; | 2757 | } mbss; |
2758 | __u8 mac_addr[ETH_ALEN]; | 2758 | __u8 mac_addr[ETH_ALEN]; |
2759 | }; | 2759 | }; |
2760 | } __attribute__((packed)); | 2760 | } __packed; |
2761 | 2761 | ||
2762 | #define MWL8K_MAC_TYPE_PRIMARY_CLIENT 0 | 2762 | #define MWL8K_MAC_TYPE_PRIMARY_CLIENT 0 |
2763 | #define MWL8K_MAC_TYPE_SECONDARY_CLIENT 1 | 2763 | #define MWL8K_MAC_TYPE_SECONDARY_CLIENT 1 |
@@ -2812,7 +2812,7 @@ struct mwl8k_cmd_set_rate_adapt_mode { | |||
2812 | struct mwl8k_cmd_pkt header; | 2812 | struct mwl8k_cmd_pkt header; |
2813 | __le16 action; | 2813 | __le16 action; |
2814 | __le16 mode; | 2814 | __le16 mode; |
2815 | } __attribute__((packed)); | 2815 | } __packed; |
2816 | 2816 | ||
2817 | static int mwl8k_cmd_set_rateadapt_mode(struct ieee80211_hw *hw, __u16 mode) | 2817 | static int mwl8k_cmd_set_rateadapt_mode(struct ieee80211_hw *hw, __u16 mode) |
2818 | { | 2818 | { |
@@ -2840,7 +2840,7 @@ static int mwl8k_cmd_set_rateadapt_mode(struct ieee80211_hw *hw, __u16 mode) | |||
2840 | struct mwl8k_cmd_bss_start { | 2840 | struct mwl8k_cmd_bss_start { |
2841 | struct mwl8k_cmd_pkt header; | 2841 | struct mwl8k_cmd_pkt header; |
2842 | __le32 enable; | 2842 | __le32 enable; |
2843 | } __attribute__((packed)); | 2843 | } __packed; |
2844 | 2844 | ||
2845 | static int mwl8k_cmd_bss_start(struct ieee80211_hw *hw, | 2845 | static int mwl8k_cmd_bss_start(struct ieee80211_hw *hw, |
2846 | struct ieee80211_vif *vif, int enable) | 2846 | struct ieee80211_vif *vif, int enable) |
@@ -2885,7 +2885,7 @@ struct mwl8k_cmd_set_new_stn { | |||
2885 | __u8 add_qos_info; | 2885 | __u8 add_qos_info; |
2886 | __u8 is_qos_sta; | 2886 | __u8 is_qos_sta; |
2887 | __le32 fw_sta_ptr; | 2887 | __le32 fw_sta_ptr; |
2888 | } __attribute__((packed)); | 2888 | } __packed; |
2889 | 2889 | ||
2890 | #define MWL8K_STA_ACTION_ADD 0 | 2890 | #define MWL8K_STA_ACTION_ADD 0 |
2891 | #define MWL8K_STA_ACTION_REMOVE 2 | 2891 | #define MWL8K_STA_ACTION_REMOVE 2 |
@@ -2978,7 +2978,7 @@ struct ewc_ht_info { | |||
2978 | __le16 control1; | 2978 | __le16 control1; |
2979 | __le16 control2; | 2979 | __le16 control2; |
2980 | __le16 control3; | 2980 | __le16 control3; |
2981 | } __attribute__((packed)); | 2981 | } __packed; |
2982 | 2982 | ||
2983 | struct peer_capability_info { | 2983 | struct peer_capability_info { |
2984 | /* Peer type - AP vs. STA. */ | 2984 | /* Peer type - AP vs. STA. */ |
@@ -3007,7 +3007,7 @@ struct peer_capability_info { | |||
3007 | __u8 pad2; | 3007 | __u8 pad2; |
3008 | __u8 station_id; | 3008 | __u8 station_id; |
3009 | __le16 amsdu_enabled; | 3009 | __le16 amsdu_enabled; |
3010 | } __attribute__((packed)); | 3010 | } __packed; |
3011 | 3011 | ||
3012 | struct mwl8k_cmd_update_stadb { | 3012 | struct mwl8k_cmd_update_stadb { |
3013 | struct mwl8k_cmd_pkt header; | 3013 | struct mwl8k_cmd_pkt header; |
@@ -3022,7 +3022,7 @@ struct mwl8k_cmd_update_stadb { | |||
3022 | 3022 | ||
3023 | /* Peer info - valid during add/update. */ | 3023 | /* Peer info - valid during add/update. */ |
3024 | struct peer_capability_info peer_info; | 3024 | struct peer_capability_info peer_info; |
3025 | } __attribute__((packed)); | 3025 | } __packed; |
3026 | 3026 | ||
3027 | #define MWL8K_STA_DB_MODIFY_ENTRY 1 | 3027 | #define MWL8K_STA_DB_MODIFY_ENTRY 1 |
3028 | #define MWL8K_STA_DB_DEL_ENTRY 2 | 3028 | #define MWL8K_STA_DB_DEL_ENTRY 2 |
diff --git a/drivers/net/wireless/orinoco/fw.c b/drivers/net/wireless/orinoco/fw.c index 3e1947d097ca..259d75853984 100644 --- a/drivers/net/wireless/orinoco/fw.c +++ b/drivers/net/wireless/orinoco/fw.c | |||
@@ -49,7 +49,7 @@ struct orinoco_fw_header { | |||
49 | __le32 pri_offset; /* Offset to primary plug data */ | 49 | __le32 pri_offset; /* Offset to primary plug data */ |
50 | __le32 compat_offset; /* Offset to compatibility data*/ | 50 | __le32 compat_offset; /* Offset to compatibility data*/ |
51 | char signature[0]; /* FW signature length headersize-20 */ | 51 | char signature[0]; /* FW signature length headersize-20 */ |
52 | } __attribute__ ((packed)); | 52 | } __packed; |
53 | 53 | ||
54 | /* Check the range of various header entries. Return a pointer to a | 54 | /* Check the range of various header entries. Return a pointer to a |
55 | * description of the problem, or NULL if everything checks out. */ | 55 | * description of the problem, or NULL if everything checks out. */ |
diff --git a/drivers/net/wireless/orinoco/hermes.h b/drivers/net/wireless/orinoco/hermes.h index 9ca34e722b45..d9f18c11682a 100644 --- a/drivers/net/wireless/orinoco/hermes.h +++ b/drivers/net/wireless/orinoco/hermes.h | |||
@@ -205,7 +205,7 @@ struct hermes_tx_descriptor { | |||
205 | u8 retry_count; | 205 | u8 retry_count; |
206 | u8 tx_rate; | 206 | u8 tx_rate; |
207 | __le16 tx_control; | 207 | __le16 tx_control; |
208 | } __attribute__ ((packed)); | 208 | } __packed; |
209 | 209 | ||
210 | #define HERMES_TXSTAT_RETRYERR (0x0001) | 210 | #define HERMES_TXSTAT_RETRYERR (0x0001) |
211 | #define HERMES_TXSTAT_AGEDERR (0x0002) | 211 | #define HERMES_TXSTAT_AGEDERR (0x0002) |
@@ -254,7 +254,7 @@ struct hermes_tallies_frame { | |||
254 | /* Those last are probably not available in very old firmwares */ | 254 | /* Those last are probably not available in very old firmwares */ |
255 | __le16 RxDiscards_WEPICVError; | 255 | __le16 RxDiscards_WEPICVError; |
256 | __le16 RxDiscards_WEPExcluded; | 256 | __le16 RxDiscards_WEPExcluded; |
257 | } __attribute__ ((packed)); | 257 | } __packed; |
258 | 258 | ||
259 | /* Grabbed from wlan-ng - Thanks Mark... - Jean II | 259 | /* Grabbed from wlan-ng - Thanks Mark... - Jean II |
260 | * This is the result of a scan inquiry command */ | 260 | * This is the result of a scan inquiry command */ |
@@ -271,7 +271,7 @@ struct prism2_scan_apinfo { | |||
271 | u8 rates[10]; /* Bit rate supported */ | 271 | u8 rates[10]; /* Bit rate supported */ |
272 | __le16 proberesp_rate; /* Data rate of the response frame */ | 272 | __le16 proberesp_rate; /* Data rate of the response frame */ |
273 | __le16 atim; /* ATIM window time, Kus (hostscan only) */ | 273 | __le16 atim; /* ATIM window time, Kus (hostscan only) */ |
274 | } __attribute__ ((packed)); | 274 | } __packed; |
275 | 275 | ||
276 | /* Same stuff for the Lucent/Agere card. | 276 | /* Same stuff for the Lucent/Agere card. |
277 | * Thanks to h1kari <h1kari AT dachb0den.com> - Jean II */ | 277 | * Thanks to h1kari <h1kari AT dachb0den.com> - Jean II */ |
@@ -285,7 +285,7 @@ struct agere_scan_apinfo { | |||
285 | /* bits: 0-ess, 1-ibss, 4-privacy [wep] */ | 285 | /* bits: 0-ess, 1-ibss, 4-privacy [wep] */ |
286 | __le16 essid_len; /* ESSID length */ | 286 | __le16 essid_len; /* ESSID length */ |
287 | u8 essid[32]; /* ESSID of the network */ | 287 | u8 essid[32]; /* ESSID of the network */ |
288 | } __attribute__ ((packed)); | 288 | } __packed; |
289 | 289 | ||
290 | /* Moustafa: Scan structure for Symbol cards */ | 290 | /* Moustafa: Scan structure for Symbol cards */ |
291 | struct symbol_scan_apinfo { | 291 | struct symbol_scan_apinfo { |
@@ -303,7 +303,7 @@ struct symbol_scan_apinfo { | |||
303 | __le16 basic_rates; /* Basic rates bitmask */ | 303 | __le16 basic_rates; /* Basic rates bitmask */ |
304 | u8 unknown2[6]; /* Always FF:FF:FF:FF:00:00 */ | 304 | u8 unknown2[6]; /* Always FF:FF:FF:FF:00:00 */ |
305 | u8 unknown3[8]; /* Always 0, appeared in f/w 3.91-68 */ | 305 | u8 unknown3[8]; /* Always 0, appeared in f/w 3.91-68 */ |
306 | } __attribute__ ((packed)); | 306 | } __packed; |
307 | 307 | ||
308 | union hermes_scan_info { | 308 | union hermes_scan_info { |
309 | struct agere_scan_apinfo a; | 309 | struct agere_scan_apinfo a; |
@@ -343,7 +343,7 @@ struct agere_ext_scan_info { | |||
343 | __le16 beacon_interval; | 343 | __le16 beacon_interval; |
344 | __le16 capabilities; | 344 | __le16 capabilities; |
345 | u8 data[0]; | 345 | u8 data[0]; |
346 | } __attribute__ ((packed)); | 346 | } __packed; |
347 | 347 | ||
348 | #define HERMES_LINKSTATUS_NOT_CONNECTED (0x0000) | 348 | #define HERMES_LINKSTATUS_NOT_CONNECTED (0x0000) |
349 | #define HERMES_LINKSTATUS_CONNECTED (0x0001) | 349 | #define HERMES_LINKSTATUS_CONNECTED (0x0001) |
@@ -355,7 +355,7 @@ struct agere_ext_scan_info { | |||
355 | 355 | ||
356 | struct hermes_linkstatus { | 356 | struct hermes_linkstatus { |
357 | __le16 linkstatus; /* Link status */ | 357 | __le16 linkstatus; /* Link status */ |
358 | } __attribute__ ((packed)); | 358 | } __packed; |
359 | 359 | ||
360 | struct hermes_response { | 360 | struct hermes_response { |
361 | u16 status, resp0, resp1, resp2; | 361 | u16 status, resp0, resp1, resp2; |
@@ -365,11 +365,11 @@ struct hermes_response { | |||
365 | struct hermes_idstring { | 365 | struct hermes_idstring { |
366 | __le16 len; | 366 | __le16 len; |
367 | __le16 val[16]; | 367 | __le16 val[16]; |
368 | } __attribute__ ((packed)); | 368 | } __packed; |
369 | 369 | ||
370 | struct hermes_multicast { | 370 | struct hermes_multicast { |
371 | u8 addr[HERMES_MAX_MULTICAST][ETH_ALEN]; | 371 | u8 addr[HERMES_MAX_MULTICAST][ETH_ALEN]; |
372 | } __attribute__ ((packed)); | 372 | } __packed; |
373 | 373 | ||
374 | /* Timeouts */ | 374 | /* Timeouts */ |
375 | #define HERMES_BAP_BUSY_TIMEOUT (10000) /* In iterations of ~1us */ | 375 | #define HERMES_BAP_BUSY_TIMEOUT (10000) /* In iterations of ~1us */ |
diff --git a/drivers/net/wireless/orinoco/hermes_dld.c b/drivers/net/wireless/orinoco/hermes_dld.c index 6da85e75fce0..55741caa2b82 100644 --- a/drivers/net/wireless/orinoco/hermes_dld.c +++ b/drivers/net/wireless/orinoco/hermes_dld.c | |||
@@ -65,7 +65,7 @@ struct dblock { | |||
65 | __le32 addr; /* adapter address where to write the block */ | 65 | __le32 addr; /* adapter address where to write the block */ |
66 | __le16 len; /* length of the data only, in bytes */ | 66 | __le16 len; /* length of the data only, in bytes */ |
67 | char data[0]; /* data to be written */ | 67 | char data[0]; /* data to be written */ |
68 | } __attribute__ ((packed)); | 68 | } __packed; |
69 | 69 | ||
70 | /* | 70 | /* |
71 | * Plug Data References are located in the image after the last data | 71 | * Plug Data References are located in the image after the last data |
@@ -77,7 +77,7 @@ struct pdr { | |||
77 | __le32 addr; /* adapter address where to write the data */ | 77 | __le32 addr; /* adapter address where to write the data */ |
78 | __le32 len; /* expected length of the data, in bytes */ | 78 | __le32 len; /* expected length of the data, in bytes */ |
79 | char next[0]; /* next PDR starts here */ | 79 | char next[0]; /* next PDR starts here */ |
80 | } __attribute__ ((packed)); | 80 | } __packed; |
81 | 81 | ||
82 | /* | 82 | /* |
83 | * Plug Data Items are located in the EEPROM read from the adapter by | 83 | * Plug Data Items are located in the EEPROM read from the adapter by |
@@ -88,7 +88,7 @@ struct pdi { | |||
88 | __le16 len; /* length of ID and data, in words */ | 88 | __le16 len; /* length of ID and data, in words */ |
89 | __le16 id; /* record ID */ | 89 | __le16 id; /* record ID */ |
90 | char data[0]; /* plug data */ | 90 | char data[0]; /* plug data */ |
91 | } __attribute__ ((packed)); | 91 | } __packed; |
92 | 92 | ||
93 | /*** FW data block access functions ***/ | 93 | /*** FW data block access functions ***/ |
94 | 94 | ||
@@ -317,7 +317,7 @@ static const struct { \ | |||
317 | __le16 len; \ | 317 | __le16 len; \ |
318 | __le16 id; \ | 318 | __le16 id; \ |
319 | u8 val[length]; \ | 319 | u8 val[length]; \ |
320 | } __attribute__ ((packed)) default_pdr_data_##pid = { \ | 320 | } __packed default_pdr_data_##pid = { \ |
321 | cpu_to_le16((sizeof(default_pdr_data_##pid)/ \ | 321 | cpu_to_le16((sizeof(default_pdr_data_##pid)/ \ |
322 | sizeof(__le16)) - 1), \ | 322 | sizeof(__le16)) - 1), \ |
323 | cpu_to_le16(pid), \ | 323 | cpu_to_le16(pid), \ |
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c index 6fbd78850123..077baa86756b 100644 --- a/drivers/net/wireless/orinoco/hw.c +++ b/drivers/net/wireless/orinoco/hw.c | |||
@@ -45,7 +45,7 @@ static const struct { | |||
45 | /* Firmware version encoding */ | 45 | /* Firmware version encoding */ |
46 | struct comp_id { | 46 | struct comp_id { |
47 | u16 id, variant, major, minor; | 47 | u16 id, variant, major, minor; |
48 | } __attribute__ ((packed)); | 48 | } __packed; |
49 | 49 | ||
50 | static inline fwtype_t determine_firmware_type(struct comp_id *nic_id) | 50 | static inline fwtype_t determine_firmware_type(struct comp_id *nic_id) |
51 | { | 51 | { |
@@ -995,7 +995,7 @@ int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx, | |||
995 | u8 tx_mic[MIC_KEYLEN]; | 995 | u8 tx_mic[MIC_KEYLEN]; |
996 | u8 rx_mic[MIC_KEYLEN]; | 996 | u8 rx_mic[MIC_KEYLEN]; |
997 | u8 tsc[ORINOCO_SEQ_LEN]; | 997 | u8 tsc[ORINOCO_SEQ_LEN]; |
998 | } __attribute__ ((packed)) buf; | 998 | } __packed buf; |
999 | hermes_t *hw = &priv->hw; | 999 | hermes_t *hw = &priv->hw; |
1000 | int ret; | 1000 | int ret; |
1001 | int err; | 1001 | int err; |
@@ -1326,7 +1326,7 @@ int orinoco_hw_disassociate(struct orinoco_private *priv, | |||
1326 | struct { | 1326 | struct { |
1327 | u8 addr[ETH_ALEN]; | 1327 | u8 addr[ETH_ALEN]; |
1328 | __le16 reason_code; | 1328 | __le16 reason_code; |
1329 | } __attribute__ ((packed)) buf; | 1329 | } __packed buf; |
1330 | 1330 | ||
1331 | /* Currently only supported by WPA enabled Agere fw */ | 1331 | /* Currently only supported by WPA enabled Agere fw */ |
1332 | if (!priv->has_wpa) | 1332 | if (!priv->has_wpa) |
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c index ca71f08709bc..e8e2d0f4763d 100644 --- a/drivers/net/wireless/orinoco/main.c +++ b/drivers/net/wireless/orinoco/main.c | |||
@@ -172,7 +172,7 @@ struct hermes_txexc_data { | |||
172 | __le16 frame_ctl; | 172 | __le16 frame_ctl; |
173 | __le16 duration_id; | 173 | __le16 duration_id; |
174 | u8 addr1[ETH_ALEN]; | 174 | u8 addr1[ETH_ALEN]; |
175 | } __attribute__ ((packed)); | 175 | } __packed; |
176 | 176 | ||
177 | /* Rx frame header except compatibility 802.3 header */ | 177 | /* Rx frame header except compatibility 802.3 header */ |
178 | struct hermes_rx_descriptor { | 178 | struct hermes_rx_descriptor { |
@@ -196,7 +196,7 @@ struct hermes_rx_descriptor { | |||
196 | 196 | ||
197 | /* Data length */ | 197 | /* Data length */ |
198 | __le16 data_len; | 198 | __le16 data_len; |
199 | } __attribute__ ((packed)); | 199 | } __packed; |
200 | 200 | ||
201 | struct orinoco_rx_data { | 201 | struct orinoco_rx_data { |
202 | struct hermes_rx_descriptor *desc; | 202 | struct hermes_rx_descriptor *desc; |
@@ -390,7 +390,7 @@ int orinoco_process_xmit_skb(struct sk_buff *skb, | |||
390 | struct header_struct { | 390 | struct header_struct { |
391 | struct ethhdr eth; /* 802.3 header */ | 391 | struct ethhdr eth; /* 802.3 header */ |
392 | u8 encap[6]; /* 802.2 header */ | 392 | u8 encap[6]; /* 802.2 header */ |
393 | } __attribute__ ((packed)) hdr; | 393 | } __packed hdr; |
394 | int len = skb->len + sizeof(encaps_hdr) - (2 * ETH_ALEN); | 394 | int len = skb->len + sizeof(encaps_hdr) - (2 * ETH_ALEN); |
395 | 395 | ||
396 | if (skb_headroom(skb) < ENCAPS_OVERHEAD) { | 396 | if (skb_headroom(skb) < ENCAPS_OVERHEAD) { |
@@ -1170,7 +1170,7 @@ static void orinoco_join_ap(struct work_struct *work) | |||
1170 | struct join_req { | 1170 | struct join_req { |
1171 | u8 bssid[ETH_ALEN]; | 1171 | u8 bssid[ETH_ALEN]; |
1172 | __le16 channel; | 1172 | __le16 channel; |
1173 | } __attribute__ ((packed)) req; | 1173 | } __packed req; |
1174 | const int atom_len = offsetof(struct prism2_scan_apinfo, atim); | 1174 | const int atom_len = offsetof(struct prism2_scan_apinfo, atim); |
1175 | struct prism2_scan_apinfo *atom = NULL; | 1175 | struct prism2_scan_apinfo *atom = NULL; |
1176 | int offset = 4; | 1176 | int offset = 4; |
@@ -1410,7 +1410,7 @@ void __orinoco_ev_info(struct net_device *dev, hermes_t *hw) | |||
1410 | struct { | 1410 | struct { |
1411 | __le16 len; | 1411 | __le16 len; |
1412 | __le16 type; | 1412 | __le16 type; |
1413 | } __attribute__ ((packed)) info; | 1413 | } __packed info; |
1414 | int len, type; | 1414 | int len, type; |
1415 | int err; | 1415 | int err; |
1416 | 1416 | ||
diff --git a/drivers/net/wireless/orinoco/orinoco.h b/drivers/net/wireless/orinoco/orinoco.h index a6da86e0a70f..255710ef082a 100644 --- a/drivers/net/wireless/orinoco/orinoco.h +++ b/drivers/net/wireless/orinoco/orinoco.h | |||
@@ -32,7 +32,7 @@ | |||
32 | struct orinoco_key { | 32 | struct orinoco_key { |
33 | __le16 len; /* always stored as little-endian */ | 33 | __le16 len; /* always stored as little-endian */ |
34 | char data[ORINOCO_MAX_KEY_SIZE]; | 34 | char data[ORINOCO_MAX_KEY_SIZE]; |
35 | } __attribute__ ((packed)); | 35 | } __packed; |
36 | 36 | ||
37 | #define TKIP_KEYLEN 16 | 37 | #define TKIP_KEYLEN 16 |
38 | #define MIC_KEYLEN 8 | 38 | #define MIC_KEYLEN 8 |
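The fields in these orinoco structures are declared __le16/__le32 because the firmware expects little-endian values regardless of host byte order, as the orinoco_key comment above notes. A minimal standalone sketch of what such a conversion amounts to follows; put_le16 is an illustrative stand-in for the kernel's byte-order helpers, not a real kernel function:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for cpu_to_le16: store the low byte first,
 * whatever the host byte order happens to be. */
static uint16_t put_le16(uint16_t v)
{
        uint8_t b[2] = { (uint8_t)(v & 0xff), (uint8_t)(v >> 8) };
        uint16_t out;

        memcpy(&out, b, sizeof(out));
        return out;
}

int main(void)
{
        uint16_t wire = put_le16(0x1234);
        const uint8_t *p = (const uint8_t *)&wire;

        /* Prints "34 12" on little- and big-endian hosts alike. */
        printf("%02x %02x\n", p[0], p[1]);
        return 0;
}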
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c index 78f089baa8c9..11536ef17ba3 100644 --- a/drivers/net/wireless/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/orinoco/orinoco_usb.c | |||
@@ -90,7 +90,7 @@ struct header_struct { | |||
90 | /* SNAP */ | 90 | /* SNAP */ |
91 | u8 oui[3]; | 91 | u8 oui[3]; |
92 | __be16 ethertype; | 92 | __be16 ethertype; |
93 | } __attribute__ ((packed)); | 93 | } __packed; |
94 | 94 | ||
95 | struct ez_usb_fw { | 95 | struct ez_usb_fw { |
96 | u16 size; | 96 | u16 size; |
@@ -222,7 +222,7 @@ struct ezusb_packet { | |||
222 | __le16 hermes_len; | 222 | __le16 hermes_len; |
223 | __le16 hermes_rid; | 223 | __le16 hermes_rid; |
224 | u8 data[0]; | 224 | u8 data[0]; |
225 | } __attribute__ ((packed)); | 225 | } __packed; |
226 | 226 | ||
227 | /* Table of devices that work or may work with this driver */ | 227 | /* Table of devices that work or may work with this driver */ |
228 | static struct usb_device_id ezusb_table[] = { | 228 | static struct usb_device_id ezusb_table[] = { |
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c index 5775124e2aee..9f86a272cb78 100644 --- a/drivers/net/wireless/orinoco/wext.c +++ b/drivers/net/wireless/orinoco/wext.c | |||
@@ -128,7 +128,7 @@ static struct iw_statistics *orinoco_get_wireless_stats(struct net_device *dev) | |||
128 | } else { | 128 | } else { |
129 | struct { | 129 | struct { |
130 | __le16 qual, signal, noise, unused; | 130 | __le16 qual, signal, noise, unused; |
131 | } __attribute__ ((packed)) cq; | 131 | } __packed cq; |
132 | 132 | ||
133 | err = HERMES_READ_RECORD(hw, USER_BAP, | 133 | err = HERMES_READ_RECORD(hw, USER_BAP, |
134 | HERMES_RID_COMMSQUALITY, &cq); | 134 | HERMES_RID_COMMSQUALITY, &cq); |
diff --git a/drivers/net/wireless/p54/net2280.h b/drivers/net/wireless/p54/net2280.h index 4915d9d54203..e3ed893b5aaf 100644 --- a/drivers/net/wireless/p54/net2280.h +++ b/drivers/net/wireless/p54/net2280.h | |||
@@ -232,7 +232,7 @@ struct net2280_regs { | |||
232 | #define GPIO2_INTERRUPT 2 | 232 | #define GPIO2_INTERRUPT 2 |
233 | #define GPIO1_INTERRUPT 1 | 233 | #define GPIO1_INTERRUPT 1 |
234 | #define GPIO0_INTERRUPT 0 | 234 | #define GPIO0_INTERRUPT 0 |
235 | } __attribute__ ((packed)); | 235 | } __packed; |
236 | 236 | ||
237 | /* usb control, BAR0 + 0x0080 */ | 237 | /* usb control, BAR0 + 0x0080 */ |
238 | struct net2280_usb_regs { | 238 | struct net2280_usb_regs { |
@@ -296,7 +296,7 @@ struct net2280_usb_regs { | |||
296 | #define FORCE_IMMEDIATE 7 | 296 | #define FORCE_IMMEDIATE 7 |
297 | #define OUR_USB_ADDRESS 0 | 297 | #define OUR_USB_ADDRESS 0 |
298 | __le32 ourconfig; | 298 | __le32 ourconfig; |
299 | } __attribute__ ((packed)); | 299 | } __packed; |
300 | 300 | ||
301 | /* pci control, BAR0 + 0x0100 */ | 301 | /* pci control, BAR0 + 0x0100 */ |
302 | struct net2280_pci_regs { | 302 | struct net2280_pci_regs { |
@@ -323,7 +323,7 @@ struct net2280_pci_regs { | |||
323 | #define PCI_ARBITER_CLEAR 2 | 323 | #define PCI_ARBITER_CLEAR 2 |
324 | #define PCI_EXTERNAL_ARBITER 1 | 324 | #define PCI_EXTERNAL_ARBITER 1 |
325 | #define PCI_HOST_MODE 0 | 325 | #define PCI_HOST_MODE 0 |
326 | } __attribute__ ((packed)); | 326 | } __packed; |
327 | 327 | ||
328 | /* dma control, BAR0 + 0x0180 ... array of four structs like this, | 328 | /* dma control, BAR0 + 0x0180 ... array of four structs like this, |
329 | * for channels 0..3. see also struct net2280_dma: descriptor | 329 | * for channels 0..3. see also struct net2280_dma: descriptor |
@@ -364,7 +364,7 @@ struct net2280_dma_regs { /* [11.7] */ | |||
364 | __le32 dmaaddr; | 364 | __le32 dmaaddr; |
365 | __le32 dmadesc; | 365 | __le32 dmadesc; |
366 | u32 _unused1; | 366 | u32 _unused1; |
367 | } __attribute__ ((packed)); | 367 | } __packed; |
368 | 368 | ||
369 | /* dedicated endpoint registers, BAR0 + 0x0200 */ | 369 | /* dedicated endpoint registers, BAR0 + 0x0200 */ |
370 | 370 | ||
@@ -374,7 +374,7 @@ struct net2280_dep_regs { /* [11.8] */ | |||
374 | /* offset 0x0204, 0x0214, 0x224, 0x234, 0x244 */ | 374 | /* offset 0x0204, 0x0214, 0x224, 0x234, 0x244 */ |
375 | __le32 dep_rsp; | 375 | __le32 dep_rsp; |
376 | u32 _unused[2]; | 376 | u32 _unused[2]; |
377 | } __attribute__ ((packed)); | 377 | } __packed; |
378 | 378 | ||
379 | /* configurable endpoint registers, BAR0 + 0x0300 ... array of seven structs | 379 | /* configurable endpoint registers, BAR0 + 0x0300 ... array of seven structs |
380 | * like this, for ep0 then the configurable endpoints A..F | 380 | * like this, for ep0 then the configurable endpoints A..F |
@@ -437,16 +437,16 @@ struct net2280_ep_regs { /* [11.9] */ | |||
437 | __le32 ep_avail; | 437 | __le32 ep_avail; |
438 | __le32 ep_data; | 438 | __le32 ep_data; |
439 | u32 _unused0[2]; | 439 | u32 _unused0[2]; |
440 | } __attribute__ ((packed)); | 440 | } __packed; |
441 | 441 | ||
442 | struct net2280_reg_write { | 442 | struct net2280_reg_write { |
443 | __le16 port; | 443 | __le16 port; |
444 | __le32 addr; | 444 | __le32 addr; |
445 | __le32 val; | 445 | __le32 val; |
446 | } __attribute__ ((packed)); | 446 | } __packed; |
447 | 447 | ||
448 | struct net2280_reg_read { | 448 | struct net2280_reg_read { |
449 | __le16 port; | 449 | __le16 port; |
450 | __le32 addr; | 450 | __le32 addr; |
451 | } __attribute__ ((packed)); | 451 | } __packed; |
452 | #endif /* NET2280_H */ | 452 | #endif /* NET2280_H */ |
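Packing matters for blocks like the net2280 register and descriptor structures because they are overlaid directly on a fixed device layout, where compiler-inserted alignment padding would shift every later field. The sketch below mirrors the member layout of net2280_reg_write above (one 16-bit field followed by two 32-bit fields), with uint16_t/uint32_t standing in for __le16/__le32 and purely illustrative names:

#include <stdint.h>
#include <stdio.h>

#define __packed __attribute__((packed))

/* Same member layout as net2280_reg_write, with and without packing. */
struct reg_write_packed {
        uint16_t port;
        uint32_t addr;
        uint32_t val;
} __packed;

struct reg_write_natural {
        uint16_t port;
        uint32_t addr;
        uint32_t val;
};

int main(void)
{
        /* Natural alignment inserts 2 bytes of padding after 'port',
         * growing the struct from the 10 bytes the device expects to
         * (typically) 12. */
        printf("packed:  %zu\n", sizeof(struct reg_write_packed));
        printf("natural: %zu\n", sizeof(struct reg_write_natural));
        return 0;
}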
diff --git a/drivers/net/wireless/p54/p54pci.h b/drivers/net/wireless/p54/p54pci.h index 2feead617a3b..ee9bc62a4fa2 100644 --- a/drivers/net/wireless/p54/p54pci.h +++ b/drivers/net/wireless/p54/p54pci.h | |||
@@ -65,7 +65,7 @@ struct p54p_csr { | |||
65 | u8 unused_6[1924]; | 65 | u8 unused_6[1924]; |
66 | u8 cardbus_cis[0x800]; | 66 | u8 cardbus_cis[0x800]; |
67 | u8 direct_mem_win[0x1000]; | 67 | u8 direct_mem_win[0x1000]; |
68 | } __attribute__ ((packed)); | 68 | } __packed; |
69 | 69 | ||
70 | /* usb backend only needs the register defines above */ | 70 | /* usb backend only needs the register defines above */ |
71 | #ifndef P54USB_H | 71 | #ifndef P54USB_H |
@@ -74,7 +74,7 @@ struct p54p_desc { | |||
74 | __le32 device_addr; | 74 | __le32 device_addr; |
75 | __le16 len; | 75 | __le16 len; |
76 | __le16 flags; | 76 | __le16 flags; |
77 | } __attribute__ ((packed)); | 77 | } __packed; |
78 | 78 | ||
79 | struct p54p_ring_control { | 79 | struct p54p_ring_control { |
80 | __le32 host_idx[4]; | 80 | __le32 host_idx[4]; |
@@ -83,7 +83,7 @@ struct p54p_ring_control { | |||
83 | struct p54p_desc tx_data[32]; | 83 | struct p54p_desc tx_data[32]; |
84 | struct p54p_desc rx_mgmt[4]; | 84 | struct p54p_desc rx_mgmt[4]; |
85 | struct p54p_desc tx_mgmt[4]; | 85 | struct p54p_desc tx_mgmt[4]; |
86 | } __attribute__ ((packed)); | 86 | } __packed; |
87 | 87 | ||
88 | #define P54P_READ(r) (__force __le32)__raw_readl(&priv->map->r) | 88 | #define P54P_READ(r) (__force __le32)__raw_readl(&priv->map->r) |
89 | #define P54P_WRITE(r, val) __raw_writel((__force u32)(__le32)(val), &priv->map->r) | 89 | #define P54P_WRITE(r, val) __raw_writel((__force u32)(__le32)(val), &priv->map->r) |
diff --git a/drivers/net/wireless/p54/p54spi.h b/drivers/net/wireless/p54/p54spi.h index 7fbe8d8fc67c..dfaa62aaeb07 100644 --- a/drivers/net/wireless/p54/p54spi.h +++ b/drivers/net/wireless/p54/p54spi.h | |||
@@ -96,7 +96,7 @@ struct p54s_dma_regs { | |||
96 | __le16 cmd; | 96 | __le16 cmd; |
97 | __le16 len; | 97 | __le16 len; |
98 | __le32 addr; | 98 | __le32 addr; |
99 | } __attribute__ ((packed)); | 99 | } __packed; |
100 | 100 | ||
101 | struct p54s_tx_info { | 101 | struct p54s_tx_info { |
102 | struct list_head tx_list; | 102 | struct list_head tx_list; |
diff --git a/drivers/net/wireless/p54/p54usb.h b/drivers/net/wireless/p54/p54usb.h index e935b79f7f75..ed4034ade59a 100644 --- a/drivers/net/wireless/p54/p54usb.h +++ b/drivers/net/wireless/p54/p54usb.h | |||
@@ -70,12 +70,12 @@ struct net2280_tx_hdr { | |||
70 | __le16 len; | 70 | __le16 len; |
71 | __le16 follower; /* ? */ | 71 | __le16 follower; /* ? */ |
72 | u8 padding[8]; | 72 | u8 padding[8]; |
73 | } __attribute__((packed)); | 73 | } __packed; |
74 | 74 | ||
75 | struct lm87_tx_hdr { | 75 | struct lm87_tx_hdr { |
76 | __le32 device_addr; | 76 | __le32 device_addr; |
77 | __le32 chksum; | 77 | __le32 chksum; |
78 | } __attribute__((packed)); | 78 | } __packed; |
79 | 79 | ||
80 | /* Some flags for the isl hardware registers controlling DMA inside the | 80 | /* Some flags for the isl hardware registers controlling DMA inside the |
81 | * chip */ | 81 | * chip */ |
@@ -103,7 +103,7 @@ struct x2_header { | |||
103 | __le32 fw_load_addr; | 103 | __le32 fw_load_addr; |
104 | __le32 fw_length; | 104 | __le32 fw_length; |
105 | __le32 crc; | 105 | __le32 crc; |
106 | } __attribute__((packed)); | 106 | } __packed; |
107 | 107 | ||
108 | /* pipes 3 and 4 are not used by the driver */ | 108 | /* pipes 3 and 4 are not used by the driver */ |
109 | #define P54U_PIPE_NUMBER 9 | 109 | #define P54U_PIPE_NUMBER 9 |
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c index 8d1190c0f062..13730a807002 100644 --- a/drivers/net/wireless/prism54/isl_ioctl.c +++ b/drivers/net/wireless/prism54/isl_ioctl.c | |||
@@ -2101,7 +2101,7 @@ struct ieee80211_beacon_phdr { | |||
2101 | u8 timestamp[8]; | 2101 | u8 timestamp[8]; |
2102 | u16 beacon_int; | 2102 | u16 beacon_int; |
2103 | u16 capab_info; | 2103 | u16 capab_info; |
2104 | } __attribute__ ((packed)); | 2104 | } __packed; |
2105 | 2105 | ||
2106 | #define WLAN_EID_GENERIC 0xdd | 2106 | #define WLAN_EID_GENERIC 0xdd |
2107 | static u8 wpa_oid[4] = { 0x00, 0x50, 0xf2, 1 }; | 2107 | static u8 wpa_oid[4] = { 0x00, 0x50, 0xf2, 1 }; |
diff --git a/drivers/net/wireless/prism54/isl_oid.h b/drivers/net/wireless/prism54/isl_oid.h index b7534c2869c8..59e31258d450 100644 --- a/drivers/net/wireless/prism54/isl_oid.h +++ b/drivers/net/wireless/prism54/isl_oid.h | |||
@@ -29,20 +29,20 @@ | |||
29 | struct obj_ssid { | 29 | struct obj_ssid { |
30 | u8 length; | 30 | u8 length; |
31 | char octets[33]; | 31 | char octets[33]; |
32 | } __attribute__ ((packed)); | 32 | } __packed; |
33 | 33 | ||
34 | struct obj_key { | 34 | struct obj_key { |
35 | u8 type; /* dot11_priv_t */ | 35 | u8 type; /* dot11_priv_t */ |
36 | u8 length; | 36 | u8 length; |
37 | char key[32]; | 37 | char key[32]; |
38 | } __attribute__ ((packed)); | 38 | } __packed; |
39 | 39 | ||
40 | struct obj_mlme { | 40 | struct obj_mlme { |
41 | u8 address[6]; | 41 | u8 address[6]; |
42 | u16 id; | 42 | u16 id; |
43 | u16 state; | 43 | u16 state; |
44 | u16 code; | 44 | u16 code; |
45 | } __attribute__ ((packed)); | 45 | } __packed; |
46 | 46 | ||
47 | struct obj_mlmeex { | 47 | struct obj_mlmeex { |
48 | u8 address[6]; | 48 | u8 address[6]; |
@@ -51,12 +51,12 @@ struct obj_mlmeex { | |||
51 | u16 code; | 51 | u16 code; |
52 | u16 size; | 52 | u16 size; |
53 | u8 data[0]; | 53 | u8 data[0]; |
54 | } __attribute__ ((packed)); | 54 | } __packed; |
55 | 55 | ||
56 | struct obj_buffer { | 56 | struct obj_buffer { |
57 | u32 size; | 57 | u32 size; |
58 | u32 addr; /* 32bit bus address */ | 58 | u32 addr; /* 32bit bus address */ |
59 | } __attribute__ ((packed)); | 59 | } __packed; |
60 | 60 | ||
61 | struct obj_bss { | 61 | struct obj_bss { |
62 | u8 address[6]; | 62 | u8 address[6]; |
@@ -77,17 +77,17 @@ struct obj_bss { | |||
77 | short rates; | 77 | short rates; |
78 | short basic_rates; | 78 | short basic_rates; |
79 | int:16; /* padding */ | 79 | int:16; /* padding */ |
80 | } __attribute__ ((packed)); | 80 | } __packed; |
81 | 81 | ||
82 | struct obj_bsslist { | 82 | struct obj_bsslist { |
83 | u32 nr; | 83 | u32 nr; |
84 | struct obj_bss bsslist[0]; | 84 | struct obj_bss bsslist[0]; |
85 | } __attribute__ ((packed)); | 85 | } __packed; |
86 | 86 | ||
87 | struct obj_frequencies { | 87 | struct obj_frequencies { |
88 | u16 nr; | 88 | u16 nr; |
89 | u16 mhz[0]; | 89 | u16 mhz[0]; |
90 | } __attribute__ ((packed)); | 90 | } __packed; |
91 | 91 | ||
92 | struct obj_attachment { | 92 | struct obj_attachment { |
93 | char type; | 93 | char type; |
@@ -95,7 +95,7 @@ struct obj_attachment { | |||
95 | short id; | 95 | short id; |
96 | short size; | 96 | short size; |
97 | char data[0]; | 97 | char data[0]; |
98 | } __attribute__((packed)); | 98 | } __packed; |
99 | 99 | ||
100 | /* | 100 | /* |
101 | * in case everything's ok, the inlined function below will be | 101 | * in case everything's ok, the inlined function below will be |
diff --git a/drivers/net/wireless/prism54/islpci_eth.h b/drivers/net/wireless/prism54/islpci_eth.h index 54f9a4b7bf9b..6ca30a5b7bfb 100644 --- a/drivers/net/wireless/prism54/islpci_eth.h +++ b/drivers/net/wireless/prism54/islpci_eth.h | |||
@@ -34,13 +34,13 @@ struct rfmon_header { | |||
34 | __le16 unk3; | 34 | __le16 unk3; |
35 | u8 rssi; | 35 | u8 rssi; |
36 | u8 padding[3]; | 36 | u8 padding[3]; |
37 | } __attribute__ ((packed)); | 37 | } __packed; |
38 | 38 | ||
39 | struct rx_annex_header { | 39 | struct rx_annex_header { |
40 | u8 addr1[ETH_ALEN]; | 40 | u8 addr1[ETH_ALEN]; |
41 | u8 addr2[ETH_ALEN]; | 41 | u8 addr2[ETH_ALEN]; |
42 | struct rfmon_header rfmon; | 42 | struct rfmon_header rfmon; |
43 | } __attribute__ ((packed)); | 43 | } __packed; |
44 | 44 | ||
45 | /* wlan-ng (and hopefully others) AVS header, version one. Fields in | 45 | /* wlan-ng (and hopefully others) AVS header, version one. Fields in |
46 | * network byte order. */ | 46 | * network byte order. */ |
diff --git a/drivers/net/wireless/prism54/islpci_mgt.h b/drivers/net/wireless/prism54/islpci_mgt.h index 0b27e50fe0d5..0db93db9b675 100644 --- a/drivers/net/wireless/prism54/islpci_mgt.h +++ b/drivers/net/wireless/prism54/islpci_mgt.h | |||
@@ -101,7 +101,7 @@ typedef struct { | |||
101 | u8 device_id; | 101 | u8 device_id; |
102 | u8 flags; | 102 | u8 flags; |
103 | u32 length; | 103 | u32 length; |
104 | } __attribute__ ((packed)) | 104 | } __packed |
105 | pimfor_header_t; | 105 | pimfor_header_t; |
106 | 106 | ||
107 | /* A received and interrupt-processed management frame, either for | 107 | /* A received and interrupt-processed management frame, either for |
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index 4bd61ee627c0..989b0561c01b 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c | |||
@@ -238,19 +238,19 @@ struct ndis_80211_auth_request { | |||
238 | u8 bssid[6]; | 238 | u8 bssid[6]; |
239 | u8 padding[2]; | 239 | u8 padding[2]; |
240 | __le32 flags; | 240 | __le32 flags; |
241 | } __attribute__((packed)); | 241 | } __packed; |
242 | 242 | ||
243 | struct ndis_80211_pmkid_candidate { | 243 | struct ndis_80211_pmkid_candidate { |
244 | u8 bssid[6]; | 244 | u8 bssid[6]; |
245 | u8 padding[2]; | 245 | u8 padding[2]; |
246 | __le32 flags; | 246 | __le32 flags; |
247 | } __attribute__((packed)); | 247 | } __packed; |
248 | 248 | ||
249 | struct ndis_80211_pmkid_cand_list { | 249 | struct ndis_80211_pmkid_cand_list { |
250 | __le32 version; | 250 | __le32 version; |
251 | __le32 num_candidates; | 251 | __le32 num_candidates; |
252 | struct ndis_80211_pmkid_candidate candidate_list[0]; | 252 | struct ndis_80211_pmkid_candidate candidate_list[0]; |
253 | } __attribute__((packed)); | 253 | } __packed; |
254 | 254 | ||
255 | struct ndis_80211_status_indication { | 255 | struct ndis_80211_status_indication { |
256 | __le32 status_type; | 256 | __le32 status_type; |
@@ -260,19 +260,19 @@ struct ndis_80211_status_indication { | |||
260 | struct ndis_80211_auth_request auth_request[0]; | 260 | struct ndis_80211_auth_request auth_request[0]; |
261 | struct ndis_80211_pmkid_cand_list cand_list; | 261 | struct ndis_80211_pmkid_cand_list cand_list; |
262 | } u; | 262 | } u; |
263 | } __attribute__((packed)); | 263 | } __packed; |
264 | 264 | ||
265 | struct ndis_80211_ssid { | 265 | struct ndis_80211_ssid { |
266 | __le32 length; | 266 | __le32 length; |
267 | u8 essid[NDIS_802_11_LENGTH_SSID]; | 267 | u8 essid[NDIS_802_11_LENGTH_SSID]; |
268 | } __attribute__((packed)); | 268 | } __packed; |
269 | 269 | ||
270 | struct ndis_80211_conf_freq_hop { | 270 | struct ndis_80211_conf_freq_hop { |
271 | __le32 length; | 271 | __le32 length; |
272 | __le32 hop_pattern; | 272 | __le32 hop_pattern; |
273 | __le32 hop_set; | 273 | __le32 hop_set; |
274 | __le32 dwell_time; | 274 | __le32 dwell_time; |
275 | } __attribute__((packed)); | 275 | } __packed; |
276 | 276 | ||
277 | struct ndis_80211_conf { | 277 | struct ndis_80211_conf { |
278 | __le32 length; | 278 | __le32 length; |
@@ -280,7 +280,7 @@ struct ndis_80211_conf { | |||
280 | __le32 atim_window; | 280 | __le32 atim_window; |
281 | __le32 ds_config; | 281 | __le32 ds_config; |
282 | struct ndis_80211_conf_freq_hop fh_config; | 282 | struct ndis_80211_conf_freq_hop fh_config; |
283 | } __attribute__((packed)); | 283 | } __packed; |
284 | 284 | ||
285 | struct ndis_80211_bssid_ex { | 285 | struct ndis_80211_bssid_ex { |
286 | __le32 length; | 286 | __le32 length; |
@@ -295,25 +295,25 @@ struct ndis_80211_bssid_ex { | |||
295 | u8 rates[NDIS_802_11_LENGTH_RATES_EX]; | 295 | u8 rates[NDIS_802_11_LENGTH_RATES_EX]; |
296 | __le32 ie_length; | 296 | __le32 ie_length; |
297 | u8 ies[0]; | 297 | u8 ies[0]; |
298 | } __attribute__((packed)); | 298 | } __packed; |
299 | 299 | ||
300 | struct ndis_80211_bssid_list_ex { | 300 | struct ndis_80211_bssid_list_ex { |
301 | __le32 num_items; | 301 | __le32 num_items; |
302 | struct ndis_80211_bssid_ex bssid[0]; | 302 | struct ndis_80211_bssid_ex bssid[0]; |
303 | } __attribute__((packed)); | 303 | } __packed; |
304 | 304 | ||
305 | struct ndis_80211_fixed_ies { | 305 | struct ndis_80211_fixed_ies { |
306 | u8 timestamp[8]; | 306 | u8 timestamp[8]; |
307 | __le16 beacon_interval; | 307 | __le16 beacon_interval; |
308 | __le16 capabilities; | 308 | __le16 capabilities; |
309 | } __attribute__((packed)); | 309 | } __packed; |
310 | 310 | ||
311 | struct ndis_80211_wep_key { | 311 | struct ndis_80211_wep_key { |
312 | __le32 size; | 312 | __le32 size; |
313 | __le32 index; | 313 | __le32 index; |
314 | __le32 length; | 314 | __le32 length; |
315 | u8 material[32]; | 315 | u8 material[32]; |
316 | } __attribute__((packed)); | 316 | } __packed; |
317 | 317 | ||
318 | struct ndis_80211_key { | 318 | struct ndis_80211_key { |
319 | __le32 size; | 319 | __le32 size; |
@@ -323,14 +323,14 @@ struct ndis_80211_key { | |||
323 | u8 padding[6]; | 323 | u8 padding[6]; |
324 | u8 rsc[8]; | 324 | u8 rsc[8]; |
325 | u8 material[32]; | 325 | u8 material[32]; |
326 | } __attribute__((packed)); | 326 | } __packed; |
327 | 327 | ||
328 | struct ndis_80211_remove_key { | 328 | struct ndis_80211_remove_key { |
329 | __le32 size; | 329 | __le32 size; |
330 | __le32 index; | 330 | __le32 index; |
331 | u8 bssid[6]; | 331 | u8 bssid[6]; |
332 | u8 padding[2]; | 332 | u8 padding[2]; |
333 | } __attribute__((packed)); | 333 | } __packed; |
334 | 334 | ||
335 | struct ndis_config_param { | 335 | struct ndis_config_param { |
336 | __le32 name_offs; | 336 | __le32 name_offs; |
@@ -338,7 +338,7 @@ struct ndis_config_param { | |||
338 | __le32 type; | 338 | __le32 type; |
339 | __le32 value_offs; | 339 | __le32 value_offs; |
340 | __le32 value_length; | 340 | __le32 value_length; |
341 | } __attribute__((packed)); | 341 | } __packed; |
342 | 342 | ||
343 | struct ndis_80211_assoc_info { | 343 | struct ndis_80211_assoc_info { |
344 | __le32 length; | 344 | __le32 length; |
@@ -358,12 +358,12 @@ struct ndis_80211_assoc_info { | |||
358 | } resp_ie; | 358 | } resp_ie; |
359 | __le32 resp_ie_length; | 359 | __le32 resp_ie_length; |
360 | __le32 offset_resp_ies; | 360 | __le32 offset_resp_ies; |
361 | } __attribute__((packed)); | 361 | } __packed; |
362 | 362 | ||
363 | struct ndis_80211_auth_encr_pair { | 363 | struct ndis_80211_auth_encr_pair { |
364 | __le32 auth_mode; | 364 | __le32 auth_mode; |
365 | __le32 encr_mode; | 365 | __le32 encr_mode; |
366 | } __attribute__((packed)); | 366 | } __packed; |
367 | 367 | ||
368 | struct ndis_80211_capability { | 368 | struct ndis_80211_capability { |
369 | __le32 length; | 369 | __le32 length; |
@@ -371,7 +371,7 @@ struct ndis_80211_capability { | |||
371 | __le32 num_pmkids; | 371 | __le32 num_pmkids; |
372 | __le32 num_auth_encr_pair; | 372 | __le32 num_auth_encr_pair; |
373 | struct ndis_80211_auth_encr_pair auth_encr_pair[0]; | 373 | struct ndis_80211_auth_encr_pair auth_encr_pair[0]; |
374 | } __attribute__((packed)); | 374 | } __packed; |
375 | 375 | ||
376 | struct ndis_80211_bssid_info { | 376 | struct ndis_80211_bssid_info { |
377 | u8 bssid[6]; | 377 | u8 bssid[6]; |
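Many of the packed structures in rndis_wlan.c (and in the mwl8k and hermes headers earlier) end in a zero-length array such as ies[0] or candidate_list[0], the old GNU C idiom for a variable-length tail. The standalone sketch below shows how such a message is typically sized and filled; demo_cmd and its fields are hypothetical, not an actual NDIS structure:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define __packed __attribute__((packed))

/* Hypothetical fixed header with a data[0] tail, in the style used above. */
struct demo_cmd {
        uint16_t code;
        uint16_t length;        /* number of bytes that follow in data[] */
        uint8_t data[0];        /* zero-length tail: adds nothing to sizeof */
} __packed;

int main(void)
{
        const uint8_t payload[] = { 0xde, 0xad, 0xbe, 0xef };
        struct demo_cmd *cmd;

        /* Allocate the fixed header plus however much tail data is needed. */
        cmd = malloc(sizeof(*cmd) + sizeof(payload));
        if (!cmd)
                return 1;
        cmd->code = 1;
        cmd->length = sizeof(payload);
        memcpy(cmd->data, payload, sizeof(payload));

        printf("header %zu bytes, message %zu bytes\n",
               sizeof(*cmd), sizeof(*cmd) + (size_t)cmd->length);
        free(cmd);
        return 0;
}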
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h index 2aa03751c341..0b17934cf6a9 100644 --- a/drivers/net/wireless/rt2x00/rt2800.h +++ b/drivers/net/wireless/rt2x00/rt2800.h | |||
@@ -1370,17 +1370,17 @@ | |||
1370 | struct mac_wcid_entry { | 1370 | struct mac_wcid_entry { |
1371 | u8 mac[6]; | 1371 | u8 mac[6]; |
1372 | u8 reserved[2]; | 1372 | u8 reserved[2]; |
1373 | } __attribute__ ((packed)); | 1373 | } __packed; |
1374 | 1374 | ||
1375 | struct hw_key_entry { | 1375 | struct hw_key_entry { |
1376 | u8 key[16]; | 1376 | u8 key[16]; |
1377 | u8 tx_mic[8]; | 1377 | u8 tx_mic[8]; |
1378 | u8 rx_mic[8]; | 1378 | u8 rx_mic[8]; |
1379 | } __attribute__ ((packed)); | 1379 | } __packed; |
1380 | 1380 | ||
1381 | struct mac_iveiv_entry { | 1381 | struct mac_iveiv_entry { |
1382 | u8 iv[8]; | 1382 | u8 iv[8]; |
1383 | } __attribute__ ((packed)); | 1383 | } __packed; |
1384 | 1384 | ||
1385 | /* | 1385 | /* |
1386 | * MAC_WCID_ATTRIBUTE: | 1386 | * MAC_WCID_ATTRIBUTE: |
diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h index df80f1af22a4..e2e728ab0b2e 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.h +++ b/drivers/net/wireless/rt2x00/rt61pci.h | |||
@@ -153,13 +153,13 @@ struct hw_key_entry { | |||
153 | u8 key[16]; | 153 | u8 key[16]; |
154 | u8 tx_mic[8]; | 154 | u8 tx_mic[8]; |
155 | u8 rx_mic[8]; | 155 | u8 rx_mic[8]; |
156 | } __attribute__ ((packed)); | 156 | } __packed; |
157 | 157 | ||
158 | struct hw_pairwise_ta_entry { | 158 | struct hw_pairwise_ta_entry { |
159 | u8 address[6]; | 159 | u8 address[6]; |
160 | u8 cipher; | 160 | u8 cipher; |
161 | u8 reserved; | 161 | u8 reserved; |
162 | } __attribute__ ((packed)); | 162 | } __packed; |
163 | 163 | ||
164 | /* | 164 | /* |
165 | * Other on-chip shared memory space. | 165 | * Other on-chip shared memory space. |
diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h index 7abe7eb14555..44d5b2bebd39 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.h +++ b/drivers/net/wireless/rt2x00/rt73usb.h | |||
@@ -108,13 +108,13 @@ struct hw_key_entry { | |||
108 | u8 key[16]; | 108 | u8 key[16]; |
109 | u8 tx_mic[8]; | 109 | u8 tx_mic[8]; |
110 | u8 rx_mic[8]; | 110 | u8 rx_mic[8]; |
111 | } __attribute__ ((packed)); | 111 | } __packed; |
112 | 112 | ||
113 | struct hw_pairwise_ta_entry { | 113 | struct hw_pairwise_ta_entry { |
114 | u8 address[6]; | 114 | u8 address[6]; |
115 | u8 cipher; | 115 | u8 cipher; |
116 | u8 reserved; | 116 | u8 reserved; |
117 | } __attribute__ ((packed)); | 117 | } __packed; |
118 | 118 | ||
119 | /* | 119 | /* |
120 | * Since NULL frame won't be that long (256 byte), | 120 | * Since NULL frame won't be that long (256 byte), |
diff --git a/drivers/net/wireless/rtl818x/rtl8180.h b/drivers/net/wireless/rtl818x/rtl8180.h index 4baf0cf0826f..30523314da43 100644 --- a/drivers/net/wireless/rtl818x/rtl8180.h +++ b/drivers/net/wireless/rtl818x/rtl8180.h | |||
@@ -36,7 +36,7 @@ struct rtl8180_tx_desc { | |||
36 | u8 agc; | 36 | u8 agc; |
37 | u8 flags2; | 37 | u8 flags2; |
38 | u32 reserved[2]; | 38 | u32 reserved[2]; |
39 | } __attribute__ ((packed)); | 39 | } __packed; |
40 | 40 | ||
41 | struct rtl8180_rx_desc { | 41 | struct rtl8180_rx_desc { |
42 | __le32 flags; | 42 | __le32 flags; |
@@ -45,7 +45,7 @@ struct rtl8180_rx_desc { | |||
45 | __le32 rx_buf; | 45 | __le32 rx_buf; |
46 | __le64 tsft; | 46 | __le64 tsft; |
47 | }; | 47 | }; |
48 | } __attribute__ ((packed)); | 48 | } __packed; |
49 | 49 | ||
50 | struct rtl8180_tx_ring { | 50 | struct rtl8180_tx_ring { |
51 | struct rtl8180_tx_desc *desc; | 51 | struct rtl8180_tx_desc *desc; |
diff --git a/drivers/net/wireless/rtl818x/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187.h index 6bb32112e65c..98878160a65a 100644 --- a/drivers/net/wireless/rtl818x/rtl8187.h +++ b/drivers/net/wireless/rtl818x/rtl8187.h | |||
@@ -47,7 +47,7 @@ struct rtl8187_rx_hdr { | |||
47 | u8 agc; | 47 | u8 agc; |
48 | u8 reserved; | 48 | u8 reserved; |
49 | __le64 mac_time; | 49 | __le64 mac_time; |
50 | } __attribute__((packed)); | 50 | } __packed; |
51 | 51 | ||
52 | struct rtl8187b_rx_hdr { | 52 | struct rtl8187b_rx_hdr { |
53 | __le32 flags; | 53 | __le32 flags; |
@@ -59,7 +59,7 @@ struct rtl8187b_rx_hdr { | |||
59 | __le16 snr_long2end; | 59 | __le16 snr_long2end; |
60 | s8 pwdb_g12; | 60 | s8 pwdb_g12; |
61 | u8 fot; | 61 | u8 fot; |
62 | } __attribute__((packed)); | 62 | } __packed; |
63 | 63 | ||
64 | /* {rtl8187,rtl8187b}_tx_info is in skb */ | 64 | /* {rtl8187,rtl8187b}_tx_info is in skb */ |
65 | 65 | ||
@@ -68,7 +68,7 @@ struct rtl8187_tx_hdr { | |||
68 | __le16 rts_duration; | 68 | __le16 rts_duration; |
69 | __le16 len; | 69 | __le16 len; |
70 | __le32 retry; | 70 | __le32 retry; |
71 | } __attribute__((packed)); | 71 | } __packed; |
72 | 72 | ||
73 | struct rtl8187b_tx_hdr { | 73 | struct rtl8187b_tx_hdr { |
74 | __le32 flags; | 74 | __le32 flags; |
@@ -80,7 +80,7 @@ struct rtl8187b_tx_hdr { | |||
80 | __le32 unused_3; | 80 | __le32 unused_3; |
81 | __le32 retry; | 81 | __le32 retry; |
82 | __le32 unused_4[2]; | 82 | __le32 unused_4[2]; |
83 | } __attribute__((packed)); | 83 | } __packed; |
84 | 84 | ||
85 | enum { | 85 | enum { |
86 | DEVICE_RTL8187, | 86 | DEVICE_RTL8187, |
diff --git a/drivers/net/wireless/rtl818x/rtl818x.h b/drivers/net/wireless/rtl818x/rtl818x.h index 8522490d2e29..978519d1ff4c 100644 --- a/drivers/net/wireless/rtl818x/rtl818x.h +++ b/drivers/net/wireless/rtl818x/rtl818x.h | |||
@@ -185,7 +185,7 @@ struct rtl818x_csr { | |||
185 | u8 reserved_22[4]; | 185 | u8 reserved_22[4]; |
186 | __le16 TALLY_CNT; | 186 | __le16 TALLY_CNT; |
187 | u8 TALLY_SEL; | 187 | u8 TALLY_SEL; |
188 | } __attribute__((packed)); | 188 | } __packed; |
189 | 189 | ||
190 | struct rtl818x_rf_ops { | 190 | struct rtl818x_rf_ops { |
191 | char *name; | 191 | char *name; |
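
The rtl818x descriptors above describe DMA and on-chip layouts, which is why each struct carries the packing annotation: without it the compiler may insert alignment padding and the struct would no longer match the byte layout the hardware expects. A minimal stand-alone comparison is sketched below; the field mix is hypothetical and the exact unpacked size depends on the ABI.

/* Why hardware descriptors are declared packed: the attribute removes
 * compiler-inserted alignment padding (user-space sketch, not a real
 * rtl818x descriptor). */
#include <stdint.h>
#include <stdio.h>

#define __packed __attribute__((packed))

struct desc_natural {		/* compiler may pad between/after fields */
	uint8_t  agc;
	uint32_t flags;
	uint8_t  retry;
};

struct desc_packed {
	uint8_t  agc;
	uint32_t flags;
	uint8_t  retry;
} __packed;

int main(void)
{
	/* Typically prints "natural=12 packed=6" on common ABIs. */
	printf("natural=%zu packed=%zu\n",
	       sizeof(struct desc_natural), sizeof(struct desc_packed));
	return 0;
}
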
diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.h b/drivers/net/wireless/wl12xx/wl1251_acx.h index 26160c45784c..842df310d92a 100644 --- a/drivers/net/wireless/wl12xx/wl1251_acx.h +++ b/drivers/net/wireless/wl12xx/wl1251_acx.h | |||
@@ -60,7 +60,7 @@ struct acx_error_counter { | |||
60 | /* the number of missed sequence numbers in the squentially */ | 60 | /* the number of missed sequence numbers in the squentially */ |
61 | /* values of frames seq numbers */ | 61 | /* values of frames seq numbers */ |
62 | u32 seq_num_miss; | 62 | u32 seq_num_miss; |
63 | } __attribute__ ((packed)); | 63 | } __packed; |
64 | 64 | ||
65 | struct acx_revision { | 65 | struct acx_revision { |
66 | struct acx_header header; | 66 | struct acx_header header; |
@@ -89,7 +89,7 @@ struct acx_revision { | |||
89 | * bits 24 - 31: Chip ID - The WiLink chip ID. | 89 | * bits 24 - 31: Chip ID - The WiLink chip ID. |
90 | */ | 90 | */ |
91 | u32 hw_version; | 91 | u32 hw_version; |
92 | } __attribute__ ((packed)); | 92 | } __packed; |
93 | 93 | ||
94 | enum wl1251_psm_mode { | 94 | enum wl1251_psm_mode { |
95 | /* Active mode */ | 95 | /* Active mode */ |
@@ -111,7 +111,7 @@ struct acx_sleep_auth { | |||
111 | /* 2 - ELP mode: Deep / Max sleep*/ | 111 | /* 2 - ELP mode: Deep / Max sleep*/ |
112 | u8 sleep_auth; | 112 | u8 sleep_auth; |
113 | u8 padding[3]; | 113 | u8 padding[3]; |
114 | } __attribute__ ((packed)); | 114 | } __packed; |
115 | 115 | ||
116 | enum { | 116 | enum { |
117 | HOSTIF_PCI_MASTER_HOST_INDIRECT, | 117 | HOSTIF_PCI_MASTER_HOST_INDIRECT, |
@@ -159,7 +159,7 @@ struct acx_data_path_params { | |||
159 | * complete ring until an interrupt is generated. | 159 | * complete ring until an interrupt is generated. |
160 | */ | 160 | */ |
161 | u32 tx_complete_timeout; | 161 | u32 tx_complete_timeout; |
162 | } __attribute__ ((packed)); | 162 | } __packed; |
163 | 163 | ||
164 | 164 | ||
165 | struct acx_data_path_params_resp { | 165 | struct acx_data_path_params_resp { |
@@ -180,7 +180,7 @@ struct acx_data_path_params_resp { | |||
180 | u32 tx_control_addr; | 180 | u32 tx_control_addr; |
181 | 181 | ||
182 | u32 tx_complete_addr; | 182 | u32 tx_complete_addr; |
183 | } __attribute__ ((packed)); | 183 | } __packed; |
184 | 184 | ||
185 | #define TX_MSDU_LIFETIME_MIN 0 | 185 | #define TX_MSDU_LIFETIME_MIN 0 |
186 | #define TX_MSDU_LIFETIME_MAX 3000 | 186 | #define TX_MSDU_LIFETIME_MAX 3000 |
@@ -197,7 +197,7 @@ struct acx_rx_msdu_lifetime { | |||
197 | * firmware discards the MSDU. | 197 | * firmware discards the MSDU. |
198 | */ | 198 | */ |
199 | u32 lifetime; | 199 | u32 lifetime; |
200 | } __attribute__ ((packed)); | 200 | } __packed; |
201 | 201 | ||
202 | /* | 202 | /* |
203 | * RX Config Options Table | 203 | * RX Config Options Table |
@@ -285,7 +285,7 @@ struct acx_rx_config { | |||
285 | 285 | ||
286 | u32 config_options; | 286 | u32 config_options; |
287 | u32 filter_options; | 287 | u32 filter_options; |
288 | } __attribute__ ((packed)); | 288 | } __packed; |
289 | 289 | ||
290 | enum { | 290 | enum { |
291 | QOS_AC_BE = 0, | 291 | QOS_AC_BE = 0, |
@@ -325,13 +325,13 @@ struct acx_tx_queue_qos_config { | |||
325 | 325 | ||
326 | /* Lowest memory blocks guaranteed for this queue */ | 326 | /* Lowest memory blocks guaranteed for this queue */ |
327 | u16 low_threshold; | 327 | u16 low_threshold; |
328 | } __attribute__ ((packed)); | 328 | } __packed; |
329 | 329 | ||
330 | struct acx_packet_detection { | 330 | struct acx_packet_detection { |
331 | struct acx_header header; | 331 | struct acx_header header; |
332 | 332 | ||
333 | u32 threshold; | 333 | u32 threshold; |
334 | } __attribute__ ((packed)); | 334 | } __packed; |
335 | 335 | ||
336 | 336 | ||
337 | enum acx_slot_type { | 337 | enum acx_slot_type { |
@@ -349,7 +349,7 @@ struct acx_slot { | |||
349 | u8 wone_index; /* Reserved */ | 349 | u8 wone_index; /* Reserved */ |
350 | u8 slot_time; | 350 | u8 slot_time; |
351 | u8 reserved[6]; | 351 | u8 reserved[6]; |
352 | } __attribute__ ((packed)); | 352 | } __packed; |
353 | 353 | ||
354 | 354 | ||
355 | #define ADDRESS_GROUP_MAX (8) | 355 | #define ADDRESS_GROUP_MAX (8) |
@@ -362,7 +362,7 @@ struct acx_dot11_grp_addr_tbl { | |||
362 | u8 num_groups; | 362 | u8 num_groups; |
363 | u8 pad[2]; | 363 | u8 pad[2]; |
364 | u8 mac_table[ADDRESS_GROUP_MAX_LEN]; | 364 | u8 mac_table[ADDRESS_GROUP_MAX_LEN]; |
365 | } __attribute__ ((packed)); | 365 | } __packed; |
366 | 366 | ||
367 | 367 | ||
368 | #define RX_TIMEOUT_PS_POLL_MIN 0 | 368 | #define RX_TIMEOUT_PS_POLL_MIN 0 |
@@ -388,7 +388,7 @@ struct acx_rx_timeout { | |||
388 | * from an UPSD enabled queue. | 388 | * from an UPSD enabled queue. |
389 | */ | 389 | */ |
390 | u16 upsd_timeout; | 390 | u16 upsd_timeout; |
391 | } __attribute__ ((packed)); | 391 | } __packed; |
392 | 392 | ||
393 | #define RTS_THRESHOLD_MIN 0 | 393 | #define RTS_THRESHOLD_MIN 0 |
394 | #define RTS_THRESHOLD_MAX 4096 | 394 | #define RTS_THRESHOLD_MAX 4096 |
@@ -399,7 +399,7 @@ struct acx_rts_threshold { | |||
399 | 399 | ||
400 | u16 threshold; | 400 | u16 threshold; |
401 | u8 pad[2]; | 401 | u8 pad[2]; |
402 | } __attribute__ ((packed)); | 402 | } __packed; |
403 | 403 | ||
404 | struct acx_beacon_filter_option { | 404 | struct acx_beacon_filter_option { |
405 | struct acx_header header; | 405 | struct acx_header header; |
@@ -415,7 +415,7 @@ struct acx_beacon_filter_option { | |||
415 | */ | 415 | */ |
416 | u8 max_num_beacons; | 416 | u8 max_num_beacons; |
417 | u8 pad[2]; | 417 | u8 pad[2]; |
418 | } __attribute__ ((packed)); | 418 | } __packed; |
419 | 419 | ||
420 | /* | 420 | /* |
421 | * ACXBeaconFilterEntry (not 221) | 421 | * ACXBeaconFilterEntry (not 221) |
@@ -461,7 +461,7 @@ struct acx_beacon_filter_ie_table { | |||
461 | u8 num_ie; | 461 | u8 num_ie; |
462 | u8 table[BEACON_FILTER_TABLE_MAX_SIZE]; | 462 | u8 table[BEACON_FILTER_TABLE_MAX_SIZE]; |
463 | u8 pad[3]; | 463 | u8 pad[3]; |
464 | } __attribute__ ((packed)); | 464 | } __packed; |
465 | 465 | ||
466 | #define SYNCH_FAIL_DEFAULT_THRESHOLD 10 /* number of beacons */ | 466 | #define SYNCH_FAIL_DEFAULT_THRESHOLD 10 /* number of beacons */ |
467 | #define NO_BEACON_DEFAULT_TIMEOUT (500) /* in microseconds */ | 467 | #define NO_BEACON_DEFAULT_TIMEOUT (500) /* in microseconds */ |
@@ -494,7 +494,7 @@ struct acx_bt_wlan_coex { | |||
494 | */ | 494 | */ |
495 | u8 enable; | 495 | u8 enable; |
496 | u8 pad[3]; | 496 | u8 pad[3]; |
497 | } __attribute__ ((packed)); | 497 | } __packed; |
498 | 498 | ||
499 | #define PTA_ANTENNA_TYPE_DEF (0) | 499 | #define PTA_ANTENNA_TYPE_DEF (0) |
500 | #define PTA_BT_HP_MAXTIME_DEF (2000) | 500 | #define PTA_BT_HP_MAXTIME_DEF (2000) |
@@ -648,7 +648,7 @@ struct acx_bt_wlan_coex_param { | |||
648 | 648 | ||
649 | /* range: 0 - 20 default: 1 */ | 649 | /* range: 0 - 20 default: 1 */ |
650 | u8 bt_hp_respected_num; | 650 | u8 bt_hp_respected_num; |
651 | } __attribute__ ((packed)); | 651 | } __packed; |
652 | 652 | ||
653 | #define CCA_THRSH_ENABLE_ENERGY_D 0x140A | 653 | #define CCA_THRSH_ENABLE_ENERGY_D 0x140A |
654 | #define CCA_THRSH_DISABLE_ENERGY_D 0xFFEF | 654 | #define CCA_THRSH_DISABLE_ENERGY_D 0xFFEF |
@@ -660,7 +660,7 @@ struct acx_energy_detection { | |||
660 | u16 rx_cca_threshold; | 660 | u16 rx_cca_threshold; |
661 | u8 tx_energy_detection; | 661 | u8 tx_energy_detection; |
662 | u8 pad; | 662 | u8 pad; |
663 | } __attribute__ ((packed)); | 663 | } __packed; |
664 | 664 | ||
665 | #define BCN_RX_TIMEOUT_DEF_VALUE 10000 | 665 | #define BCN_RX_TIMEOUT_DEF_VALUE 10000 |
666 | #define BROADCAST_RX_TIMEOUT_DEF_VALUE 20000 | 666 | #define BROADCAST_RX_TIMEOUT_DEF_VALUE 20000 |
@@ -679,14 +679,14 @@ struct acx_beacon_broadcast { | |||
679 | /* Consecutive PS Poll failures before updating the host */ | 679 | /* Consecutive PS Poll failures before updating the host */ |
680 | u8 ps_poll_threshold; | 680 | u8 ps_poll_threshold; |
681 | u8 pad[2]; | 681 | u8 pad[2]; |
682 | } __attribute__ ((packed)); | 682 | } __packed; |
683 | 683 | ||
684 | struct acx_event_mask { | 684 | struct acx_event_mask { |
685 | struct acx_header header; | 685 | struct acx_header header; |
686 | 686 | ||
687 | u32 event_mask; | 687 | u32 event_mask; |
688 | u32 high_event_mask; /* Unused */ | 688 | u32 high_event_mask; /* Unused */ |
689 | } __attribute__ ((packed)); | 689 | } __packed; |
690 | 690 | ||
691 | #define CFG_RX_FCS BIT(2) | 691 | #define CFG_RX_FCS BIT(2) |
692 | #define CFG_RX_ALL_GOOD BIT(3) | 692 | #define CFG_RX_ALL_GOOD BIT(3) |
@@ -729,7 +729,7 @@ struct acx_fw_gen_frame_rates { | |||
729 | u8 tx_ctrl_frame_mod; /* CCK_* or PBCC_* */ | 729 | u8 tx_ctrl_frame_mod; /* CCK_* or PBCC_* */ |
730 | u8 tx_mgt_frame_rate; | 730 | u8 tx_mgt_frame_rate; |
731 | u8 tx_mgt_frame_mod; | 731 | u8 tx_mgt_frame_mod; |
732 | } __attribute__ ((packed)); | 732 | } __packed; |
733 | 733 | ||
734 | /* STA MAC */ | 734 | /* STA MAC */ |
735 | struct acx_dot11_station_id { | 735 | struct acx_dot11_station_id { |
@@ -737,28 +737,28 @@ struct acx_dot11_station_id { | |||
737 | 737 | ||
738 | u8 mac[ETH_ALEN]; | 738 | u8 mac[ETH_ALEN]; |
739 | u8 pad[2]; | 739 | u8 pad[2]; |
740 | } __attribute__ ((packed)); | 740 | } __packed; |
741 | 741 | ||
742 | struct acx_feature_config { | 742 | struct acx_feature_config { |
743 | struct acx_header header; | 743 | struct acx_header header; |
744 | 744 | ||
745 | u32 options; | 745 | u32 options; |
746 | u32 data_flow_options; | 746 | u32 data_flow_options; |
747 | } __attribute__ ((packed)); | 747 | } __packed; |
748 | 748 | ||
749 | struct acx_current_tx_power { | 749 | struct acx_current_tx_power { |
750 | struct acx_header header; | 750 | struct acx_header header; |
751 | 751 | ||
752 | u8 current_tx_power; | 752 | u8 current_tx_power; |
753 | u8 padding[3]; | 753 | u8 padding[3]; |
754 | } __attribute__ ((packed)); | 754 | } __packed; |
755 | 755 | ||
756 | struct acx_dot11_default_key { | 756 | struct acx_dot11_default_key { |
757 | struct acx_header header; | 757 | struct acx_header header; |
758 | 758 | ||
759 | u8 id; | 759 | u8 id; |
760 | u8 pad[3]; | 760 | u8 pad[3]; |
761 | } __attribute__ ((packed)); | 761 | } __packed; |
762 | 762 | ||
763 | struct acx_tsf_info { | 763 | struct acx_tsf_info { |
764 | struct acx_header header; | 764 | struct acx_header header; |
@@ -769,7 +769,7 @@ struct acx_tsf_info { | |||
769 | u32 last_TBTT_lsb; | 769 | u32 last_TBTT_lsb; |
770 | u8 last_dtim_count; | 770 | u8 last_dtim_count; |
771 | u8 pad[3]; | 771 | u8 pad[3]; |
772 | } __attribute__ ((packed)); | 772 | } __packed; |
773 | 773 | ||
774 | enum acx_wake_up_event { | 774 | enum acx_wake_up_event { |
775 | WAKE_UP_EVENT_BEACON_BITMAP = 0x01, /* Wake on every Beacon*/ | 775 | WAKE_UP_EVENT_BEACON_BITMAP = 0x01, /* Wake on every Beacon*/ |
@@ -785,7 +785,7 @@ struct acx_wake_up_condition { | |||
785 | u8 wake_up_event; /* Only one bit can be set */ | 785 | u8 wake_up_event; /* Only one bit can be set */ |
786 | u8 listen_interval; | 786 | u8 listen_interval; |
787 | u8 pad[2]; | 787 | u8 pad[2]; |
788 | } __attribute__ ((packed)); | 788 | } __packed; |
789 | 789 | ||
790 | struct acx_aid { | 790 | struct acx_aid { |
791 | struct acx_header header; | 791 | struct acx_header header; |
@@ -795,7 +795,7 @@ struct acx_aid { | |||
795 | */ | 795 | */ |
796 | u16 aid; | 796 | u16 aid; |
797 | u8 pad[2]; | 797 | u8 pad[2]; |
798 | } __attribute__ ((packed)); | 798 | } __packed; |
799 | 799 | ||
800 | enum acx_preamble_type { | 800 | enum acx_preamble_type { |
801 | ACX_PREAMBLE_LONG = 0, | 801 | ACX_PREAMBLE_LONG = 0, |
@@ -811,7 +811,7 @@ struct acx_preamble { | |||
811 | */ | 811 | */ |
812 | u8 preamble; | 812 | u8 preamble; |
813 | u8 padding[3]; | 813 | u8 padding[3]; |
814 | } __attribute__ ((packed)); | 814 | } __packed; |
815 | 815 | ||
816 | enum acx_ctsprotect_type { | 816 | enum acx_ctsprotect_type { |
817 | CTSPROTECT_DISABLE = 0, | 817 | CTSPROTECT_DISABLE = 0, |
@@ -822,11 +822,11 @@ struct acx_ctsprotect { | |||
822 | struct acx_header header; | 822 | struct acx_header header; |
823 | u8 ctsprotect; | 823 | u8 ctsprotect; |
824 | u8 padding[3]; | 824 | u8 padding[3]; |
825 | } __attribute__ ((packed)); | 825 | } __packed; |
826 | 826 | ||
827 | struct acx_tx_statistics { | 827 | struct acx_tx_statistics { |
828 | u32 internal_desc_overflow; | 828 | u32 internal_desc_overflow; |
829 | } __attribute__ ((packed)); | 829 | } __packed; |
830 | 830 | ||
831 | struct acx_rx_statistics { | 831 | struct acx_rx_statistics { |
832 | u32 out_of_mem; | 832 | u32 out_of_mem; |
@@ -837,14 +837,14 @@ struct acx_rx_statistics { | |||
837 | u32 xfr_hint_trig; | 837 | u32 xfr_hint_trig; |
838 | u32 path_reset; | 838 | u32 path_reset; |
839 | u32 reset_counter; | 839 | u32 reset_counter; |
840 | } __attribute__ ((packed)); | 840 | } __packed; |
841 | 841 | ||
842 | struct acx_dma_statistics { | 842 | struct acx_dma_statistics { |
843 | u32 rx_requested; | 843 | u32 rx_requested; |
844 | u32 rx_errors; | 844 | u32 rx_errors; |
845 | u32 tx_requested; | 845 | u32 tx_requested; |
846 | u32 tx_errors; | 846 | u32 tx_errors; |
847 | } __attribute__ ((packed)); | 847 | } __packed; |
848 | 848 | ||
849 | struct acx_isr_statistics { | 849 | struct acx_isr_statistics { |
850 | /* host command complete */ | 850 | /* host command complete */ |
@@ -903,7 +903,7 @@ struct acx_isr_statistics { | |||
903 | 903 | ||
904 | /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */ | 904 | /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */ |
905 | u32 low_rssi; | 905 | u32 low_rssi; |
906 | } __attribute__ ((packed)); | 906 | } __packed; |
907 | 907 | ||
908 | struct acx_wep_statistics { | 908 | struct acx_wep_statistics { |
909 | /* WEP address keys configured */ | 909 | /* WEP address keys configured */ |
@@ -925,7 +925,7 @@ struct acx_wep_statistics { | |||
925 | 925 | ||
926 | /* WEP decrypt interrupts */ | 926 | /* WEP decrypt interrupts */ |
927 | u32 interrupt; | 927 | u32 interrupt; |
928 | } __attribute__ ((packed)); | 928 | } __packed; |
929 | 929 | ||
930 | #define ACX_MISSED_BEACONS_SPREAD 10 | 930 | #define ACX_MISSED_BEACONS_SPREAD 10 |
931 | 931 | ||
@@ -985,12 +985,12 @@ struct acx_pwr_statistics { | |||
985 | 985 | ||
986 | /* the number of beacons in awake mode */ | 986 | /* the number of beacons in awake mode */ |
987 | u32 rcvd_awake_beacons; | 987 | u32 rcvd_awake_beacons; |
988 | } __attribute__ ((packed)); | 988 | } __packed; |
989 | 989 | ||
990 | struct acx_mic_statistics { | 990 | struct acx_mic_statistics { |
991 | u32 rx_pkts; | 991 | u32 rx_pkts; |
992 | u32 calc_failure; | 992 | u32 calc_failure; |
993 | } __attribute__ ((packed)); | 993 | } __packed; |
994 | 994 | ||
995 | struct acx_aes_statistics { | 995 | struct acx_aes_statistics { |
996 | u32 encrypt_fail; | 996 | u32 encrypt_fail; |
@@ -999,7 +999,7 @@ struct acx_aes_statistics { | |||
999 | u32 decrypt_packets; | 999 | u32 decrypt_packets; |
1000 | u32 encrypt_interrupt; | 1000 | u32 encrypt_interrupt; |
1001 | u32 decrypt_interrupt; | 1001 | u32 decrypt_interrupt; |
1002 | } __attribute__ ((packed)); | 1002 | } __packed; |
1003 | 1003 | ||
1004 | struct acx_event_statistics { | 1004 | struct acx_event_statistics { |
1005 | u32 heart_beat; | 1005 | u32 heart_beat; |
@@ -1010,7 +1010,7 @@ struct acx_event_statistics { | |||
1010 | u32 oom_late; | 1010 | u32 oom_late; |
1011 | u32 phy_transmit_error; | 1011 | u32 phy_transmit_error; |
1012 | u32 tx_stuck; | 1012 | u32 tx_stuck; |
1013 | } __attribute__ ((packed)); | 1013 | } __packed; |
1014 | 1014 | ||
1015 | struct acx_ps_statistics { | 1015 | struct acx_ps_statistics { |
1016 | u32 pspoll_timeouts; | 1016 | u32 pspoll_timeouts; |
@@ -1020,7 +1020,7 @@ struct acx_ps_statistics { | |||
1020 | u32 pspoll_max_apturn; | 1020 | u32 pspoll_max_apturn; |
1021 | u32 pspoll_utilization; | 1021 | u32 pspoll_utilization; |
1022 | u32 upsd_utilization; | 1022 | u32 upsd_utilization; |
1023 | } __attribute__ ((packed)); | 1023 | } __packed; |
1024 | 1024 | ||
1025 | struct acx_rxpipe_statistics { | 1025 | struct acx_rxpipe_statistics { |
1026 | u32 rx_prep_beacon_drop; | 1026 | u32 rx_prep_beacon_drop; |
@@ -1028,7 +1028,7 @@ struct acx_rxpipe_statistics { | |||
1028 | u32 beacon_buffer_thres_host_int_trig_rx_data; | 1028 | u32 beacon_buffer_thres_host_int_trig_rx_data; |
1029 | u32 missed_beacon_host_int_trig_rx_data; | 1029 | u32 missed_beacon_host_int_trig_rx_data; |
1030 | u32 tx_xfr_host_int_trig_rx_data; | 1030 | u32 tx_xfr_host_int_trig_rx_data; |
1031 | } __attribute__ ((packed)); | 1031 | } __packed; |
1032 | 1032 | ||
1033 | struct acx_statistics { | 1033 | struct acx_statistics { |
1034 | struct acx_header header; | 1034 | struct acx_header header; |
@@ -1044,7 +1044,7 @@ struct acx_statistics { | |||
1044 | struct acx_event_statistics event; | 1044 | struct acx_event_statistics event; |
1045 | struct acx_ps_statistics ps; | 1045 | struct acx_ps_statistics ps; |
1046 | struct acx_rxpipe_statistics rxpipe; | 1046 | struct acx_rxpipe_statistics rxpipe; |
1047 | } __attribute__ ((packed)); | 1047 | } __packed; |
1048 | 1048 | ||
1049 | #define ACX_MAX_RATE_CLASSES 8 | 1049 | #define ACX_MAX_RATE_CLASSES 8 |
1050 | #define ACX_RATE_MASK_UNSPECIFIED 0 | 1050 | #define ACX_RATE_MASK_UNSPECIFIED 0 |
@@ -1063,7 +1063,7 @@ struct acx_rate_policy { | |||
1063 | 1063 | ||
1064 | u32 rate_class_cnt; | 1064 | u32 rate_class_cnt; |
1065 | struct acx_rate_class rate_class[ACX_MAX_RATE_CLASSES]; | 1065 | struct acx_rate_class rate_class[ACX_MAX_RATE_CLASSES]; |
1066 | } __attribute__ ((packed)); | 1066 | } __packed; |
1067 | 1067 | ||
1068 | struct wl1251_acx_memory { | 1068 | struct wl1251_acx_memory { |
1069 | __le16 num_stations; /* number of STAs to be supported. */ | 1069 | __le16 num_stations; /* number of STAs to be supported. */ |
@@ -1082,7 +1082,7 @@ struct wl1251_acx_memory { | |||
1082 | u8 tx_min_mem_block_num; | 1082 | u8 tx_min_mem_block_num; |
1083 | u8 num_ssid_profiles; | 1083 | u8 num_ssid_profiles; |
1084 | __le16 debug_buffer_size; | 1084 | __le16 debug_buffer_size; |
1085 | } __attribute__ ((packed)); | 1085 | } __packed; |
1086 | 1086 | ||
1087 | 1087 | ||
1088 | #define ACX_RX_DESC_MIN 1 | 1088 | #define ACX_RX_DESC_MIN 1 |
@@ -1094,7 +1094,7 @@ struct wl1251_acx_rx_queue_config { | |||
1094 | u8 type; | 1094 | u8 type; |
1095 | u8 priority; | 1095 | u8 priority; |
1096 | __le32 dma_address; | 1096 | __le32 dma_address; |
1097 | } __attribute__ ((packed)); | 1097 | } __packed; |
1098 | 1098 | ||
1099 | #define ACX_TX_DESC_MIN 1 | 1099 | #define ACX_TX_DESC_MIN 1 |
1100 | #define ACX_TX_DESC_MAX 127 | 1100 | #define ACX_TX_DESC_MAX 127 |
@@ -1103,7 +1103,7 @@ struct wl1251_acx_tx_queue_config { | |||
1103 | u8 num_descs; | 1103 | u8 num_descs; |
1104 | u8 pad[2]; | 1104 | u8 pad[2]; |
1105 | u8 attributes; | 1105 | u8 attributes; |
1106 | } __attribute__ ((packed)); | 1106 | } __packed; |
1107 | 1107 | ||
1108 | #define MAX_TX_QUEUE_CONFIGS 5 | 1108 | #define MAX_TX_QUEUE_CONFIGS 5 |
1109 | #define MAX_TX_QUEUES 4 | 1109 | #define MAX_TX_QUEUES 4 |
@@ -1113,7 +1113,7 @@ struct wl1251_acx_config_memory { | |||
1113 | struct wl1251_acx_memory mem_config; | 1113 | struct wl1251_acx_memory mem_config; |
1114 | struct wl1251_acx_rx_queue_config rx_queue_config; | 1114 | struct wl1251_acx_rx_queue_config rx_queue_config; |
1115 | struct wl1251_acx_tx_queue_config tx_queue_config[MAX_TX_QUEUE_CONFIGS]; | 1115 | struct wl1251_acx_tx_queue_config tx_queue_config[MAX_TX_QUEUE_CONFIGS]; |
1116 | } __attribute__ ((packed)); | 1116 | } __packed; |
1117 | 1117 | ||
1118 | struct wl1251_acx_mem_map { | 1118 | struct wl1251_acx_mem_map { |
1119 | struct acx_header header; | 1119 | struct acx_header header; |
@@ -1147,7 +1147,7 @@ struct wl1251_acx_mem_map { | |||
1147 | 1147 | ||
1148 | /* Number of blocks FW allocated for RX packets */ | 1148 | /* Number of blocks FW allocated for RX packets */ |
1149 | u32 num_rx_mem_blocks; | 1149 | u32 num_rx_mem_blocks; |
1150 | } __attribute__ ((packed)); | 1150 | } __packed; |
1151 | 1151 | ||
1152 | 1152 | ||
1153 | struct wl1251_acx_wr_tbtt_and_dtim { | 1153 | struct wl1251_acx_wr_tbtt_and_dtim { |
@@ -1164,7 +1164,7 @@ struct wl1251_acx_wr_tbtt_and_dtim { | |||
1164 | */ | 1164 | */ |
1165 | u8 dtim; | 1165 | u8 dtim; |
1166 | u8 padding; | 1166 | u8 padding; |
1167 | } __attribute__ ((packed)); | 1167 | } __packed; |
1168 | 1168 | ||
1169 | struct wl1251_acx_ac_cfg { | 1169 | struct wl1251_acx_ac_cfg { |
1170 | struct acx_header header; | 1170 | struct acx_header header; |
@@ -1194,7 +1194,7 @@ struct wl1251_acx_ac_cfg { | |||
1194 | 1194 | ||
1195 | /* The TX Op Limit (in microseconds) for the access class. */ | 1195 | /* The TX Op Limit (in microseconds) for the access class. */ |
1196 | u16 txop_limit; | 1196 | u16 txop_limit; |
1197 | } __attribute__ ((packed)); | 1197 | } __packed; |
1198 | 1198 | ||
1199 | 1199 | ||
1200 | enum wl1251_acx_channel_type { | 1200 | enum wl1251_acx_channel_type { |
@@ -1245,7 +1245,7 @@ struct wl1251_acx_tid_cfg { | |||
1245 | 1245 | ||
1246 | /* not supported */ | 1246 | /* not supported */ |
1247 | u32 apsdconf[2]; | 1247 | u32 apsdconf[2]; |
1248 | } __attribute__ ((packed)); | 1248 | } __packed; |
1249 | 1249 | ||
1250 | /************************************************************************* | 1250 | /************************************************************************* |
1251 | 1251 | ||
diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.h b/drivers/net/wireless/wl12xx/wl1251_cmd.h index 4ad67cae94d2..7e70dd5a21b8 100644 --- a/drivers/net/wireless/wl12xx/wl1251_cmd.h +++ b/drivers/net/wireless/wl12xx/wl1251_cmd.h | |||
@@ -106,7 +106,7 @@ struct wl1251_cmd_header { | |||
106 | u16 status; | 106 | u16 status; |
107 | /* payload */ | 107 | /* payload */ |
108 | u8 data[0]; | 108 | u8 data[0]; |
109 | } __attribute__ ((packed)); | 109 | } __packed; |
110 | 110 | ||
111 | struct wl1251_command { | 111 | struct wl1251_command { |
112 | struct wl1251_cmd_header header; | 112 | struct wl1251_cmd_header header; |
@@ -201,7 +201,7 @@ struct wl1251_scan_parameters { | |||
201 | u8 ssid_len; | 201 | u8 ssid_len; |
202 | u8 ssid[32]; | 202 | u8 ssid[32]; |
203 | 203 | ||
204 | } __attribute__ ((packed)); | 204 | } __packed; |
205 | 205 | ||
206 | struct wl1251_scan_ch_parameters { | 206 | struct wl1251_scan_ch_parameters { |
207 | u32 min_duration; /* in TU */ | 207 | u32 min_duration; /* in TU */ |
@@ -218,7 +218,7 @@ struct wl1251_scan_ch_parameters { | |||
218 | u8 tx_power_att; | 218 | u8 tx_power_att; |
219 | u8 channel; | 219 | u8 channel; |
220 | u8 pad[3]; | 220 | u8 pad[3]; |
221 | } __attribute__ ((packed)); | 221 | } __packed; |
222 | 222 | ||
223 | /* SCAN parameters */ | 223 | /* SCAN parameters */ |
224 | #define SCAN_MAX_NUM_OF_CHANNELS 16 | 224 | #define SCAN_MAX_NUM_OF_CHANNELS 16 |
@@ -228,7 +228,7 @@ struct wl1251_cmd_scan { | |||
228 | 228 | ||
229 | struct wl1251_scan_parameters params; | 229 | struct wl1251_scan_parameters params; |
230 | struct wl1251_scan_ch_parameters channels[SCAN_MAX_NUM_OF_CHANNELS]; | 230 | struct wl1251_scan_ch_parameters channels[SCAN_MAX_NUM_OF_CHANNELS]; |
231 | } __attribute__ ((packed)); | 231 | } __packed; |
232 | 232 | ||
233 | enum { | 233 | enum { |
234 | BSS_TYPE_IBSS = 0, | 234 | BSS_TYPE_IBSS = 0, |
@@ -276,14 +276,14 @@ struct cmd_join { | |||
276 | u8 tx_mgt_frame_rate; /* OBSOLETE */ | 276 | u8 tx_mgt_frame_rate; /* OBSOLETE */ |
277 | u8 tx_mgt_frame_mod; /* OBSOLETE */ | 277 | u8 tx_mgt_frame_mod; /* OBSOLETE */ |
278 | u8 reserved; | 278 | u8 reserved; |
279 | } __attribute__ ((packed)); | 279 | } __packed; |
280 | 280 | ||
281 | struct cmd_enabledisable_path { | 281 | struct cmd_enabledisable_path { |
282 | struct wl1251_cmd_header header; | 282 | struct wl1251_cmd_header header; |
283 | 283 | ||
284 | u8 channel; | 284 | u8 channel; |
285 | u8 padding[3]; | 285 | u8 padding[3]; |
286 | } __attribute__ ((packed)); | 286 | } __packed; |
287 | 287 | ||
288 | #define WL1251_MAX_TEMPLATE_SIZE 300 | 288 | #define WL1251_MAX_TEMPLATE_SIZE 300 |
289 | 289 | ||
@@ -292,7 +292,7 @@ struct wl1251_cmd_packet_template { | |||
292 | 292 | ||
293 | __le16 size; | 293 | __le16 size; |
294 | u8 data[0]; | 294 | u8 data[0]; |
295 | } __attribute__ ((packed)); | 295 | } __packed; |
296 | 296 | ||
297 | #define TIM_ELE_ID 5 | 297 | #define TIM_ELE_ID 5 |
298 | #define PARTIAL_VBM_MAX 251 | 298 | #define PARTIAL_VBM_MAX 251 |
@@ -304,7 +304,7 @@ struct wl1251_tim { | |||
304 | u8 dtim_period; | 304 | u8 dtim_period; |
305 | u8 bitmap_ctrl; | 305 | u8 bitmap_ctrl; |
306 | u8 pvb_field[PARTIAL_VBM_MAX]; /* Partial Virtual Bitmap */ | 306 | u8 pvb_field[PARTIAL_VBM_MAX]; /* Partial Virtual Bitmap */ |
307 | } __attribute__ ((packed)); | 307 | } __packed; |
308 | 308 | ||
309 | /* Virtual Bit Map update */ | 309 | /* Virtual Bit Map update */ |
310 | struct wl1251_cmd_vbm_update { | 310 | struct wl1251_cmd_vbm_update { |
@@ -312,7 +312,7 @@ struct wl1251_cmd_vbm_update { | |||
312 | __le16 len; | 312 | __le16 len; |
313 | u8 padding[2]; | 313 | u8 padding[2]; |
314 | struct wl1251_tim tim; | 314 | struct wl1251_tim tim; |
315 | } __attribute__ ((packed)); | 315 | } __packed; |
316 | 316 | ||
317 | enum wl1251_cmd_ps_mode { | 317 | enum wl1251_cmd_ps_mode { |
318 | STATION_ACTIVE_MODE, | 318 | STATION_ACTIVE_MODE, |
@@ -333,7 +333,7 @@ struct wl1251_cmd_ps_params { | |||
333 | u8 hang_over_period; | 333 | u8 hang_over_period; |
334 | u16 null_data_rate; | 334 | u16 null_data_rate; |
335 | u8 pad[2]; | 335 | u8 pad[2]; |
336 | } __attribute__ ((packed)); | 336 | } __packed; |
337 | 337 | ||
338 | struct wl1251_cmd_trigger_scan_to { | 338 | struct wl1251_cmd_trigger_scan_to { |
339 | struct wl1251_cmd_header header; | 339 | struct wl1251_cmd_header header; |
@@ -411,7 +411,7 @@ struct wl1251_cmd_set_keys { | |||
411 | u8 key[MAX_KEY_SIZE]; | 411 | u8 key[MAX_KEY_SIZE]; |
412 | u16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY]; | 412 | u16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY]; |
413 | u32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY]; | 413 | u32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY]; |
414 | } __attribute__ ((packed)); | 414 | } __packed; |
415 | 415 | ||
416 | 416 | ||
417 | #endif /* __WL1251_CMD_H__ */ | 417 | #endif /* __WL1251_CMD_H__ */ |
diff --git a/drivers/net/wireless/wl12xx/wl1251_event.h b/drivers/net/wireless/wl12xx/wl1251_event.h index be0ac54d6246..f48a2b66bc5a 100644 --- a/drivers/net/wireless/wl12xx/wl1251_event.h +++ b/drivers/net/wireless/wl12xx/wl1251_event.h | |||
@@ -82,7 +82,7 @@ struct event_debug_report { | |||
82 | u32 report_1; | 82 | u32 report_1; |
83 | u32 report_2; | 83 | u32 report_2; |
84 | u32 report_3; | 84 | u32 report_3; |
85 | } __attribute__ ((packed)); | 85 | } __packed; |
86 | 86 | ||
87 | struct event_mailbox { | 87 | struct event_mailbox { |
88 | u32 events_vector; | 88 | u32 events_vector; |
@@ -112,7 +112,7 @@ struct event_mailbox { | |||
112 | struct event_debug_report report; | 112 | struct event_debug_report report; |
113 | u8 average_snr_level; | 113 | u8 average_snr_level; |
114 | u8 padding[19]; | 114 | u8 padding[19]; |
115 | } __attribute__ ((packed)); | 115 | } __packed; |
116 | 116 | ||
117 | int wl1251_event_unmask(struct wl1251 *wl); | 117 | int wl1251_event_unmask(struct wl1251 *wl); |
118 | void wl1251_event_mbox_config(struct wl1251 *wl); | 118 | void wl1251_event_mbox_config(struct wl1251 *wl); |
diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.h b/drivers/net/wireless/wl12xx/wl1251_rx.h index 563a3fde40fb..da4e53406a0e 100644 --- a/drivers/net/wireless/wl12xx/wl1251_rx.h +++ b/drivers/net/wireless/wl12xx/wl1251_rx.h | |||
@@ -117,7 +117,7 @@ struct wl1251_rx_descriptor { | |||
117 | s8 rssi; /* in dB */ | 117 | s8 rssi; /* in dB */ |
118 | u8 rcpi; /* in dB */ | 118 | u8 rcpi; /* in dB */ |
119 | u8 snr; /* in dB */ | 119 | u8 snr; /* in dB */ |
120 | } __attribute__ ((packed)); | 120 | } __packed; |
121 | 121 | ||
122 | void wl1251_rx(struct wl1251 *wl); | 122 | void wl1251_rx(struct wl1251 *wl); |
123 | 123 | ||
diff --git a/drivers/net/wireless/wl12xx/wl1251_tx.h b/drivers/net/wireless/wl12xx/wl1251_tx.h index 55856c6bb97a..65c4be8c2e80 100644 --- a/drivers/net/wireless/wl12xx/wl1251_tx.h +++ b/drivers/net/wireless/wl12xx/wl1251_tx.h | |||
@@ -109,7 +109,7 @@ struct tx_control { | |||
109 | unsigned xfer_pad:1; | 109 | unsigned xfer_pad:1; |
110 | 110 | ||
111 | unsigned reserved:7; | 111 | unsigned reserved:7; |
112 | } __attribute__ ((packed)); | 112 | } __packed; |
113 | 113 | ||
114 | 114 | ||
115 | struct tx_double_buffer_desc { | 115 | struct tx_double_buffer_desc { |
@@ -156,7 +156,7 @@ struct tx_double_buffer_desc { | |||
156 | u8 num_mem_blocks; | 156 | u8 num_mem_blocks; |
157 | 157 | ||
158 | u8 reserved; | 158 | u8 reserved; |
159 | } __attribute__ ((packed)); | 159 | } __packed; |
160 | 160 | ||
161 | enum { | 161 | enum { |
162 | TX_SUCCESS = 0, | 162 | TX_SUCCESS = 0, |
@@ -208,7 +208,7 @@ struct tx_result { | |||
208 | 208 | ||
209 | /* See done_1 */ | 209 | /* See done_1 */ |
210 | u8 done_2; | 210 | u8 done_2; |
211 | } __attribute__ ((packed)); | 211 | } __packed; |
212 | 212 | ||
213 | static inline int wl1251_tx_get_queue(int queue) | 213 | static inline int wl1251_tx_get_queue(int queue) |
214 | { | 214 | { |
diff --git a/drivers/net/wireless/wl12xx/wl1271.h b/drivers/net/wireless/wl12xx/wl1271.h index 6f1b6b5640c0..9af14646c278 100644 --- a/drivers/net/wireless/wl12xx/wl1271.h +++ b/drivers/net/wireless/wl12xx/wl1271.h | |||
@@ -141,7 +141,7 @@ struct wl1271_nvs_file { | |||
141 | u8 dyn_radio_params[WL1271_NVS_FEM_COUNT] | 141 | u8 dyn_radio_params[WL1271_NVS_FEM_COUNT] |
142 | [WL1271_NVS_DYN_RADIO_PARAMS_SIZE_PADDED]; | 142 | [WL1271_NVS_DYN_RADIO_PARAMS_SIZE_PADDED]; |
143 | u8 ini_spare[WL1271_NVS_INI_SPARE_SIZE]; | 143 | u8 ini_spare[WL1271_NVS_INI_SPARE_SIZE]; |
144 | } __attribute__ ((packed)); | 144 | } __packed; |
145 | 145 | ||
146 | /* | 146 | /* |
147 | * Enable/disable 802.11a support for WL1273 | 147 | * Enable/disable 802.11a support for WL1273 |
@@ -317,7 +317,7 @@ struct wl1271_fw_status { | |||
317 | __le32 tx_released_blks[NUM_TX_QUEUES]; | 317 | __le32 tx_released_blks[NUM_TX_QUEUES]; |
318 | __le32 fw_localtime; | 318 | __le32 fw_localtime; |
319 | __le32 padding[2]; | 319 | __le32 padding[2]; |
320 | } __attribute__ ((packed)); | 320 | } __packed; |
321 | 321 | ||
322 | struct wl1271_rx_mem_pool_addr { | 322 | struct wl1271_rx_mem_pool_addr { |
323 | u32 addr; | 323 | u32 addr; |
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.h b/drivers/net/wireless/wl12xx/wl1271_acx.h index 420e7e2fc021..4c87e601df2f 100644 --- a/drivers/net/wireless/wl12xx/wl1271_acx.h +++ b/drivers/net/wireless/wl12xx/wl1271_acx.h | |||
@@ -75,7 +75,7 @@ struct acx_header { | |||
75 | 75 | ||
76 | /* payload length (not including headers */ | 76 | /* payload length (not including headers */ |
77 | __le16 len; | 77 | __le16 len; |
78 | } __attribute__ ((packed)); | 78 | } __packed; |
79 | 79 | ||
80 | struct acx_error_counter { | 80 | struct acx_error_counter { |
81 | struct acx_header header; | 81 | struct acx_header header; |
@@ -98,7 +98,7 @@ struct acx_error_counter { | |||
98 | /* the number of missed sequence numbers in the squentially */ | 98 | /* the number of missed sequence numbers in the squentially */ |
99 | /* values of frames seq numbers */ | 99 | /* values of frames seq numbers */ |
100 | __le32 seq_num_miss; | 100 | __le32 seq_num_miss; |
101 | } __attribute__ ((packed)); | 101 | } __packed; |
102 | 102 | ||
103 | struct acx_revision { | 103 | struct acx_revision { |
104 | struct acx_header header; | 104 | struct acx_header header; |
@@ -127,7 +127,7 @@ struct acx_revision { | |||
127 | * bits 24 - 31: Chip ID - The WiLink chip ID. | 127 | * bits 24 - 31: Chip ID - The WiLink chip ID. |
128 | */ | 128 | */ |
129 | __le32 hw_version; | 129 | __le32 hw_version; |
130 | } __attribute__ ((packed)); | 130 | } __packed; |
131 | 131 | ||
132 | enum wl1271_psm_mode { | 132 | enum wl1271_psm_mode { |
133 | /* Active mode */ | 133 | /* Active mode */ |
@@ -149,7 +149,7 @@ struct acx_sleep_auth { | |||
149 | /* 2 - ELP mode: Deep / Max sleep*/ | 149 | /* 2 - ELP mode: Deep / Max sleep*/ |
150 | u8 sleep_auth; | 150 | u8 sleep_auth; |
151 | u8 padding[3]; | 151 | u8 padding[3]; |
152 | } __attribute__ ((packed)); | 152 | } __packed; |
153 | 153 | ||
154 | enum { | 154 | enum { |
155 | HOSTIF_PCI_MASTER_HOST_INDIRECT, | 155 | HOSTIF_PCI_MASTER_HOST_INDIRECT, |
@@ -187,7 +187,7 @@ struct acx_rx_msdu_lifetime { | |||
187 | * firmware discards the MSDU. | 187 | * firmware discards the MSDU. |
188 | */ | 188 | */ |
189 | __le32 lifetime; | 189 | __le32 lifetime; |
190 | } __attribute__ ((packed)); | 190 | } __packed; |
191 | 191 | ||
192 | /* | 192 | /* |
193 | * RX Config Options Table | 193 | * RX Config Options Table |
@@ -275,13 +275,13 @@ struct acx_rx_config { | |||
275 | 275 | ||
276 | __le32 config_options; | 276 | __le32 config_options; |
277 | __le32 filter_options; | 277 | __le32 filter_options; |
278 | } __attribute__ ((packed)); | 278 | } __packed; |
279 | 279 | ||
280 | struct acx_packet_detection { | 280 | struct acx_packet_detection { |
281 | struct acx_header header; | 281 | struct acx_header header; |
282 | 282 | ||
283 | __le32 threshold; | 283 | __le32 threshold; |
284 | } __attribute__ ((packed)); | 284 | } __packed; |
285 | 285 | ||
286 | 286 | ||
287 | enum acx_slot_type { | 287 | enum acx_slot_type { |
@@ -299,7 +299,7 @@ struct acx_slot { | |||
299 | u8 wone_index; /* Reserved */ | 299 | u8 wone_index; /* Reserved */ |
300 | u8 slot_time; | 300 | u8 slot_time; |
301 | u8 reserved[6]; | 301 | u8 reserved[6]; |
302 | } __attribute__ ((packed)); | 302 | } __packed; |
303 | 303 | ||
304 | 304 | ||
305 | #define ACX_MC_ADDRESS_GROUP_MAX (8) | 305 | #define ACX_MC_ADDRESS_GROUP_MAX (8) |
@@ -312,21 +312,21 @@ struct acx_dot11_grp_addr_tbl { | |||
312 | u8 num_groups; | 312 | u8 num_groups; |
313 | u8 pad[2]; | 313 | u8 pad[2]; |
314 | u8 mac_table[ADDRESS_GROUP_MAX_LEN]; | 314 | u8 mac_table[ADDRESS_GROUP_MAX_LEN]; |
315 | } __attribute__ ((packed)); | 315 | } __packed; |
316 | 316 | ||
317 | struct acx_rx_timeout { | 317 | struct acx_rx_timeout { |
318 | struct acx_header header; | 318 | struct acx_header header; |
319 | 319 | ||
320 | __le16 ps_poll_timeout; | 320 | __le16 ps_poll_timeout; |
321 | __le16 upsd_timeout; | 321 | __le16 upsd_timeout; |
322 | } __attribute__ ((packed)); | 322 | } __packed; |
323 | 323 | ||
324 | struct acx_rts_threshold { | 324 | struct acx_rts_threshold { |
325 | struct acx_header header; | 325 | struct acx_header header; |
326 | 326 | ||
327 | __le16 threshold; | 327 | __le16 threshold; |
328 | u8 pad[2]; | 328 | u8 pad[2]; |
329 | } __attribute__ ((packed)); | 329 | } __packed; |
330 | 330 | ||
331 | struct acx_beacon_filter_option { | 331 | struct acx_beacon_filter_option { |
332 | struct acx_header header; | 332 | struct acx_header header; |
@@ -342,7 +342,7 @@ struct acx_beacon_filter_option { | |||
342 | */ | 342 | */ |
343 | u8 max_num_beacons; | 343 | u8 max_num_beacons; |
344 | u8 pad[2]; | 344 | u8 pad[2]; |
345 | } __attribute__ ((packed)); | 345 | } __packed; |
346 | 346 | ||
347 | /* | 347 | /* |
348 | * ACXBeaconFilterEntry (not 221) | 348 | * ACXBeaconFilterEntry (not 221) |
@@ -383,21 +383,21 @@ struct acx_beacon_filter_ie_table { | |||
383 | u8 num_ie; | 383 | u8 num_ie; |
384 | u8 pad[3]; | 384 | u8 pad[3]; |
385 | u8 table[BEACON_FILTER_TABLE_MAX_SIZE]; | 385 | u8 table[BEACON_FILTER_TABLE_MAX_SIZE]; |
386 | } __attribute__ ((packed)); | 386 | } __packed; |
387 | 387 | ||
388 | struct acx_conn_monit_params { | 388 | struct acx_conn_monit_params { |
389 | struct acx_header header; | 389 | struct acx_header header; |
390 | 390 | ||
391 | __le32 synch_fail_thold; /* number of beacons missed */ | 391 | __le32 synch_fail_thold; /* number of beacons missed */ |
392 | __le32 bss_lose_timeout; /* number of TU's from synch fail */ | 392 | __le32 bss_lose_timeout; /* number of TU's from synch fail */ |
393 | } __attribute__ ((packed)); | 393 | } __packed; |
394 | 394 | ||
395 | struct acx_bt_wlan_coex { | 395 | struct acx_bt_wlan_coex { |
396 | struct acx_header header; | 396 | struct acx_header header; |
397 | 397 | ||
398 | u8 enable; | 398 | u8 enable; |
399 | u8 pad[3]; | 399 | u8 pad[3]; |
400 | } __attribute__ ((packed)); | 400 | } __packed; |
401 | 401 | ||
402 | struct acx_bt_wlan_coex_param { | 402 | struct acx_bt_wlan_coex_param { |
403 | struct acx_header header; | 403 | struct acx_header header; |
@@ -405,7 +405,7 @@ struct acx_bt_wlan_coex_param { | |||
405 | __le32 params[CONF_SG_PARAMS_MAX]; | 405 | __le32 params[CONF_SG_PARAMS_MAX]; |
406 | u8 param_idx; | 406 | u8 param_idx; |
407 | u8 padding[3]; | 407 | u8 padding[3]; |
408 | } __attribute__ ((packed)); | 408 | } __packed; |
409 | 409 | ||
410 | struct acx_dco_itrim_params { | 410 | struct acx_dco_itrim_params { |
411 | struct acx_header header; | 411 | struct acx_header header; |
@@ -413,7 +413,7 @@ struct acx_dco_itrim_params { | |||
413 | u8 enable; | 413 | u8 enable; |
414 | u8 padding[3]; | 414 | u8 padding[3]; |
415 | __le32 timeout; | 415 | __le32 timeout; |
416 | } __attribute__ ((packed)); | 416 | } __packed; |
417 | 417 | ||
418 | struct acx_energy_detection { | 418 | struct acx_energy_detection { |
419 | struct acx_header header; | 419 | struct acx_header header; |
@@ -422,7 +422,7 @@ struct acx_energy_detection { | |||
422 | __le16 rx_cca_threshold; | 422 | __le16 rx_cca_threshold; |
423 | u8 tx_energy_detection; | 423 | u8 tx_energy_detection; |
424 | u8 pad; | 424 | u8 pad; |
425 | } __attribute__ ((packed)); | 425 | } __packed; |
426 | 426 | ||
427 | struct acx_beacon_broadcast { | 427 | struct acx_beacon_broadcast { |
428 | struct acx_header header; | 428 | struct acx_header header; |
@@ -436,14 +436,14 @@ struct acx_beacon_broadcast { | |||
436 | /* Consecutive PS Poll failures before updating the host */ | 436 | /* Consecutive PS Poll failures before updating the host */ |
437 | u8 ps_poll_threshold; | 437 | u8 ps_poll_threshold; |
438 | u8 pad[2]; | 438 | u8 pad[2]; |
439 | } __attribute__ ((packed)); | 439 | } __packed; |
440 | 440 | ||
441 | struct acx_event_mask { | 441 | struct acx_event_mask { |
442 | struct acx_header header; | 442 | struct acx_header header; |
443 | 443 | ||
444 | __le32 event_mask; | 444 | __le32 event_mask; |
445 | __le32 high_event_mask; /* Unused */ | 445 | __le32 high_event_mask; /* Unused */ |
446 | } __attribute__ ((packed)); | 446 | } __packed; |
447 | 447 | ||
448 | #define CFG_RX_FCS BIT(2) | 448 | #define CFG_RX_FCS BIT(2) |
449 | #define CFG_RX_ALL_GOOD BIT(3) | 449 | #define CFG_RX_ALL_GOOD BIT(3) |
@@ -488,14 +488,14 @@ struct acx_feature_config { | |||
488 | 488 | ||
489 | __le32 options; | 489 | __le32 options; |
490 | __le32 data_flow_options; | 490 | __le32 data_flow_options; |
491 | } __attribute__ ((packed)); | 491 | } __packed; |
492 | 492 | ||
493 | struct acx_current_tx_power { | 493 | struct acx_current_tx_power { |
494 | struct acx_header header; | 494 | struct acx_header header; |
495 | 495 | ||
496 | u8 current_tx_power; | 496 | u8 current_tx_power; |
497 | u8 padding[3]; | 497 | u8 padding[3]; |
498 | } __attribute__ ((packed)); | 498 | } __packed; |
499 | 499 | ||
500 | struct acx_wake_up_condition { | 500 | struct acx_wake_up_condition { |
501 | struct acx_header header; | 501 | struct acx_header header; |
@@ -503,7 +503,7 @@ struct acx_wake_up_condition { | |||
503 | u8 wake_up_event; /* Only one bit can be set */ | 503 | u8 wake_up_event; /* Only one bit can be set */ |
504 | u8 listen_interval; | 504 | u8 listen_interval; |
505 | u8 pad[2]; | 505 | u8 pad[2]; |
506 | } __attribute__ ((packed)); | 506 | } __packed; |
507 | 507 | ||
508 | struct acx_aid { | 508 | struct acx_aid { |
509 | struct acx_header header; | 509 | struct acx_header header; |
@@ -513,7 +513,7 @@ struct acx_aid { | |||
513 | */ | 513 | */ |
514 | __le16 aid; | 514 | __le16 aid; |
515 | u8 pad[2]; | 515 | u8 pad[2]; |
516 | } __attribute__ ((packed)); | 516 | } __packed; |
517 | 517 | ||
518 | enum acx_preamble_type { | 518 | enum acx_preamble_type { |
519 | ACX_PREAMBLE_LONG = 0, | 519 | ACX_PREAMBLE_LONG = 0, |
@@ -529,7 +529,7 @@ struct acx_preamble { | |||
529 | */ | 529 | */ |
530 | u8 preamble; | 530 | u8 preamble; |
531 | u8 padding[3]; | 531 | u8 padding[3]; |
532 | } __attribute__ ((packed)); | 532 | } __packed; |
533 | 533 | ||
534 | enum acx_ctsprotect_type { | 534 | enum acx_ctsprotect_type { |
535 | CTSPROTECT_DISABLE = 0, | 535 | CTSPROTECT_DISABLE = 0, |
@@ -540,11 +540,11 @@ struct acx_ctsprotect { | |||
540 | struct acx_header header; | 540 | struct acx_header header; |
541 | u8 ctsprotect; | 541 | u8 ctsprotect; |
542 | u8 padding[3]; | 542 | u8 padding[3]; |
543 | } __attribute__ ((packed)); | 543 | } __packed; |
544 | 544 | ||
545 | struct acx_tx_statistics { | 545 | struct acx_tx_statistics { |
546 | __le32 internal_desc_overflow; | 546 | __le32 internal_desc_overflow; |
547 | } __attribute__ ((packed)); | 547 | } __packed; |
548 | 548 | ||
549 | struct acx_rx_statistics { | 549 | struct acx_rx_statistics { |
550 | __le32 out_of_mem; | 550 | __le32 out_of_mem; |
@@ -555,14 +555,14 @@ struct acx_rx_statistics { | |||
555 | __le32 xfr_hint_trig; | 555 | __le32 xfr_hint_trig; |
556 | __le32 path_reset; | 556 | __le32 path_reset; |
557 | __le32 reset_counter; | 557 | __le32 reset_counter; |
558 | } __attribute__ ((packed)); | 558 | } __packed; |
559 | 559 | ||
560 | struct acx_dma_statistics { | 560 | struct acx_dma_statistics { |
561 | __le32 rx_requested; | 561 | __le32 rx_requested; |
562 | __le32 rx_errors; | 562 | __le32 rx_errors; |
563 | __le32 tx_requested; | 563 | __le32 tx_requested; |
564 | __le32 tx_errors; | 564 | __le32 tx_errors; |
565 | } __attribute__ ((packed)); | 565 | } __packed; |
566 | 566 | ||
567 | struct acx_isr_statistics { | 567 | struct acx_isr_statistics { |
568 | /* host command complete */ | 568 | /* host command complete */ |
@@ -621,7 +621,7 @@ struct acx_isr_statistics { | |||
621 | 621 | ||
622 | /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */ | 622 | /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */ |
623 | __le32 low_rssi; | 623 | __le32 low_rssi; |
624 | } __attribute__ ((packed)); | 624 | } __packed; |
625 | 625 | ||
626 | struct acx_wep_statistics { | 626 | struct acx_wep_statistics { |
627 | /* WEP address keys configured */ | 627 | /* WEP address keys configured */ |
@@ -643,7 +643,7 @@ struct acx_wep_statistics { | |||
643 | 643 | ||
644 | /* WEP decrypt interrupts */ | 644 | /* WEP decrypt interrupts */ |
645 | __le32 interrupt; | 645 | __le32 interrupt; |
646 | } __attribute__ ((packed)); | 646 | } __packed; |
647 | 647 | ||
648 | #define ACX_MISSED_BEACONS_SPREAD 10 | 648 | #define ACX_MISSED_BEACONS_SPREAD 10 |
649 | 649 | ||
@@ -703,12 +703,12 @@ struct acx_pwr_statistics { | |||
703 | 703 | ||
704 | /* the number of beacons in awake mode */ | 704 | /* the number of beacons in awake mode */ |
705 | __le32 rcvd_awake_beacons; | 705 | __le32 rcvd_awake_beacons; |
706 | } __attribute__ ((packed)); | 706 | } __packed; |
707 | 707 | ||
708 | struct acx_mic_statistics { | 708 | struct acx_mic_statistics { |
709 | __le32 rx_pkts; | 709 | __le32 rx_pkts; |
710 | __le32 calc_failure; | 710 | __le32 calc_failure; |
711 | } __attribute__ ((packed)); | 711 | } __packed; |
712 | 712 | ||
713 | struct acx_aes_statistics { | 713 | struct acx_aes_statistics { |
714 | __le32 encrypt_fail; | 714 | __le32 encrypt_fail; |
@@ -717,7 +717,7 @@ struct acx_aes_statistics { | |||
717 | __le32 decrypt_packets; | 717 | __le32 decrypt_packets; |
718 | __le32 encrypt_interrupt; | 718 | __le32 encrypt_interrupt; |
719 | __le32 decrypt_interrupt; | 719 | __le32 decrypt_interrupt; |
720 | } __attribute__ ((packed)); | 720 | } __packed; |
721 | 721 | ||
722 | struct acx_event_statistics { | 722 | struct acx_event_statistics { |
723 | __le32 heart_beat; | 723 | __le32 heart_beat; |
@@ -728,7 +728,7 @@ struct acx_event_statistics { | |||
728 | __le32 oom_late; | 728 | __le32 oom_late; |
729 | __le32 phy_transmit_error; | 729 | __le32 phy_transmit_error; |
730 | __le32 tx_stuck; | 730 | __le32 tx_stuck; |
731 | } __attribute__ ((packed)); | 731 | } __packed; |
732 | 732 | ||
733 | struct acx_ps_statistics { | 733 | struct acx_ps_statistics { |
734 | __le32 pspoll_timeouts; | 734 | __le32 pspoll_timeouts; |
@@ -738,7 +738,7 @@ struct acx_ps_statistics { | |||
738 | __le32 pspoll_max_apturn; | 738 | __le32 pspoll_max_apturn; |
739 | __le32 pspoll_utilization; | 739 | __le32 pspoll_utilization; |
740 | __le32 upsd_utilization; | 740 | __le32 upsd_utilization; |
741 | } __attribute__ ((packed)); | 741 | } __packed; |
742 | 742 | ||
743 | struct acx_rxpipe_statistics { | 743 | struct acx_rxpipe_statistics { |
744 | __le32 rx_prep_beacon_drop; | 744 | __le32 rx_prep_beacon_drop; |
@@ -746,7 +746,7 @@ struct acx_rxpipe_statistics { | |||
746 | __le32 beacon_buffer_thres_host_int_trig_rx_data; | 746 | __le32 beacon_buffer_thres_host_int_trig_rx_data; |
747 | __le32 missed_beacon_host_int_trig_rx_data; | 747 | __le32 missed_beacon_host_int_trig_rx_data; |
748 | __le32 tx_xfr_host_int_trig_rx_data; | 748 | __le32 tx_xfr_host_int_trig_rx_data; |
749 | } __attribute__ ((packed)); | 749 | } __packed; |
750 | 750 | ||
751 | struct acx_statistics { | 751 | struct acx_statistics { |
752 | struct acx_header header; | 752 | struct acx_header header; |
@@ -762,7 +762,7 @@ struct acx_statistics { | |||
762 | struct acx_event_statistics event; | 762 | struct acx_event_statistics event; |
763 | struct acx_ps_statistics ps; | 763 | struct acx_ps_statistics ps; |
764 | struct acx_rxpipe_statistics rxpipe; | 764 | struct acx_rxpipe_statistics rxpipe; |
765 | } __attribute__ ((packed)); | 765 | } __packed; |
766 | 766 | ||
767 | struct acx_rate_class { | 767 | struct acx_rate_class { |
768 | __le32 enabled_rates; | 768 | __le32 enabled_rates; |
@@ -780,7 +780,7 @@ struct acx_rate_policy { | |||
780 | 780 | ||
781 | __le32 rate_class_cnt; | 781 | __le32 rate_class_cnt; |
782 | struct acx_rate_class rate_class[CONF_TX_MAX_RATE_CLASSES]; | 782 | struct acx_rate_class rate_class[CONF_TX_MAX_RATE_CLASSES]; |
783 | } __attribute__ ((packed)); | 783 | } __packed; |
784 | 784 | ||
785 | struct acx_ac_cfg { | 785 | struct acx_ac_cfg { |
786 | struct acx_header header; | 786 | struct acx_header header; |
@@ -790,7 +790,7 @@ struct acx_ac_cfg { | |||
790 | u8 aifsn; | 790 | u8 aifsn; |
791 | u8 reserved; | 791 | u8 reserved; |
792 | __le16 tx_op_limit; | 792 | __le16 tx_op_limit; |
793 | } __attribute__ ((packed)); | 793 | } __packed; |
794 | 794 | ||
795 | struct acx_tid_config { | 795 | struct acx_tid_config { |
796 | struct acx_header header; | 796 | struct acx_header header; |
@@ -801,19 +801,19 @@ struct acx_tid_config { | |||
801 | u8 ack_policy; | 801 | u8 ack_policy; |
802 | u8 padding[3]; | 802 | u8 padding[3]; |
803 | __le32 apsd_conf[2]; | 803 | __le32 apsd_conf[2]; |
804 | } __attribute__ ((packed)); | 804 | } __packed; |
805 | 805 | ||
806 | struct acx_frag_threshold { | 806 | struct acx_frag_threshold { |
807 | struct acx_header header; | 807 | struct acx_header header; |
808 | __le16 frag_threshold; | 808 | __le16 frag_threshold; |
809 | u8 padding[2]; | 809 | u8 padding[2]; |
810 | } __attribute__ ((packed)); | 810 | } __packed; |
811 | 811 | ||
812 | struct acx_tx_config_options { | 812 | struct acx_tx_config_options { |
813 | struct acx_header header; | 813 | struct acx_header header; |
814 | __le16 tx_compl_timeout; /* msec */ | 814 | __le16 tx_compl_timeout; /* msec */ |
815 | __le16 tx_compl_threshold; /* number of packets */ | 815 | __le16 tx_compl_threshold; /* number of packets */ |
816 | } __attribute__ ((packed)); | 816 | } __packed; |
817 | 817 | ||
818 | #define ACX_RX_MEM_BLOCKS 70 | 818 | #define ACX_RX_MEM_BLOCKS 70 |
819 | #define ACX_TX_MIN_MEM_BLOCKS 40 | 819 | #define ACX_TX_MIN_MEM_BLOCKS 40 |
@@ -828,7 +828,7 @@ struct wl1271_acx_config_memory { | |||
828 | u8 num_stations; | 828 | u8 num_stations; |
829 | u8 num_ssid_profiles; | 829 | u8 num_ssid_profiles; |
830 | __le32 total_tx_descriptors; | 830 | __le32 total_tx_descriptors; |
831 | } __attribute__ ((packed)); | 831 | } __packed; |
832 | 832 | ||
833 | struct wl1271_acx_mem_map { | 833 | struct wl1271_acx_mem_map { |
834 | struct acx_header header; | 834 | struct acx_header header; |
@@ -872,7 +872,7 @@ struct wl1271_acx_mem_map { | |||
872 | u8 *rx_cbuf; | 872 | u8 *rx_cbuf; |
873 | __le32 rx_ctrl; | 873 | __le32 rx_ctrl; |
874 | __le32 tx_ctrl; | 874 | __le32 tx_ctrl; |
875 | } __attribute__ ((packed)); | 875 | } __packed; |
876 | 876 | ||
877 | struct wl1271_acx_rx_config_opt { | 877 | struct wl1271_acx_rx_config_opt { |
878 | struct acx_header header; | 878 | struct acx_header header; |
@@ -882,7 +882,7 @@ struct wl1271_acx_rx_config_opt { | |||
882 | __le16 timeout; | 882 | __le16 timeout; |
883 | u8 queue_type; | 883 | u8 queue_type; |
884 | u8 reserved; | 884 | u8 reserved; |
885 | } __attribute__ ((packed)); | 885 | } __packed; |
886 | 886 | ||
887 | 887 | ||
888 | struct wl1271_acx_bet_enable { | 888 | struct wl1271_acx_bet_enable { |
@@ -891,7 +891,7 @@ struct wl1271_acx_bet_enable { | |||
891 | u8 enable; | 891 | u8 enable; |
892 | u8 max_consecutive; | 892 | u8 max_consecutive; |
893 | u8 padding[2]; | 893 | u8 padding[2]; |
894 | } __attribute__ ((packed)); | 894 | } __packed; |
895 | 895 | ||
896 | #define ACX_IPV4_VERSION 4 | 896 | #define ACX_IPV4_VERSION 4 |
897 | #define ACX_IPV6_VERSION 6 | 897 | #define ACX_IPV6_VERSION 6 |
@@ -905,7 +905,7 @@ struct wl1271_acx_arp_filter { | |||
905 | requests directed to this IP address will pass | 905 | requests directed to this IP address will pass |
906 | through. For IPv4, the first four bytes are | 906 | through. For IPv4, the first four bytes are |
907 | used. */ | 907 | used. */ |
908 | } __attribute__((packed)); | 908 | } __packed; |
909 | 909 | ||
910 | struct wl1271_acx_pm_config { | 910 | struct wl1271_acx_pm_config { |
911 | struct acx_header header; | 911 | struct acx_header header; |
@@ -913,14 +913,14 @@ struct wl1271_acx_pm_config { | |||
913 | __le32 host_clk_settling_time; | 913 | __le32 host_clk_settling_time; |
914 | u8 host_fast_wakeup_support; | 914 | u8 host_fast_wakeup_support; |
915 | u8 padding[3]; | 915 | u8 padding[3]; |
916 | } __attribute__ ((packed)); | 916 | } __packed; |
917 | 917 | ||
918 | struct wl1271_acx_keep_alive_mode { | 918 | struct wl1271_acx_keep_alive_mode { |
919 | struct acx_header header; | 919 | struct acx_header header; |
920 | 920 | ||
921 | u8 enabled; | 921 | u8 enabled; |
922 | u8 padding[3]; | 922 | u8 padding[3]; |
923 | } __attribute__ ((packed)); | 923 | } __packed; |
924 | 924 | ||
925 | enum { | 925 | enum { |
926 | ACX_KEEP_ALIVE_NO_TX = 0, | 926 | ACX_KEEP_ALIVE_NO_TX = 0, |
@@ -940,7 +940,7 @@ struct wl1271_acx_keep_alive_config { | |||
940 | u8 tpl_validation; | 940 | u8 tpl_validation; |
941 | u8 trigger; | 941 | u8 trigger; |
942 | u8 padding; | 942 | u8 padding; |
943 | } __attribute__ ((packed)); | 943 | } __packed; |
944 | 944 | ||
945 | enum { | 945 | enum { |
946 | WL1271_ACX_TRIG_TYPE_LEVEL = 0, | 946 | WL1271_ACX_TRIG_TYPE_LEVEL = 0, |
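
The wl1271 ACX structures above combine __packed with explicitly sized little-endian fields (__le16, __le32) and hand-written pad bytes, so the host exchanges a fixed, unpadded byte layout with the firmware regardless of CPU endianness. The sketch below shows the same pattern in user space; the demo_hdr struct is made up for illustration, and it uses glibc's <endian.h> helpers where kernel code would use cpu_to_le16()/le16_to_cpu() on the __le16/__le32 fields.

/* User-space sketch of the "packed + explicit endianness + manual padding"
 * pattern (assumption: glibc <endian.h> is available). */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define __packed __attribute__((packed))

/* Hypothetical command header in the same style as the ACX structures:
 * little-endian on the wire, padded by hand to a 32-bit multiple. */
struct demo_hdr {
	uint16_t id;		/* stored little-endian */
	uint16_t len;		/* stored little-endian */
	uint8_t  enable;
	uint8_t  pad[3];	/* explicit padding, like the pad[]/padding[] fields above */
} __packed;

int main(void)
{
	uint8_t buf[sizeof(struct demo_hdr)];
	struct demo_hdr h = {
		.id     = htole16(0x1234),		/* host -> little-endian */
		.len    = htole16(sizeof(h) - 4),
		.enable = 1,
	};

	memcpy(buf, &h, sizeof(h));			/* bytes that would cross the bus */
	printf("size=%zu id=0x%04x\n",
	       sizeof(h), (unsigned)le16toh(h.id));	/* little-endian -> host */
	return 0;
}
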
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.h b/drivers/net/wireless/wl12xx/wl1271_cmd.h index f2820b42a943..d88faf9d2642 100644 --- a/drivers/net/wireless/wl12xx/wl1271_cmd.h +++ b/drivers/net/wireless/wl12xx/wl1271_cmd.h | |||
@@ -136,14 +136,14 @@ struct wl1271_cmd_header { | |||
136 | __le16 status; | 136 | __le16 status; |
137 | /* payload */ | 137 | /* payload */ |
138 | u8 data[0]; | 138 | u8 data[0]; |
139 | } __attribute__ ((packed)); | 139 | } __packed; |
140 | 140 | ||
141 | #define WL1271_CMD_MAX_PARAMS 572 | 141 | #define WL1271_CMD_MAX_PARAMS 572 |
142 | 142 | ||
143 | struct wl1271_command { | 143 | struct wl1271_command { |
144 | struct wl1271_cmd_header header; | 144 | struct wl1271_cmd_header header; |
145 | u8 parameters[WL1271_CMD_MAX_PARAMS]; | 145 | u8 parameters[WL1271_CMD_MAX_PARAMS]; |
146 | } __attribute__ ((packed)); | 146 | } __packed; |
147 | 147 | ||
148 | enum { | 148 | enum { |
149 | CMD_MAILBOX_IDLE = 0, | 149 | CMD_MAILBOX_IDLE = 0, |
@@ -196,7 +196,7 @@ struct cmd_read_write_memory { | |||
196 | of this field is the Host in WRITE command or the Wilink in READ | 196 | of this field is the Host in WRITE command or the Wilink in READ |
197 | command. */ | 197 | command. */ |
198 | u8 value[MAX_READ_SIZE]; | 198 | u8 value[MAX_READ_SIZE]; |
199 | } __attribute__ ((packed)); | 199 | } __packed; |
200 | 200 | ||
201 | #define CMDMBOX_HEADER_LEN 4 | 201 | #define CMDMBOX_HEADER_LEN 4 |
202 | #define CMDMBOX_INFO_ELEM_HEADER_LEN 4 | 202 | #define CMDMBOX_INFO_ELEM_HEADER_LEN 4 |
@@ -243,14 +243,14 @@ struct wl1271_cmd_join { | |||
243 | u8 ssid[IW_ESSID_MAX_SIZE]; | 243 | u8 ssid[IW_ESSID_MAX_SIZE]; |
244 | u8 ctrl; /* JOIN_CMD_CTRL_* */ | 244 | u8 ctrl; /* JOIN_CMD_CTRL_* */ |
245 | u8 reserved[3]; | 245 | u8 reserved[3]; |
246 | } __attribute__ ((packed)); | 246 | } __packed; |
247 | 247 | ||
248 | struct cmd_enabledisable_path { | 248 | struct cmd_enabledisable_path { |
249 | struct wl1271_cmd_header header; | 249 | struct wl1271_cmd_header header; |
250 | 250 | ||
251 | u8 channel; | 251 | u8 channel; |
252 | u8 padding[3]; | 252 | u8 padding[3]; |
253 | } __attribute__ ((packed)); | 253 | } __packed; |
254 | 254 | ||
255 | #define WL1271_RATE_AUTOMATIC 0 | 255 | #define WL1271_RATE_AUTOMATIC 0 |
256 | 256 | ||
@@ -266,7 +266,7 @@ struct wl1271_cmd_template_set { | |||
266 | u8 aflags; | 266 | u8 aflags; |
267 | u8 reserved; | 267 | u8 reserved; |
268 | u8 template_data[WL1271_CMD_TEMPL_MAX_SIZE]; | 268 | u8 template_data[WL1271_CMD_TEMPL_MAX_SIZE]; |
269 | } __attribute__ ((packed)); | 269 | } __packed; |
270 | 270 | ||
271 | #define TIM_ELE_ID 5 | 271 | #define TIM_ELE_ID 5 |
272 | #define PARTIAL_VBM_MAX 251 | 272 | #define PARTIAL_VBM_MAX 251 |
@@ -278,7 +278,7 @@ struct wl1271_tim { | |||
278 | u8 dtim_period; | 278 | u8 dtim_period; |
279 | u8 bitmap_ctrl; | 279 | u8 bitmap_ctrl; |
280 | u8 pvb_field[PARTIAL_VBM_MAX]; /* Partial Virtual Bitmap */ | 280 | u8 pvb_field[PARTIAL_VBM_MAX]; /* Partial Virtual Bitmap */ |
281 | } __attribute__ ((packed)); | 281 | } __packed; |
282 | 282 | ||
283 | enum wl1271_cmd_ps_mode { | 283 | enum wl1271_cmd_ps_mode { |
284 | STATION_ACTIVE_MODE, | 284 | STATION_ACTIVE_MODE, |
@@ -298,7 +298,7 @@ struct wl1271_cmd_ps_params { | |||
298 | */ | 298 | */ |
299 | u8 hang_over_period; | 299 | u8 hang_over_period; |
300 | __le32 null_data_rate; | 300 | __le32 null_data_rate; |
301 | } __attribute__ ((packed)); | 301 | } __packed; |
302 | 302 | ||
303 | /* HW encryption keys */ | 303 | /* HW encryption keys */ |
304 | #define NUM_ACCESS_CATEGORIES_COPY 4 | 304 | #define NUM_ACCESS_CATEGORIES_COPY 4 |
@@ -348,7 +348,7 @@ struct wl1271_cmd_set_keys { | |||
348 | u8 key[MAX_KEY_SIZE]; | 348 | u8 key[MAX_KEY_SIZE]; |
349 | __le16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY]; | 349 | __le16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY]; |
350 | __le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY]; | 350 | __le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY]; |
351 | } __attribute__ ((packed)); | 351 | } __packed; |
352 | 352 | ||
353 | 353 | ||
354 | #define WL1271_SCAN_MAX_CHANNELS 24 | 354 | #define WL1271_SCAN_MAX_CHANNELS 24 |
@@ -385,7 +385,7 @@ struct basic_scan_params { | |||
385 | u8 use_ssid_list; | 385 | u8 use_ssid_list; |
386 | u8 scan_tag; | 386 | u8 scan_tag; |
387 | u8 padding2; | 387 | u8 padding2; |
388 | } __attribute__ ((packed)); | 388 | } __packed; |
389 | 389 | ||
390 | struct basic_scan_channel_params { | 390 | struct basic_scan_channel_params { |
391 | /* Duration in TU to wait for frames on a channel for active scan */ | 391 | /* Duration in TU to wait for frames on a channel for active scan */ |
@@ -400,25 +400,25 @@ struct basic_scan_channel_params { | |||
400 | u8 dfs_candidate; | 400 | u8 dfs_candidate; |
401 | u8 activity_detected; | 401 | u8 activity_detected; |
402 | u8 pad; | 402 | u8 pad; |
403 | } __attribute__ ((packed)); | 403 | } __packed; |
404 | 404 | ||
405 | struct wl1271_cmd_scan { | 405 | struct wl1271_cmd_scan { |
406 | struct wl1271_cmd_header header; | 406 | struct wl1271_cmd_header header; |
407 | 407 | ||
408 | struct basic_scan_params params; | 408 | struct basic_scan_params params; |
409 | struct basic_scan_channel_params channels[WL1271_SCAN_MAX_CHANNELS]; | 409 | struct basic_scan_channel_params channels[WL1271_SCAN_MAX_CHANNELS]; |
410 | } __attribute__ ((packed)); | 410 | } __packed; |
411 | 411 | ||
412 | struct wl1271_cmd_trigger_scan_to { | 412 | struct wl1271_cmd_trigger_scan_to { |
413 | struct wl1271_cmd_header header; | 413 | struct wl1271_cmd_header header; |
414 | 414 | ||
415 | __le32 timeout; | 415 | __le32 timeout; |
416 | } __attribute__ ((packed)); | 416 | } __packed; |
417 | 417 | ||
418 | struct wl1271_cmd_test_header { | 418 | struct wl1271_cmd_test_header { |
419 | u8 id; | 419 | u8 id; |
420 | u8 padding[3]; | 420 | u8 padding[3]; |
421 | } __attribute__ ((packed)); | 421 | } __packed; |
422 | 422 | ||
423 | enum wl1271_channel_tune_bands { | 423 | enum wl1271_channel_tune_bands { |
424 | WL1271_CHANNEL_TUNE_BAND_2_4, | 424 | WL1271_CHANNEL_TUNE_BAND_2_4, |
@@ -441,7 +441,7 @@ struct wl1271_general_parms_cmd { | |||
441 | 441 | ||
442 | u8 params[WL1271_NVS_GENERAL_PARAMS_SIZE]; | 442 | u8 params[WL1271_NVS_GENERAL_PARAMS_SIZE]; |
443 | s8 reserved[23]; | 443 | s8 reserved[23]; |
444 | } __attribute__ ((packed)); | 444 | } __packed; |
445 | 445 | ||
446 | #define WL1271_STAT_RADIO_PARAMS_5_SIZE 29 | 446 | #define WL1271_STAT_RADIO_PARAMS_5_SIZE 29 |
447 | #define WL1271_DYN_RADIO_PARAMS_5_SIZE 104 | 447 | #define WL1271_DYN_RADIO_PARAMS_5_SIZE 104 |
@@ -457,7 +457,7 @@ struct wl1271_radio_parms_cmd { | |||
457 | u8 dyn_radio_params[WL1271_NVS_DYN_RADIO_PARAMS_SIZE]; | 457 | u8 dyn_radio_params[WL1271_NVS_DYN_RADIO_PARAMS_SIZE]; |
458 | u8 reserved; | 458 | u8 reserved; |
459 | u8 dyn_radio_params_5[WL1271_DYN_RADIO_PARAMS_5_SIZE]; | 459 | u8 dyn_radio_params_5[WL1271_DYN_RADIO_PARAMS_5_SIZE]; |
460 | } __attribute__ ((packed)); | 460 | } __packed; |
461 | 461 | ||
462 | struct wl1271_cmd_cal_channel_tune { | 462 | struct wl1271_cmd_cal_channel_tune { |
463 | struct wl1271_cmd_header header; | 463 | struct wl1271_cmd_header header; |
@@ -468,7 +468,7 @@ struct wl1271_cmd_cal_channel_tune { | |||
468 | u8 channel; | 468 | u8 channel; |
469 | 469 | ||
470 | __le16 radio_status; | 470 | __le16 radio_status; |
471 | } __attribute__ ((packed)); | 471 | } __packed; |
472 | 472 | ||
473 | struct wl1271_cmd_cal_update_ref_point { | 473 | struct wl1271_cmd_cal_update_ref_point { |
474 | struct wl1271_cmd_header header; | 474 | struct wl1271_cmd_header header; |
@@ -479,7 +479,7 @@ struct wl1271_cmd_cal_update_ref_point { | |||
479 | __le32 ref_detector; | 479 | __le32 ref_detector; |
480 | u8 sub_band; | 480 | u8 sub_band; |
481 | u8 padding[3]; | 481 | u8 padding[3]; |
482 | } __attribute__ ((packed)); | 482 | } __packed; |
483 | 483 | ||
484 | #define MAX_TLV_LENGTH 400 | 484 | #define MAX_TLV_LENGTH 400 |
485 | #define MAX_NVS_VERSION_LENGTH 12 | 485 | #define MAX_NVS_VERSION_LENGTH 12 |
@@ -501,7 +501,7 @@ struct wl1271_cmd_cal_p2g { | |||
501 | 501 | ||
502 | u8 sub_band_mask; | 502 | u8 sub_band_mask; |
503 | u8 padding2; | 503 | u8 padding2; |
504 | } __attribute__ ((packed)); | 504 | } __packed; |
505 | 505 | ||
506 | 506 | ||
507 | /* | 507 | /* |
@@ -529,6 +529,6 @@ struct wl1271_cmd_disconnect { | |||
529 | u8 type; | 529 | u8 type; |
530 | 530 | ||
531 | u8 padding; | 531 | u8 padding; |
532 | } __attribute__ ((packed)); | 532 | } __packed; |
533 | 533 | ||
534 | #endif /* __WL1271_CMD_H__ */ | 534 | #endif /* __WL1271_CMD_H__ */ |
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.h b/drivers/net/wireless/wl12xx/wl1271_event.h index 58371008f270..43d5aeae1783 100644 --- a/drivers/net/wireless/wl12xx/wl1271_event.h +++ b/drivers/net/wireless/wl12xx/wl1271_event.h | |||
@@ -85,7 +85,7 @@ struct event_debug_report { | |||
85 | __le32 report_1; | 85 | __le32 report_1; |
86 | __le32 report_2; | 86 | __le32 report_2; |
87 | __le32 report_3; | 87 | __le32 report_3; |
88 | } __attribute__ ((packed)); | 88 | } __packed; |
89 | 89 | ||
90 | #define NUM_OF_RSSI_SNR_TRIGGERS 8 | 90 | #define NUM_OF_RSSI_SNR_TRIGGERS 8 |
91 | 91 | ||
@@ -116,7 +116,7 @@ struct event_mailbox { | |||
116 | u8 ps_status; | 116 | u8 ps_status; |
117 | 117 | ||
118 | u8 reserved_5[29]; | 118 | u8 reserved_5[29]; |
119 | } __attribute__ ((packed)); | 119 | } __packed; |
120 | 120 | ||
121 | int wl1271_event_unmask(struct wl1271 *wl); | 121 | int wl1271_event_unmask(struct wl1271 *wl); |
122 | void wl1271_event_mbox_config(struct wl1271 *wl); | 122 | void wl1271_event_mbox_config(struct wl1271 *wl); |
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.h b/drivers/net/wireless/wl12xx/wl1271_rx.h index b89be4758e78..13a232333b13 100644 --- a/drivers/net/wireless/wl12xx/wl1271_rx.h +++ b/drivers/net/wireless/wl12xx/wl1271_rx.h | |||
@@ -113,7 +113,7 @@ struct wl1271_rx_descriptor { | |||
113 | u8 process_id; | 113 | u8 process_id; |
114 | u8 pad_len; | 114 | u8 pad_len; |
115 | u8 reserved; | 115 | u8 reserved; |
116 | } __attribute__ ((packed)); | 116 | } __packed; |
117 | 117 | ||
118 | void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status); | 118 | void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status); |
119 | u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate); | 119 | u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate); |
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.h b/drivers/net/wireless/wl12xx/wl1271_tx.h index 3b8b7ac253fd..91d0adb0ea40 100644 --- a/drivers/net/wireless/wl12xx/wl1271_tx.h +++ b/drivers/net/wireless/wl12xx/wl1271_tx.h | |||
@@ -80,7 +80,7 @@ struct wl1271_tx_hw_descr { | |||
80 | /* Identifier of the remote STA in IBSS, 1 in infra-BSS */ | 80 | /* Identifier of the remote STA in IBSS, 1 in infra-BSS */ |
81 | u8 aid; | 81 | u8 aid; |
82 | u8 reserved; | 82 | u8 reserved; |
83 | } __attribute__ ((packed)); | 83 | } __packed; |
84 | 84 | ||
85 | enum wl1271_tx_hw_res_status { | 85 | enum wl1271_tx_hw_res_status { |
86 | TX_SUCCESS = 0, | 86 | TX_SUCCESS = 0, |
@@ -115,13 +115,13 @@ struct wl1271_tx_hw_res_descr { | |||
115 | u8 rate_class_index; | 115 | u8 rate_class_index; |
116 | /* for 4-byte alignment. */ | 116 | /* for 4-byte alignment. */ |
117 | u8 spare; | 117 | u8 spare; |
118 | } __attribute__ ((packed)); | 118 | } __packed; |
119 | 119 | ||
120 | struct wl1271_tx_hw_res_if { | 120 | struct wl1271_tx_hw_res_if { |
121 | __le32 tx_result_fw_counter; | 121 | __le32 tx_result_fw_counter; |
122 | __le32 tx_result_host_counter; | 122 | __le32 tx_result_host_counter; |
123 | struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN]; | 123 | struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN]; |
124 | } __attribute__ ((packed)); | 124 | } __packed; |
125 | 125 | ||
126 | static inline int wl1271_tx_get_queue(int queue) | 126 | static inline int wl1271_tx_get_queue(int queue) |
127 | { | 127 | { |
diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h index 055d7bc6f592..184628027213 100644 --- a/drivers/net/wireless/wl12xx/wl12xx_80211.h +++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h | |||
@@ -66,41 +66,41 @@ struct ieee80211_header { | |||
66 | u8 bssid[ETH_ALEN]; | 66 | u8 bssid[ETH_ALEN]; |
67 | __le16 seq_ctl; | 67 | __le16 seq_ctl; |
68 | u8 payload[0]; | 68 | u8 payload[0]; |
69 | } __attribute__ ((packed)); | 69 | } __packed; |
70 | 70 | ||
71 | struct wl12xx_ie_header { | 71 | struct wl12xx_ie_header { |
72 | u8 id; | 72 | u8 id; |
73 | u8 len; | 73 | u8 len; |
74 | } __attribute__ ((packed)); | 74 | } __packed; |
75 | 75 | ||
76 | /* IEs */ | 76 | /* IEs */ |
77 | 77 | ||
78 | struct wl12xx_ie_ssid { | 78 | struct wl12xx_ie_ssid { |
79 | struct wl12xx_ie_header header; | 79 | struct wl12xx_ie_header header; |
80 | char ssid[IW_ESSID_MAX_SIZE]; | 80 | char ssid[IW_ESSID_MAX_SIZE]; |
81 | } __attribute__ ((packed)); | 81 | } __packed; |
82 | 82 | ||
83 | struct wl12xx_ie_rates { | 83 | struct wl12xx_ie_rates { |
84 | struct wl12xx_ie_header header; | 84 | struct wl12xx_ie_header header; |
85 | u8 rates[MAX_SUPPORTED_RATES]; | 85 | u8 rates[MAX_SUPPORTED_RATES]; |
86 | } __attribute__ ((packed)); | 86 | } __packed; |
87 | 87 | ||
88 | struct wl12xx_ie_ds_params { | 88 | struct wl12xx_ie_ds_params { |
89 | struct wl12xx_ie_header header; | 89 | struct wl12xx_ie_header header; |
90 | u8 channel; | 90 | u8 channel; |
91 | } __attribute__ ((packed)); | 91 | } __packed; |
92 | 92 | ||
93 | struct country_triplet { | 93 | struct country_triplet { |
94 | u8 channel; | 94 | u8 channel; |
95 | u8 num_channels; | 95 | u8 num_channels; |
96 | u8 max_tx_power; | 96 | u8 max_tx_power; |
97 | } __attribute__ ((packed)); | 97 | } __packed; |
98 | 98 | ||
99 | struct wl12xx_ie_country { | 99 | struct wl12xx_ie_country { |
100 | struct wl12xx_ie_header header; | 100 | struct wl12xx_ie_header header; |
101 | u8 country_string[COUNTRY_STRING_LEN]; | 101 | u8 country_string[COUNTRY_STRING_LEN]; |
102 | struct country_triplet triplets[MAX_COUNTRY_TRIPLETS]; | 102 | struct country_triplet triplets[MAX_COUNTRY_TRIPLETS]; |
103 | } __attribute__ ((packed)); | 103 | } __packed; |
104 | 104 | ||
105 | 105 | ||
106 | /* Templates */ | 106 | /* Templates */ |
@@ -115,30 +115,30 @@ struct wl12xx_beacon_template { | |||
115 | struct wl12xx_ie_rates ext_rates; | 115 | struct wl12xx_ie_rates ext_rates; |
116 | struct wl12xx_ie_ds_params ds_params; | 116 | struct wl12xx_ie_ds_params ds_params; |
117 | struct wl12xx_ie_country country; | 117 | struct wl12xx_ie_country country; |
118 | } __attribute__ ((packed)); | 118 | } __packed; |
119 | 119 | ||
120 | struct wl12xx_null_data_template { | 120 | struct wl12xx_null_data_template { |
121 | struct ieee80211_header header; | 121 | struct ieee80211_header header; |
122 | } __attribute__ ((packed)); | 122 | } __packed; |
123 | 123 | ||
124 | struct wl12xx_ps_poll_template { | 124 | struct wl12xx_ps_poll_template { |
125 | __le16 fc; | 125 | __le16 fc; |
126 | __le16 aid; | 126 | __le16 aid; |
127 | u8 bssid[ETH_ALEN]; | 127 | u8 bssid[ETH_ALEN]; |
128 | u8 ta[ETH_ALEN]; | 128 | u8 ta[ETH_ALEN]; |
129 | } __attribute__ ((packed)); | 129 | } __packed; |
130 | 130 | ||
131 | struct wl12xx_qos_null_data_template { | 131 | struct wl12xx_qos_null_data_template { |
132 | struct ieee80211_header header; | 132 | struct ieee80211_header header; |
133 | __le16 qos_ctl; | 133 | __le16 qos_ctl; |
134 | } __attribute__ ((packed)); | 134 | } __packed; |
135 | 135 | ||
136 | struct wl12xx_probe_req_template { | 136 | struct wl12xx_probe_req_template { |
137 | struct ieee80211_header header; | 137 | struct ieee80211_header header; |
138 | struct wl12xx_ie_ssid ssid; | 138 | struct wl12xx_ie_ssid ssid; |
139 | struct wl12xx_ie_rates rates; | 139 | struct wl12xx_ie_rates rates; |
140 | struct wl12xx_ie_rates ext_rates; | 140 | struct wl12xx_ie_rates ext_rates; |
141 | } __attribute__ ((packed)); | 141 | } __packed; |
142 | 142 | ||
143 | 143 | ||
144 | struct wl12xx_probe_resp_template { | 144 | struct wl12xx_probe_resp_template { |
@@ -151,6 +151,6 @@ struct wl12xx_probe_resp_template { | |||
151 | struct wl12xx_ie_rates ext_rates; | 151 | struct wl12xx_ie_rates ext_rates; |
152 | struct wl12xx_ie_ds_params ds_params; | 152 | struct wl12xx_ie_ds_params ds_params; |
153 | struct wl12xx_ie_country country; | 153 | struct wl12xx_ie_country country; |
154 | } __attribute__ ((packed)); | 154 | } __packed; |
155 | 155 | ||
156 | #endif | 156 | #endif |
diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h index 8816e371fd0e..3fbfd19818f1 100644 --- a/drivers/net/wireless/wl3501.h +++ b/drivers/net/wireless/wl3501.h | |||
@@ -231,12 +231,12 @@ struct iw_mgmt_info_element { | |||
231 | but sizeof(enum) > sizeof(u8) :-( */ | 231 | but sizeof(enum) > sizeof(u8) :-( */ |
232 | u8 len; | 232 | u8 len; |
233 | u8 data[0]; | 233 | u8 data[0]; |
234 | } __attribute__ ((packed)); | 234 | } __packed; |
235 | 235 | ||
236 | struct iw_mgmt_essid_pset { | 236 | struct iw_mgmt_essid_pset { |
237 | struct iw_mgmt_info_element el; | 237 | struct iw_mgmt_info_element el; |
238 | u8 essid[IW_ESSID_MAX_SIZE]; | 238 | u8 essid[IW_ESSID_MAX_SIZE]; |
239 | } __attribute__ ((packed)); | 239 | } __packed; |
240 | 240 | ||
241 | /* | 241 | /* |
242 | * According to 802.11 Wireless Netowors, the definitive guide - O'Reilly | 242 | * According to 802.11 Wireless Netowors, the definitive guide - O'Reilly |
@@ -247,12 +247,12 @@ struct iw_mgmt_essid_pset { | |||
247 | struct iw_mgmt_data_rset { | 247 | struct iw_mgmt_data_rset { |
248 | struct iw_mgmt_info_element el; | 248 | struct iw_mgmt_info_element el; |
249 | u8 data_rate_labels[IW_DATA_RATE_MAX_LABELS]; | 249 | u8 data_rate_labels[IW_DATA_RATE_MAX_LABELS]; |
250 | } __attribute__ ((packed)); | 250 | } __packed; |
251 | 251 | ||
252 | struct iw_mgmt_ds_pset { | 252 | struct iw_mgmt_ds_pset { |
253 | struct iw_mgmt_info_element el; | 253 | struct iw_mgmt_info_element el; |
254 | u8 chan; | 254 | u8 chan; |
255 | } __attribute__ ((packed)); | 255 | } __packed; |
256 | 256 | ||
257 | struct iw_mgmt_cf_pset { | 257 | struct iw_mgmt_cf_pset { |
258 | struct iw_mgmt_info_element el; | 258 | struct iw_mgmt_info_element el; |
@@ -260,12 +260,12 @@ struct iw_mgmt_cf_pset { | |||
260 | u8 cfp_period; | 260 | u8 cfp_period; |
261 | u16 cfp_max_duration; | 261 | u16 cfp_max_duration; |
262 | u16 cfp_dur_remaining; | 262 | u16 cfp_dur_remaining; |
263 | } __attribute__ ((packed)); | 263 | } __packed; |
264 | 264 | ||
265 | struct iw_mgmt_ibss_pset { | 265 | struct iw_mgmt_ibss_pset { |
266 | struct iw_mgmt_info_element el; | 266 | struct iw_mgmt_info_element el; |
267 | u16 atim_window; | 267 | u16 atim_window; |
268 | } __attribute__ ((packed)); | 268 | } __packed; |
269 | 269 | ||
270 | struct wl3501_tx_hdr { | 270 | struct wl3501_tx_hdr { |
271 | u16 tx_cnt; | 271 | u16 tx_cnt; |
@@ -544,12 +544,12 @@ struct wl3501_80211_tx_plcp_hdr { | |||
544 | u8 service; | 544 | u8 service; |
545 | u16 len; | 545 | u16 len; |
546 | u16 crc16; | 546 | u16 crc16; |
547 | } __attribute__ ((packed)); | 547 | } __packed; |
548 | 548 | ||
549 | struct wl3501_80211_tx_hdr { | 549 | struct wl3501_80211_tx_hdr { |
550 | struct wl3501_80211_tx_plcp_hdr pclp_hdr; | 550 | struct wl3501_80211_tx_plcp_hdr pclp_hdr; |
551 | struct ieee80211_hdr mac_hdr; | 551 | struct ieee80211_hdr mac_hdr; |
552 | } __attribute__ ((packed)); | 552 | } __packed; |
553 | 553 | ||
554 | /* | 554 | /* |
555 | Reserve the beginning Tx space for descriptor use. | 555 | Reserve the beginning Tx space for descriptor use. |
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h index 630c298a730e..e4c70e359ced 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.h +++ b/drivers/net/wireless/zd1211rw/zd_mac.h | |||
@@ -35,7 +35,7 @@ struct zd_ctrlset { | |||
35 | __le16 current_length; | 35 | __le16 current_length; |
36 | u8 service; | 36 | u8 service; |
37 | __le16 next_frame_length; | 37 | __le16 next_frame_length; |
38 | } __attribute__((packed)); | 38 | } __packed; |
39 | 39 | ||
40 | #define ZD_CS_RESERVED_SIZE 25 | 40 | #define ZD_CS_RESERVED_SIZE 25 |
41 | 41 | ||
@@ -106,7 +106,7 @@ struct zd_ctrlset { | |||
106 | struct rx_length_info { | 106 | struct rx_length_info { |
107 | __le16 length[3]; | 107 | __le16 length[3]; |
108 | __le16 tag; | 108 | __le16 tag; |
109 | } __attribute__((packed)); | 109 | } __packed; |
110 | 110 | ||
111 | #define RX_LENGTH_INFO_TAG 0x697e | 111 | #define RX_LENGTH_INFO_TAG 0x697e |
112 | 112 | ||
@@ -117,7 +117,7 @@ struct rx_status { | |||
117 | u8 signal_quality_ofdm; | 117 | u8 signal_quality_ofdm; |
118 | u8 decryption_type; | 118 | u8 decryption_type; |
119 | u8 frame_status; | 119 | u8 frame_status; |
120 | } __attribute__((packed)); | 120 | } __packed; |
121 | 121 | ||
122 | /* rx_status field decryption_type */ | 122 | /* rx_status field decryption_type */ |
123 | #define ZD_RX_NO_WEP 0 | 123 | #define ZD_RX_NO_WEP 0 |
@@ -153,7 +153,7 @@ struct tx_status { | |||
153 | u8 mac[ETH_ALEN]; | 153 | u8 mac[ETH_ALEN]; |
154 | u8 retry; | 154 | u8 retry; |
155 | u8 failure; | 155 | u8 failure; |
156 | } __attribute__((packed)); | 156 | } __packed; |
157 | 157 | ||
158 | enum mac_flags { | 158 | enum mac_flags { |
159 | MAC_FIXED_CHANNEL = 0x01, | 159 | MAC_FIXED_CHANNEL = 0x01, |
@@ -225,7 +225,7 @@ enum { | |||
225 | struct ofdm_plcp_header { | 225 | struct ofdm_plcp_header { |
226 | u8 prefix[3]; | 226 | u8 prefix[3]; |
227 | __le16 service; | 227 | __le16 service; |
228 | } __attribute__((packed)); | 228 | } __packed; |
229 | 229 | ||
230 | static inline u8 zd_ofdm_plcp_header_rate(const struct ofdm_plcp_header *header) | 230 | static inline u8 zd_ofdm_plcp_header_rate(const struct ofdm_plcp_header *header) |
231 | { | 231 | { |
@@ -252,7 +252,7 @@ struct cck_plcp_header { | |||
252 | u8 service; | 252 | u8 service; |
253 | __le16 length; | 253 | __le16 length; |
254 | __le16 crc16; | 254 | __le16 crc16; |
255 | } __attribute__((packed)); | 255 | } __packed; |
256 | 256 | ||
257 | static inline u8 zd_cck_plcp_header_signal(const struct cck_plcp_header *header) | 257 | static inline u8 zd_cck_plcp_header_signal(const struct cck_plcp_header *header) |
258 | { | 258 | { |
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h index 049f8b91f020..1b1655cb7cb4 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.h +++ b/drivers/net/wireless/zd1211rw/zd_usb.h | |||
@@ -79,17 +79,17 @@ enum control_requests { | |||
79 | struct usb_req_read_regs { | 79 | struct usb_req_read_regs { |
80 | __le16 id; | 80 | __le16 id; |
81 | __le16 addr[0]; | 81 | __le16 addr[0]; |
82 | } __attribute__((packed)); | 82 | } __packed; |
83 | 83 | ||
84 | struct reg_data { | 84 | struct reg_data { |
85 | __le16 addr; | 85 | __le16 addr; |
86 | __le16 value; | 86 | __le16 value; |
87 | } __attribute__((packed)); | 87 | } __packed; |
88 | 88 | ||
89 | struct usb_req_write_regs { | 89 | struct usb_req_write_regs { |
90 | __le16 id; | 90 | __le16 id; |
91 | struct reg_data reg_writes[0]; | 91 | struct reg_data reg_writes[0]; |
92 | } __attribute__((packed)); | 92 | } __packed; |
93 | 93 | ||
94 | enum { | 94 | enum { |
95 | RF_IF_LE = 0x02, | 95 | RF_IF_LE = 0x02, |
@@ -106,7 +106,7 @@ struct usb_req_rfwrite { | |||
106 | /* RF2595: 24 */ | 106 | /* RF2595: 24 */ |
107 | __le16 bit_values[0]; | 107 | __le16 bit_values[0]; |
108 | /* (CR203 & ~(RF_IF_LE | RF_CLK | RF_DATA)) | (bit ? RF_DATA : 0) */ | 108 | /* (CR203 & ~(RF_IF_LE | RF_CLK | RF_DATA)) | (bit ? RF_DATA : 0) */ |
109 | } __attribute__((packed)); | 109 | } __packed; |
110 | 110 | ||
111 | /* USB interrupt */ | 111 | /* USB interrupt */ |
112 | 112 | ||
@@ -123,12 +123,12 @@ enum usb_int_flags { | |||
123 | struct usb_int_header { | 123 | struct usb_int_header { |
124 | u8 type; /* must always be 1 */ | 124 | u8 type; /* must always be 1 */ |
125 | u8 id; | 125 | u8 id; |
126 | } __attribute__((packed)); | 126 | } __packed; |
127 | 127 | ||
128 | struct usb_int_regs { | 128 | struct usb_int_regs { |
129 | struct usb_int_header hdr; | 129 | struct usb_int_header hdr; |
130 | struct reg_data regs[0]; | 130 | struct reg_data regs[0]; |
131 | } __attribute__((packed)); | 131 | } __packed; |
132 | 132 | ||
133 | struct usb_int_retry_fail { | 133 | struct usb_int_retry_fail { |
134 | struct usb_int_header hdr; | 134 | struct usb_int_header hdr; |
@@ -136,7 +136,7 @@ struct usb_int_retry_fail { | |||
136 | u8 _dummy; | 136 | u8 _dummy; |
137 | u8 addr[ETH_ALEN]; | 137 | u8 addr[ETH_ALEN]; |
138 | u8 ibss_wakeup_dest; | 138 | u8 ibss_wakeup_dest; |
139 | } __attribute__((packed)); | 139 | } __packed; |
140 | 140 | ||
141 | struct read_regs_int { | 141 | struct read_regs_int { |
142 | struct completion completion; | 142 | struct completion completion; |
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index d504e2b60257..b50fedcef8ac 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
@@ -1621,6 +1621,7 @@ static void backend_changed(struct xenbus_device *dev, | |||
1621 | if (xennet_connect(netdev) != 0) | 1621 | if (xennet_connect(netdev) != 0) |
1622 | break; | 1622 | break; |
1623 | xenbus_switch_state(dev, XenbusStateConnected); | 1623 | xenbus_switch_state(dev, XenbusStateConnected); |
1624 | netif_notify_peers(netdev); | ||
1624 | break; | 1625 | break; |
1625 | 1626 | ||
1626 | case XenbusStateClosing: | 1627 | case XenbusStateClosing: |
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 0f41c9195e9b..4c218e910635 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c | |||
@@ -519,13 +519,12 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) | |||
519 | 519 | ||
520 | /* start polling new socket */ | 520 | /* start polling new socket */ |
521 | oldsock = vq->private_data; | 521 | oldsock = vq->private_data; |
522 | if (sock == oldsock) | 522 | if (sock != oldsock){ |
523 | goto done; | 523 | vhost_net_disable_vq(n, vq); |
524 | rcu_assign_pointer(vq->private_data, sock); | ||
525 | vhost_net_enable_vq(n, vq); | ||
526 | } | ||
524 | 527 | ||
525 | vhost_net_disable_vq(n, vq); | ||
526 | rcu_assign_pointer(vq->private_data, sock); | ||
527 | vhost_net_enable_vq(n, vq); | ||
528 | done: | ||
529 | if (oldsock) { | 528 | if (oldsock) { |
530 | vhost_net_flush_vq(n, index); | 529 | vhost_net_flush_vq(n, index); |
531 | fput(oldsock->file); | 530 | fput(oldsock->file); |
@@ -626,7 +625,7 @@ static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl, | |||
626 | } | 625 | } |
627 | #endif | 626 | #endif |
628 | 627 | ||
629 | const static struct file_operations vhost_net_fops = { | 628 | static const struct file_operations vhost_net_fops = { |
630 | .owner = THIS_MODULE, | 629 | .owner = THIS_MODULE, |
631 | .release = vhost_net_release, | 630 | .release = vhost_net_release, |
632 | .unlocked_ioctl = vhost_net_ioctl, | 631 | .unlocked_ioctl = vhost_net_ioctl, |
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 3b83382e06eb..04344b711c56 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
@@ -237,8 +237,8 @@ static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem, | |||
237 | { | 237 | { |
238 | int i; | 238 | int i; |
239 | 239 | ||
240 | if (!mem) | 240 | if (!mem) |
241 | return 0; | 241 | return 0; |
242 | 242 | ||
243 | for (i = 0; i < mem->nregions; ++i) { | 243 | for (i = 0; i < mem->nregions; ++i) { |
244 | struct vhost_memory_region *m = mem->regions + i; | 244 | struct vhost_memory_region *m = mem->regions + i; |
diff --git a/include/linux/if_bonding.h b/include/linux/if_bonding.h index cd525fae3c98..2c7994372bde 100644 --- a/include/linux/if_bonding.h +++ b/include/linux/if_bonding.h | |||
@@ -83,6 +83,7 @@ | |||
83 | 83 | ||
84 | #define BOND_DEFAULT_MAX_BONDS 1 /* Default maximum number of devices to support */ | 84 | #define BOND_DEFAULT_MAX_BONDS 1 /* Default maximum number of devices to support */ |
85 | 85 | ||
86 | #define BOND_DEFAULT_TX_QUEUES 16 /* Default number of tx queues per device */ | ||
86 | /* hashing types */ | 87 | /* hashing types */ |
87 | #define BOND_XMIT_POLICY_LAYER2 0 /* layer 2 (MAC only), default */ | 88 | #define BOND_XMIT_POLICY_LAYER2 0 /* layer 2 (MAC only), default */ |
88 | #define BOND_XMIT_POLICY_LAYER34 1 /* layer 3+4 (IP ^ (TCP || UDP)) */ | 89 | #define BOND_XMIT_POLICY_LAYER34 1 /* layer 3+4 (IP ^ (TCP || UDP)) */ |
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 938b7e81df95..0d241a5c4909 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h | |||
@@ -102,8 +102,6 @@ struct __fdb_entry { | |||
102 | #include <linux/netdevice.h> | 102 | #include <linux/netdevice.h> |
103 | 103 | ||
104 | extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); | 104 | extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); |
105 | extern struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p, | ||
106 | struct sk_buff *skb); | ||
107 | extern int (*br_should_route_hook)(struct sk_buff *skb); | 105 | extern int (*br_should_route_hook)(struct sk_buff *skb); |
108 | 106 | ||
109 | #endif | 107 | #endif |
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index bed7a4682b90..c831467774d0 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h | |||
@@ -119,7 +119,7 @@ struct ethhdr { | |||
119 | unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ | 119 | unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ |
120 | unsigned char h_source[ETH_ALEN]; /* source ether addr */ | 120 | unsigned char h_source[ETH_ALEN]; /* source ether addr */ |
121 | __be16 h_proto; /* packet type ID field */ | 121 | __be16 h_proto; /* packet type ID field */ |
122 | } __attribute__((packed)); | 122 | } __packed; |
123 | 123 | ||
124 | #ifdef __KERNEL__ | 124 | #ifdef __KERNEL__ |
125 | #include <linux/skbuff.h> | 125 | #include <linux/skbuff.h> |
diff --git a/include/linux/if_fddi.h b/include/linux/if_fddi.h index 5459c5c09930..9947c39e62f6 100644 --- a/include/linux/if_fddi.h +++ b/include/linux/if_fddi.h | |||
@@ -67,7 +67,7 @@ struct fddi_8022_1_hdr { | |||
67 | __u8 dsap; /* destination service access point */ | 67 | __u8 dsap; /* destination service access point */ |
68 | __u8 ssap; /* source service access point */ | 68 | __u8 ssap; /* source service access point */ |
69 | __u8 ctrl; /* control byte #1 */ | 69 | __u8 ctrl; /* control byte #1 */ |
70 | } __attribute__ ((packed)); | 70 | } __packed; |
71 | 71 | ||
72 | /* Define 802.2 Type 2 header */ | 72 | /* Define 802.2 Type 2 header */ |
73 | struct fddi_8022_2_hdr { | 73 | struct fddi_8022_2_hdr { |
@@ -75,7 +75,7 @@ struct fddi_8022_2_hdr { | |||
75 | __u8 ssap; /* source service access point */ | 75 | __u8 ssap; /* source service access point */ |
76 | __u8 ctrl_1; /* control byte #1 */ | 76 | __u8 ctrl_1; /* control byte #1 */ |
77 | __u8 ctrl_2; /* control byte #2 */ | 77 | __u8 ctrl_2; /* control byte #2 */ |
78 | } __attribute__ ((packed)); | 78 | } __packed; |
79 | 79 | ||
80 | /* Define 802.2 SNAP header */ | 80 | /* Define 802.2 SNAP header */ |
81 | #define FDDI_K_OUI_LEN 3 | 81 | #define FDDI_K_OUI_LEN 3 |
@@ -85,7 +85,7 @@ struct fddi_snap_hdr { | |||
85 | __u8 ctrl; /* always 0x03 */ | 85 | __u8 ctrl; /* always 0x03 */ |
86 | __u8 oui[FDDI_K_OUI_LEN]; /* organizational universal id */ | 86 | __u8 oui[FDDI_K_OUI_LEN]; /* organizational universal id */ |
87 | __be16 ethertype; /* packet type ID field */ | 87 | __be16 ethertype; /* packet type ID field */ |
88 | } __attribute__ ((packed)); | 88 | } __packed; |
89 | 89 | ||
90 | /* Define FDDI LLC frame header */ | 90 | /* Define FDDI LLC frame header */ |
91 | struct fddihdr { | 91 | struct fddihdr { |
@@ -98,7 +98,7 @@ struct fddihdr { | |||
98 | struct fddi_8022_2_hdr llc_8022_2; | 98 | struct fddi_8022_2_hdr llc_8022_2; |
99 | struct fddi_snap_hdr llc_snap; | 99 | struct fddi_snap_hdr llc_snap; |
100 | } hdr; | 100 | } hdr; |
101 | } __attribute__ ((packed)); | 101 | } __packed; |
102 | 102 | ||
103 | #ifdef __KERNEL__ | 103 | #ifdef __KERNEL__ |
104 | #include <linux/netdevice.h> | 104 | #include <linux/netdevice.h> |
diff --git a/include/linux/if_frad.h b/include/linux/if_frad.h index 80b3a1056a5f..191ee0869bc1 100644 --- a/include/linux/if_frad.h +++ b/include/linux/if_frad.h | |||
@@ -135,7 +135,7 @@ struct frhdr | |||
135 | __be16 PID; | 135 | __be16 PID; |
136 | 136 | ||
137 | #define IP_NLPID pad | 137 | #define IP_NLPID pad |
138 | } __attribute__((packed)); | 138 | } __packed; |
139 | 139 | ||
140 | /* see RFC 1490 for the definition of the following */ | 140 | /* see RFC 1490 for the definition of the following */ |
141 | #define FRAD_I_UI 0x03 | 141 | #define FRAD_I_UI 0x03 |
diff --git a/include/linux/if_hippi.h b/include/linux/if_hippi.h index 8d038eb8db5c..5fe5f307c6f5 100644 --- a/include/linux/if_hippi.h +++ b/include/linux/if_hippi.h | |||
@@ -104,7 +104,7 @@ struct hippi_fp_hdr { | |||
104 | __be32 fixed; | 104 | __be32 fixed; |
105 | #endif | 105 | #endif |
106 | __be32 d2_size; | 106 | __be32 d2_size; |
107 | } __attribute__ ((packed)); | 107 | } __packed; |
108 | 108 | ||
109 | struct hippi_le_hdr { | 109 | struct hippi_le_hdr { |
110 | #if defined (__BIG_ENDIAN_BITFIELD) | 110 | #if defined (__BIG_ENDIAN_BITFIELD) |
@@ -129,7 +129,7 @@ struct hippi_le_hdr { | |||
129 | __u8 daddr[HIPPI_ALEN]; | 129 | __u8 daddr[HIPPI_ALEN]; |
130 | __u16 locally_administered; | 130 | __u16 locally_administered; |
131 | __u8 saddr[HIPPI_ALEN]; | 131 | __u8 saddr[HIPPI_ALEN]; |
132 | } __attribute__ ((packed)); | 132 | } __packed; |
133 | 133 | ||
134 | #define HIPPI_OUI_LEN 3 | 134 | #define HIPPI_OUI_LEN 3 |
135 | /* | 135 | /* |
@@ -142,12 +142,12 @@ struct hippi_snap_hdr { | |||
142 | __u8 ctrl; /* always 0x03 */ | 142 | __u8 ctrl; /* always 0x03 */ |
143 | __u8 oui[HIPPI_OUI_LEN]; /* organizational universal id (zero)*/ | 143 | __u8 oui[HIPPI_OUI_LEN]; /* organizational universal id (zero)*/ |
144 | __be16 ethertype; /* packet type ID field */ | 144 | __be16 ethertype; /* packet type ID field */ |
145 | } __attribute__ ((packed)); | 145 | } __packed; |
146 | 146 | ||
147 | struct hippi_hdr { | 147 | struct hippi_hdr { |
148 | struct hippi_fp_hdr fp; | 148 | struct hippi_fp_hdr fp; |
149 | struct hippi_le_hdr le; | 149 | struct hippi_le_hdr le; |
150 | struct hippi_snap_hdr snap; | 150 | struct hippi_snap_hdr snap; |
151 | } __attribute__ ((packed)); | 151 | } __packed; |
152 | 152 | ||
153 | #endif /* _LINUX_IF_HIPPI_H */ | 153 | #endif /* _LINUX_IF_HIPPI_H */ |
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index 9ea047aca795..c26a0e4f0ce8 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h | |||
@@ -84,8 +84,4 @@ extern int macvlan_link_register(struct rtnl_link_ops *ops); | |||
84 | extern netdev_tx_t macvlan_start_xmit(struct sk_buff *skb, | 84 | extern netdev_tx_t macvlan_start_xmit(struct sk_buff *skb, |
85 | struct net_device *dev); | 85 | struct net_device *dev); |
86 | 86 | ||
87 | |||
88 | extern struct sk_buff *(*macvlan_handle_frame_hook)(struct macvlan_port *, | ||
89 | struct sk_buff *); | ||
90 | |||
91 | #endif /* _LINUX_IF_MACVLAN_H */ | 87 | #endif /* _LINUX_IF_MACVLAN_H */ |
diff --git a/include/linux/if_packet.h b/include/linux/if_packet.h index 6ac23ef1801a..72bfa5a034dd 100644 --- a/include/linux/if_packet.h +++ b/include/linux/if_packet.h | |||
@@ -48,6 +48,7 @@ struct sockaddr_ll { | |||
48 | #define PACKET_LOSS 14 | 48 | #define PACKET_LOSS 14 |
49 | #define PACKET_VNET_HDR 15 | 49 | #define PACKET_VNET_HDR 15 |
50 | #define PACKET_TX_TIMESTAMP 16 | 50 | #define PACKET_TX_TIMESTAMP 16 |
51 | #define PACKET_TIMESTAMP 17 | ||
51 | 52 | ||
52 | struct tpacket_stats { | 53 | struct tpacket_stats { |
53 | unsigned int tp_packets; | 54 | unsigned int tp_packets; |
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h index a6577af0c4e6..1925e0c3f162 100644 --- a/include/linux/if_pppox.h +++ b/include/linux/if_pppox.h | |||
@@ -59,7 +59,7 @@ struct sockaddr_pppox { | |||
59 | union{ | 59 | union{ |
60 | struct pppoe_addr pppoe; | 60 | struct pppoe_addr pppoe; |
61 | }sa_addr; | 61 | }sa_addr; |
62 | }__attribute__ ((packed)); | 62 | } __packed; |
63 | 63 | ||
64 | /* The use of the above union isn't viable because the size of this | 64 | /* The use of the above union isn't viable because the size of this |
65 | * struct must stay fixed over time -- applications use sizeof(struct | 65 | * struct must stay fixed over time -- applications use sizeof(struct |
@@ -70,7 +70,7 @@ struct sockaddr_pppol2tp { | |||
70 | sa_family_t sa_family; /* address family, AF_PPPOX */ | 70 | sa_family_t sa_family; /* address family, AF_PPPOX */ |
71 | unsigned int sa_protocol; /* protocol identifier */ | 71 | unsigned int sa_protocol; /* protocol identifier */ |
72 | struct pppol2tp_addr pppol2tp; | 72 | struct pppol2tp_addr pppol2tp; |
73 | }__attribute__ ((packed)); | 73 | } __packed; |
74 | 74 | ||
75 | /* The L2TPv3 protocol changes tunnel and session ids from 16 to 32 | 75 | /* The L2TPv3 protocol changes tunnel and session ids from 16 to 32 |
76 | * bits. So we need a different sockaddr structure. | 76 | * bits. So we need a different sockaddr structure. |
@@ -79,7 +79,7 @@ struct sockaddr_pppol2tpv3 { | |||
79 | sa_family_t sa_family; /* address family, AF_PPPOX */ | 79 | sa_family_t sa_family; /* address family, AF_PPPOX */ |
80 | unsigned int sa_protocol; /* protocol identifier */ | 80 | unsigned int sa_protocol; /* protocol identifier */ |
81 | struct pppol2tpv3_addr pppol2tp; | 81 | struct pppol2tpv3_addr pppol2tp; |
82 | } __attribute__ ((packed)); | 82 | } __packed; |
83 | 83 | ||
84 | /********************************************************************* | 84 | /********************************************************************* |
85 | * | 85 | * |
@@ -129,7 +129,7 @@ struct pppoe_hdr { | |||
129 | __be16 sid; | 129 | __be16 sid; |
130 | __be16 length; | 130 | __be16 length; |
131 | struct pppoe_tag tag[0]; | 131 | struct pppoe_tag tag[0]; |
132 | } __attribute__ ((packed)); | 132 | } __packed; |
133 | 133 | ||
134 | /* Length of entire PPPoE + PPP header */ | 134 | /* Length of entire PPPoE + PPP header */ |
135 | #define PPPOE_SES_HLEN 8 | 135 | #define PPPOE_SES_HLEN 8 |
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 99e1ab7e3eec..940e21595351 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h | |||
@@ -58,7 +58,7 @@ struct ipv6_opt_hdr { | |||
58 | /* | 58 | /* |
59 | * TLV encoded option data follows. | 59 | * TLV encoded option data follows. |
60 | */ | 60 | */ |
61 | } __attribute__ ((packed)); /* required for some archs */ | 61 | } __packed; /* required for some archs */ |
62 | 62 | ||
63 | #define ipv6_destopt_hdr ipv6_opt_hdr | 63 | #define ipv6_destopt_hdr ipv6_opt_hdr |
64 | #define ipv6_hopopt_hdr ipv6_opt_hdr | 64 | #define ipv6_hopopt_hdr ipv6_opt_hdr |
@@ -99,7 +99,7 @@ struct ipv6_destopt_hao { | |||
99 | __u8 type; | 99 | __u8 type; |
100 | __u8 length; | 100 | __u8 length; |
101 | struct in6_addr addr; | 101 | struct in6_addr addr; |
102 | } __attribute__ ((__packed__)); | 102 | } __packed; |
103 | 103 | ||
104 | /* | 104 | /* |
105 | * IPv6 fixed header | 105 | * IPv6 fixed header |
diff --git a/include/linux/isdnif.h b/include/linux/isdnif.h index b9b5a684ed69..b8c23f88dd54 100644 --- a/include/linux/isdnif.h +++ b/include/linux/isdnif.h | |||
@@ -317,7 +317,7 @@ typedef struct T30_s { | |||
317 | __u8 r_scantime; | 317 | __u8 r_scantime; |
318 | __u8 r_id[FAXIDLEN]; | 318 | __u8 r_id[FAXIDLEN]; |
319 | __u8 r_code; | 319 | __u8 r_code; |
320 | } __attribute__((packed)) T30_s; | 320 | } __packed T30_s; |
321 | 321 | ||
322 | #define ISDN_TTY_FAX_CONN_IN 0 | 322 | #define ISDN_TTY_FAX_CONN_IN 0 |
323 | #define ISDN_TTY_FAX_CONN_OUT 1 | 323 | #define ISDN_TTY_FAX_CONN_OUT 1 |
diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h index 78c3bed1c3f5..b5e7f2202484 100644 --- a/include/linux/mISDNif.h +++ b/include/linux/mISDNif.h | |||
@@ -251,7 +251,7 @@ | |||
251 | struct mISDNhead { | 251 | struct mISDNhead { |
252 | unsigned int prim; | 252 | unsigned int prim; |
253 | unsigned int id; | 253 | unsigned int id; |
254 | } __attribute__((packed)); | 254 | } __packed; |
255 | 255 | ||
256 | #define MISDN_HEADER_LEN sizeof(struct mISDNhead) | 256 | #define MISDN_HEADER_LEN sizeof(struct mISDNhead) |
257 | #define MAX_DATA_SIZE 2048 | 257 | #define MAX_DATA_SIZE 2048 |
diff --git a/include/linux/nbd.h b/include/linux/nbd.h index 155719dab813..bb58854a8061 100644 --- a/include/linux/nbd.h +++ b/include/linux/nbd.h | |||
@@ -88,7 +88,7 @@ struct nbd_request { | |||
88 | char handle[8]; | 88 | char handle[8]; |
89 | __be64 from; | 89 | __be64 from; |
90 | __be32 len; | 90 | __be32 len; |
91 | } __attribute__ ((packed)); | 91 | } __packed; |
92 | 92 | ||
93 | /* | 93 | /* |
94 | * This is the reply packet that nbd-server sends back to the client after | 94 | * This is the reply packet that nbd-server sends back to the client after |
diff --git a/include/linux/ncp.h b/include/linux/ncp.h index 99f0adeeb3f3..3ace8370e61e 100644 --- a/include/linux/ncp.h +++ b/include/linux/ncp.h | |||
@@ -27,7 +27,7 @@ struct ncp_request_header { | |||
27 | __u8 conn_high; | 27 | __u8 conn_high; |
28 | __u8 function; | 28 | __u8 function; |
29 | __u8 data[0]; | 29 | __u8 data[0]; |
30 | } __attribute__((packed)); | 30 | } __packed; |
31 | 31 | ||
32 | #define NCP_REPLY (0x3333) | 32 | #define NCP_REPLY (0x3333) |
33 | #define NCP_WATCHDOG (0x3E3E) | 33 | #define NCP_WATCHDOG (0x3E3E) |
@@ -42,7 +42,7 @@ struct ncp_reply_header { | |||
42 | __u8 completion_code; | 42 | __u8 completion_code; |
43 | __u8 connection_state; | 43 | __u8 connection_state; |
44 | __u8 data[0]; | 44 | __u8 data[0]; |
45 | } __attribute__((packed)); | 45 | } __packed; |
46 | 46 | ||
47 | #define NCP_VOLNAME_LEN (16) | 47 | #define NCP_VOLNAME_LEN (16) |
48 | #define NCP_NUMBER_OF_VOLUMES (256) | 48 | #define NCP_NUMBER_OF_VOLUMES (256) |
@@ -158,7 +158,7 @@ struct nw_info_struct { | |||
158 | #ifdef __KERNEL__ | 158 | #ifdef __KERNEL__ |
159 | struct nw_nfs_info nfs; | 159 | struct nw_nfs_info nfs; |
160 | #endif | 160 | #endif |
161 | } __attribute__((packed)); | 161 | } __packed; |
162 | 162 | ||
163 | /* modify mask - use with MODIFY_DOS_INFO structure */ | 163 | /* modify mask - use with MODIFY_DOS_INFO structure */ |
164 | #define DM_ATTRIBUTES (cpu_to_le32(0x02)) | 164 | #define DM_ATTRIBUTES (cpu_to_le32(0x02)) |
@@ -190,12 +190,12 @@ struct nw_modify_dos_info { | |||
190 | __u16 inheritanceGrantMask; | 190 | __u16 inheritanceGrantMask; |
191 | __u16 inheritanceRevokeMask; | 191 | __u16 inheritanceRevokeMask; |
192 | __u32 maximumSpace; | 192 | __u32 maximumSpace; |
193 | } __attribute__((packed)); | 193 | } __packed; |
194 | 194 | ||
195 | struct nw_search_sequence { | 195 | struct nw_search_sequence { |
196 | __u8 volNumber; | 196 | __u8 volNumber; |
197 | __u32 dirBase; | 197 | __u32 dirBase; |
198 | __u32 sequence; | 198 | __u32 sequence; |
199 | } __attribute__((packed)); | 199 | } __packed; |
200 | 200 | ||
201 | #endif /* _LINUX_NCP_H */ | 201 | #endif /* _LINUX_NCP_H */ |
diff --git a/include/linux/ncp_fs_sb.h b/include/linux/ncp_fs_sb.h index 5ec9ca671687..8da05bc098ca 100644 --- a/include/linux/ncp_fs_sb.h +++ b/include/linux/ncp_fs_sb.h | |||
@@ -104,13 +104,13 @@ struct ncp_server { | |||
104 | 104 | ||
105 | unsigned int state; /* STREAM only: receiver state */ | 105 | unsigned int state; /* STREAM only: receiver state */ |
106 | struct { | 106 | struct { |
107 | __u32 magic __attribute__((packed)); | 107 | __u32 magic __packed; |
108 | __u32 len __attribute__((packed)); | 108 | __u32 len __packed; |
109 | __u16 type __attribute__((packed)); | 109 | __u16 type __packed; |
110 | __u16 p1 __attribute__((packed)); | 110 | __u16 p1 __packed; |
111 | __u16 p2 __attribute__((packed)); | 111 | __u16 p2 __packed; |
112 | __u16 p3 __attribute__((packed)); | 112 | __u16 p3 __packed; |
113 | __u16 type2 __attribute__((packed)); | 113 | __u16 type2 __packed; |
114 | } buf; /* STREAM only: temporary buffer */ | 114 | } buf; /* STREAM only: temporary buffer */ |
115 | unsigned char* ptr; /* STREAM only: pointer to data */ | 115 | unsigned char* ptr; /* STREAM only: pointer to data */ |
116 | size_t len; /* STREAM only: length of data to receive */ | 116 | size_t len; /* STREAM only: length of data to receive */ |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 40291f375024..5156b806924c 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -381,6 +381,8 @@ enum gro_result { | |||
381 | }; | 381 | }; |
382 | typedef enum gro_result gro_result_t; | 382 | typedef enum gro_result gro_result_t; |
383 | 383 | ||
384 | typedef struct sk_buff *rx_handler_func_t(struct sk_buff *skb); | ||
385 | |||
384 | extern void __napi_schedule(struct napi_struct *n); | 386 | extern void __napi_schedule(struct napi_struct *n); |
385 | 387 | ||
386 | static inline int napi_disable_pending(struct napi_struct *n) | 388 | static inline int napi_disable_pending(struct napi_struct *n) |
@@ -957,6 +959,7 @@ struct net_device { | |||
957 | #endif | 959 | #endif |
958 | 960 | ||
959 | struct netdev_queue rx_queue; | 961 | struct netdev_queue rx_queue; |
962 | rx_handler_func_t *rx_handler; | ||
960 | 963 | ||
961 | struct netdev_queue *_tx ____cacheline_aligned_in_smp; | 964 | struct netdev_queue *_tx ____cacheline_aligned_in_smp; |
962 | 965 | ||
@@ -1087,11 +1090,7 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev, | |||
1087 | static inline | 1090 | static inline |
1088 | struct net *dev_net(const struct net_device *dev) | 1091 | struct net *dev_net(const struct net_device *dev) |
1089 | { | 1092 | { |
1090 | #ifdef CONFIG_NET_NS | 1093 | return read_pnet(&dev->nd_net); |
1091 | return dev->nd_net; | ||
1092 | #else | ||
1093 | return &init_net; | ||
1094 | #endif | ||
1095 | } | 1094 | } |
1096 | 1095 | ||
1097 | static inline | 1096 | static inline |
@@ -1693,6 +1692,10 @@ static inline void napi_free_frags(struct napi_struct *napi) | |||
1693 | napi->skb = NULL; | 1692 | napi->skb = NULL; |
1694 | } | 1693 | } |
1695 | 1694 | ||
1695 | extern int netdev_rx_handler_register(struct net_device *dev, | ||
1696 | rx_handler_func_t *rx_handler); | ||
1697 | extern void netdev_rx_handler_unregister(struct net_device *dev); | ||
1698 | |||
1696 | extern void netif_nit_deliver(struct sk_buff *skb); | 1699 | extern void netif_nit_deliver(struct sk_buff *skb); |
1697 | extern int dev_valid_name(const char *name); | 1700 | extern int dev_valid_name(const char *name); |
1698 | extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *); | 1701 | extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *); |
@@ -1772,6 +1775,8 @@ extern void netif_carrier_on(struct net_device *dev); | |||
1772 | 1775 | ||
1773 | extern void netif_carrier_off(struct net_device *dev); | 1776 | extern void netif_carrier_off(struct net_device *dev); |
1774 | 1777 | ||
1778 | extern void netif_notify_peers(struct net_device *dev); | ||
1779 | |||
1775 | /** | 1780 | /** |
1776 | * netif_dormant_on - mark device as dormant. | 1781 | * netif_dormant_on - mark device as dormant. |
1777 | * @dev: network device | 1782 | * @dev: network device |
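
Note: taken together, the rx_handler_func_t typedef, the per-device rx_handler pointer and the register/unregister prototypes added above form the generic receive hook that replaces the bridge and macvlan frame hooks removed earlier in this series. A rough sketch of how a subsystem might attach to it, using only the two-argument API declared here (my_rx_handler/my_attach/my_detach are hypothetical names, and the rtnl locking is an assumption about the expected calling context):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>

/* Hypothetical handler: inspect the frame and hand it back to the stack.
 * Returning the skb lets normal receive processing continue; a handler
 * that consumes the frame would return NULL instead. */
static struct sk_buff *my_rx_handler(struct sk_buff *skb)
{
	return skb;
}

static int my_attach(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(dev, my_rx_handler);
	rtnl_unlock();
	return err;
}

static void my_detach(struct net_device *dev)
{
	rtnl_lock();
	netdev_rx_handler_unregister(dev);
	rtnl_unlock();
}
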
diff --git a/include/linux/notifier.h b/include/linux/notifier.h index 540703b555cb..b2f1a4d83550 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h | |||
@@ -210,6 +210,7 @@ static inline int notifier_to_errno(int ret) | |||
210 | #define NETDEV_POST_INIT 0x0010 | 210 | #define NETDEV_POST_INIT 0x0010 |
211 | #define NETDEV_UNREGISTER_BATCH 0x0011 | 211 | #define NETDEV_UNREGISTER_BATCH 0x0011 |
212 | #define NETDEV_BONDING_DESLAVE 0x0012 | 212 | #define NETDEV_BONDING_DESLAVE 0x0012 |
213 | #define NETDEV_NOTIFY_PEERS 0x0013 | ||
213 | 214 | ||
214 | #define SYS_DOWN 0x0001 /* Notify of system down */ | 215 | #define SYS_DOWN 0x0001 /* Notify of system down */ |
215 | #define SYS_RESTART SYS_DOWN | 216 | #define SYS_RESTART SYS_DOWN |
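
Note: NETDEV_NOTIFY_PEERS pairs with the netif_notify_peers() helper added to netdevice.h and the xen-netfront call earlier in this patch set: a device raises it after an event such as migration or reconnect so that peers (bridges, switches, ARP caches) can relearn its address. A minimal, hedged sketch of a listener on the netdevice notifier chain (my_netdev_event/my_netdev_nb are hypothetical; in this kernel generation the notifier payload is assumed to be the struct net_device pointer itself):

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>

static int my_netdev_event(struct notifier_block *nb,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;	/* pre-netdev_notifier_info era */

	if (event == NETDEV_NOTIFY_PEERS)
		pr_info("%s: peers notified\n", dev->name);

	return NOTIFY_DONE;
}

static struct notifier_block my_netdev_nb = {
	.notifier_call = my_netdev_event,
};

/* register_netdevice_notifier(&my_netdev_nb) attaches the listener;
 * unregister_netdevice_notifier(&my_netdev_nb) removes it. */
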
diff --git a/include/linux/phonet.h b/include/linux/phonet.h index e5126cff9b2a..24426c3d6b5a 100644 --- a/include/linux/phonet.h +++ b/include/linux/phonet.h | |||
@@ -56,7 +56,7 @@ struct phonethdr { | |||
56 | __be16 pn_length; | 56 | __be16 pn_length; |
57 | __u8 pn_robj; | 57 | __u8 pn_robj; |
58 | __u8 pn_sobj; | 58 | __u8 pn_sobj; |
59 | } __attribute__((packed)); | 59 | } __packed; |
60 | 60 | ||
61 | /* Common Phonet payload header */ | 61 | /* Common Phonet payload header */ |
62 | struct phonetmsg { | 62 | struct phonetmsg { |
@@ -98,7 +98,7 @@ struct sockaddr_pn { | |||
98 | __u8 spn_dev; | 98 | __u8 spn_dev; |
99 | __u8 spn_resource; | 99 | __u8 spn_resource; |
100 | __u8 spn_zero[sizeof(struct sockaddr) - sizeof(sa_family_t) - 3]; | 100 | __u8 spn_zero[sizeof(struct sockaddr) - sizeof(sa_family_t) - 3]; |
101 | } __attribute__ ((packed)); | 101 | } __packed; |
102 | 102 | ||
103 | /* Well known address */ | 103 | /* Well known address */ |
104 | #define PN_DEV_PC 0x10 | 104 | #define PN_DEV_PC 0x10 |
diff --git a/include/linux/rds.h b/include/linux/rds.h index cab4994c2f63..24bce3ded9ea 100644 --- a/include/linux/rds.h +++ b/include/linux/rds.h | |||
@@ -100,7 +100,7 @@ | |||
100 | struct rds_info_counter { | 100 | struct rds_info_counter { |
101 | u_int8_t name[32]; | 101 | u_int8_t name[32]; |
102 | u_int64_t value; | 102 | u_int64_t value; |
103 | } __attribute__((packed)); | 103 | } __packed; |
104 | 104 | ||
105 | #define RDS_INFO_CONNECTION_FLAG_SENDING 0x01 | 105 | #define RDS_INFO_CONNECTION_FLAG_SENDING 0x01 |
106 | #define RDS_INFO_CONNECTION_FLAG_CONNECTING 0x02 | 106 | #define RDS_INFO_CONNECTION_FLAG_CONNECTING 0x02 |
@@ -115,7 +115,7 @@ struct rds_info_connection { | |||
115 | __be32 faddr; | 115 | __be32 faddr; |
116 | u_int8_t transport[TRANSNAMSIZ]; /* null term ascii */ | 116 | u_int8_t transport[TRANSNAMSIZ]; /* null term ascii */ |
117 | u_int8_t flags; | 117 | u_int8_t flags; |
118 | } __attribute__((packed)); | 118 | } __packed; |
119 | 119 | ||
120 | struct rds_info_flow { | 120 | struct rds_info_flow { |
121 | __be32 laddr; | 121 | __be32 laddr; |
@@ -123,7 +123,7 @@ struct rds_info_flow { | |||
123 | u_int32_t bytes; | 123 | u_int32_t bytes; |
124 | __be16 lport; | 124 | __be16 lport; |
125 | __be16 fport; | 125 | __be16 fport; |
126 | } __attribute__((packed)); | 126 | } __packed; |
127 | 127 | ||
128 | #define RDS_INFO_MESSAGE_FLAG_ACK 0x01 | 128 | #define RDS_INFO_MESSAGE_FLAG_ACK 0x01 |
129 | #define RDS_INFO_MESSAGE_FLAG_FAST_ACK 0x02 | 129 | #define RDS_INFO_MESSAGE_FLAG_FAST_ACK 0x02 |
@@ -136,7 +136,7 @@ struct rds_info_message { | |||
136 | __be16 lport; | 136 | __be16 lport; |
137 | __be16 fport; | 137 | __be16 fport; |
138 | u_int8_t flags; | 138 | u_int8_t flags; |
139 | } __attribute__((packed)); | 139 | } __packed; |
140 | 140 | ||
141 | struct rds_info_socket { | 141 | struct rds_info_socket { |
142 | u_int32_t sndbuf; | 142 | u_int32_t sndbuf; |
@@ -146,7 +146,7 @@ struct rds_info_socket { | |||
146 | __be16 connected_port; | 146 | __be16 connected_port; |
147 | u_int32_t rcvbuf; | 147 | u_int32_t rcvbuf; |
148 | u_int64_t inum; | 148 | u_int64_t inum; |
149 | } __attribute__((packed)); | 149 | } __packed; |
150 | 150 | ||
151 | struct rds_info_tcp_socket { | 151 | struct rds_info_tcp_socket { |
152 | __be32 local_addr; | 152 | __be32 local_addr; |
@@ -158,7 +158,7 @@ struct rds_info_tcp_socket { | |||
158 | u_int32_t last_sent_nxt; | 158 | u_int32_t last_sent_nxt; |
159 | u_int32_t last_expected_una; | 159 | u_int32_t last_expected_una; |
160 | u_int32_t last_seen_una; | 160 | u_int32_t last_seen_una; |
161 | } __attribute__((packed)); | 161 | } __packed; |
162 | 162 | ||
163 | #define RDS_IB_GID_LEN 16 | 163 | #define RDS_IB_GID_LEN 16 |
164 | struct rds_info_rdma_connection { | 164 | struct rds_info_rdma_connection { |
diff --git a/include/linux/sctp.h b/include/linux/sctp.h index c20d3ce673c0..c11a28706fa4 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h | |||
@@ -61,7 +61,7 @@ typedef struct sctphdr { | |||
61 | __be16 dest; | 61 | __be16 dest; |
62 | __be32 vtag; | 62 | __be32 vtag; |
63 | __le32 checksum; | 63 | __le32 checksum; |
64 | } __attribute__((packed)) sctp_sctphdr_t; | 64 | } __packed sctp_sctphdr_t; |
65 | 65 | ||
66 | #ifdef __KERNEL__ | 66 | #ifdef __KERNEL__ |
67 | #include <linux/skbuff.h> | 67 | #include <linux/skbuff.h> |
@@ -77,7 +77,7 @@ typedef struct sctp_chunkhdr { | |||
77 | __u8 type; | 77 | __u8 type; |
78 | __u8 flags; | 78 | __u8 flags; |
79 | __be16 length; | 79 | __be16 length; |
80 | } __attribute__((packed)) sctp_chunkhdr_t; | 80 | } __packed sctp_chunkhdr_t; |
81 | 81 | ||
82 | 82 | ||
83 | /* Section 3.2. Chunk Type Values. | 83 | /* Section 3.2. Chunk Type Values. |
@@ -167,7 +167,7 @@ enum { SCTP_CHUNK_FLAG_T = 0x01 }; | |||
167 | typedef struct sctp_paramhdr { | 167 | typedef struct sctp_paramhdr { |
168 | __be16 type; | 168 | __be16 type; |
169 | __be16 length; | 169 | __be16 length; |
170 | } __attribute__((packed)) sctp_paramhdr_t; | 170 | } __packed sctp_paramhdr_t; |
171 | 171 | ||
172 | typedef enum { | 172 | typedef enum { |
173 | 173 | ||
@@ -228,12 +228,12 @@ typedef struct sctp_datahdr { | |||
228 | __be16 ssn; | 228 | __be16 ssn; |
229 | __be32 ppid; | 229 | __be32 ppid; |
230 | __u8 payload[0]; | 230 | __u8 payload[0]; |
231 | } __attribute__((packed)) sctp_datahdr_t; | 231 | } __packed sctp_datahdr_t; |
232 | 232 | ||
233 | typedef struct sctp_data_chunk { | 233 | typedef struct sctp_data_chunk { |
234 | sctp_chunkhdr_t chunk_hdr; | 234 | sctp_chunkhdr_t chunk_hdr; |
235 | sctp_datahdr_t data_hdr; | 235 | sctp_datahdr_t data_hdr; |
236 | } __attribute__((packed)) sctp_data_chunk_t; | 236 | } __packed sctp_data_chunk_t; |
237 | 237 | ||
238 | /* DATA Chuck Specific Flags */ | 238 | /* DATA Chuck Specific Flags */ |
239 | enum { | 239 | enum { |
@@ -259,78 +259,78 @@ typedef struct sctp_inithdr { | |||
259 | __be16 num_inbound_streams; | 259 | __be16 num_inbound_streams; |
260 | __be32 initial_tsn; | 260 | __be32 initial_tsn; |
261 | __u8 params[0]; | 261 | __u8 params[0]; |
262 | } __attribute__((packed)) sctp_inithdr_t; | 262 | } __packed sctp_inithdr_t; |
263 | 263 | ||
264 | typedef struct sctp_init_chunk { | 264 | typedef struct sctp_init_chunk { |
265 | sctp_chunkhdr_t chunk_hdr; | 265 | sctp_chunkhdr_t chunk_hdr; |
266 | sctp_inithdr_t init_hdr; | 266 | sctp_inithdr_t init_hdr; |
267 | } __attribute__((packed)) sctp_init_chunk_t; | 267 | } __packed sctp_init_chunk_t; |
268 | 268 | ||
269 | 269 | ||
270 | /* Section 3.3.2.1. IPv4 Address Parameter (5) */ | 270 | /* Section 3.3.2.1. IPv4 Address Parameter (5) */ |
271 | typedef struct sctp_ipv4addr_param { | 271 | typedef struct sctp_ipv4addr_param { |
272 | sctp_paramhdr_t param_hdr; | 272 | sctp_paramhdr_t param_hdr; |
273 | struct in_addr addr; | 273 | struct in_addr addr; |
274 | } __attribute__((packed)) sctp_ipv4addr_param_t; | 274 | } __packed sctp_ipv4addr_param_t; |
275 | 275 | ||
276 | /* Section 3.3.2.1. IPv6 Address Parameter (6) */ | 276 | /* Section 3.3.2.1. IPv6 Address Parameter (6) */ |
277 | typedef struct sctp_ipv6addr_param { | 277 | typedef struct sctp_ipv6addr_param { |
278 | sctp_paramhdr_t param_hdr; | 278 | sctp_paramhdr_t param_hdr; |
279 | struct in6_addr addr; | 279 | struct in6_addr addr; |
280 | } __attribute__((packed)) sctp_ipv6addr_param_t; | 280 | } __packed sctp_ipv6addr_param_t; |
281 | 281 | ||
282 | /* Section 3.3.2.1 Cookie Preservative (9) */ | 282 | /* Section 3.3.2.1 Cookie Preservative (9) */ |
283 | typedef struct sctp_cookie_preserve_param { | 283 | typedef struct sctp_cookie_preserve_param { |
284 | sctp_paramhdr_t param_hdr; | 284 | sctp_paramhdr_t param_hdr; |
285 | __be32 lifespan_increment; | 285 | __be32 lifespan_increment; |
286 | } __attribute__((packed)) sctp_cookie_preserve_param_t; | 286 | } __packed sctp_cookie_preserve_param_t; |
287 | 287 | ||
288 | /* Section 3.3.2.1 Host Name Address (11) */ | 288 | /* Section 3.3.2.1 Host Name Address (11) */ |
289 | typedef struct sctp_hostname_param { | 289 | typedef struct sctp_hostname_param { |
290 | sctp_paramhdr_t param_hdr; | 290 | sctp_paramhdr_t param_hdr; |
291 | uint8_t hostname[0]; | 291 | uint8_t hostname[0]; |
292 | } __attribute__((packed)) sctp_hostname_param_t; | 292 | } __packed sctp_hostname_param_t; |
293 | 293 | ||
294 | /* Section 3.3.2.1 Supported Address Types (12) */ | 294 | /* Section 3.3.2.1 Supported Address Types (12) */ |
295 | typedef struct sctp_supported_addrs_param { | 295 | typedef struct sctp_supported_addrs_param { |
296 | sctp_paramhdr_t param_hdr; | 296 | sctp_paramhdr_t param_hdr; |
297 | __be16 types[0]; | 297 | __be16 types[0]; |
298 | } __attribute__((packed)) sctp_supported_addrs_param_t; | 298 | } __packed sctp_supported_addrs_param_t; |
299 | 299 | ||
300 | /* Appendix A. ECN Capable (32768) */ | 300 | /* Appendix A. ECN Capable (32768) */ |
301 | typedef struct sctp_ecn_capable_param { | 301 | typedef struct sctp_ecn_capable_param { |
302 | sctp_paramhdr_t param_hdr; | 302 | sctp_paramhdr_t param_hdr; |
303 | } __attribute__((packed)) sctp_ecn_capable_param_t; | 303 | } __packed sctp_ecn_capable_param_t; |
304 | 304 | ||
305 | /* ADDIP Section 3.2.6 Adaptation Layer Indication */ | 305 | /* ADDIP Section 3.2.6 Adaptation Layer Indication */ |
306 | typedef struct sctp_adaptation_ind_param { | 306 | typedef struct sctp_adaptation_ind_param { |
307 | struct sctp_paramhdr param_hdr; | 307 | struct sctp_paramhdr param_hdr; |
308 | __be32 adaptation_ind; | 308 | __be32 adaptation_ind; |
309 | } __attribute__((packed)) sctp_adaptation_ind_param_t; | 309 | } __packed sctp_adaptation_ind_param_t; |
310 | 310 | ||
311 | /* ADDIP Section 4.2.7 Supported Extensions Parameter */ | 311 | /* ADDIP Section 4.2.7 Supported Extensions Parameter */ |
312 | typedef struct sctp_supported_ext_param { | 312 | typedef struct sctp_supported_ext_param { |
313 | struct sctp_paramhdr param_hdr; | 313 | struct sctp_paramhdr param_hdr; |
314 | __u8 chunks[0]; | 314 | __u8 chunks[0]; |
315 | } __attribute__((packed)) sctp_supported_ext_param_t; | 315 | } __packed sctp_supported_ext_param_t; |
316 | 316 | ||
317 | /* AUTH Section 3.1 Random */ | 317 | /* AUTH Section 3.1 Random */ |
318 | typedef struct sctp_random_param { | 318 | typedef struct sctp_random_param { |
319 | sctp_paramhdr_t param_hdr; | 319 | sctp_paramhdr_t param_hdr; |
320 | __u8 random_val[0]; | 320 | __u8 random_val[0]; |
321 | } __attribute__((packed)) sctp_random_param_t; | 321 | } __packed sctp_random_param_t; |
322 | 322 | ||
323 | /* AUTH Section 3.2 Chunk List */ | 323 | /* AUTH Section 3.2 Chunk List */ |
324 | typedef struct sctp_chunks_param { | 324 | typedef struct sctp_chunks_param { |
325 | sctp_paramhdr_t param_hdr; | 325 | sctp_paramhdr_t param_hdr; |
326 | __u8 chunks[0]; | 326 | __u8 chunks[0]; |
327 | } __attribute__((packed)) sctp_chunks_param_t; | 327 | } __packed sctp_chunks_param_t; |
328 | 328 | ||
329 | /* AUTH Section 3.3 HMAC Algorithm */ | 329 | /* AUTH Section 3.3 HMAC Algorithm */ |
330 | typedef struct sctp_hmac_algo_param { | 330 | typedef struct sctp_hmac_algo_param { |
331 | sctp_paramhdr_t param_hdr; | 331 | sctp_paramhdr_t param_hdr; |
332 | __be16 hmac_ids[0]; | 332 | __be16 hmac_ids[0]; |
333 | } __attribute__((packed)) sctp_hmac_algo_param_t; | 333 | } __packed sctp_hmac_algo_param_t; |
334 | 334 | ||
335 | /* RFC 2960. Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2): | 335 | /* RFC 2960. Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2): |
336 | * The INIT ACK chunk is used to acknowledge the initiation of an SCTP | 336 | * The INIT ACK chunk is used to acknowledge the initiation of an SCTP |
@@ -342,13 +342,13 @@ typedef sctp_init_chunk_t sctp_initack_chunk_t; | |||
342 | typedef struct sctp_cookie_param { | 342 | typedef struct sctp_cookie_param { |
343 | sctp_paramhdr_t p; | 343 | sctp_paramhdr_t p; |
344 | __u8 body[0]; | 344 | __u8 body[0]; |
345 | } __attribute__((packed)) sctp_cookie_param_t; | 345 | } __packed sctp_cookie_param_t; |
346 | 346 | ||
347 | /* Section 3.3.3.1 Unrecognized Parameters (8) */ | 347 | /* Section 3.3.3.1 Unrecognized Parameters (8) */ |
348 | typedef struct sctp_unrecognized_param { | 348 | typedef struct sctp_unrecognized_param { |
349 | sctp_paramhdr_t param_hdr; | 349 | sctp_paramhdr_t param_hdr; |
350 | sctp_paramhdr_t unrecognized; | 350 | sctp_paramhdr_t unrecognized; |
351 | } __attribute__((packed)) sctp_unrecognized_param_t; | 351 | } __packed sctp_unrecognized_param_t; |
352 | 352 | ||
353 | 353 | ||
354 | 354 | ||
@@ -363,7 +363,7 @@ typedef struct sctp_unrecognized_param { | |||
363 | typedef struct sctp_gap_ack_block { | 363 | typedef struct sctp_gap_ack_block { |
364 | __be16 start; | 364 | __be16 start; |
365 | __be16 end; | 365 | __be16 end; |
366 | } __attribute__((packed)) sctp_gap_ack_block_t; | 366 | } __packed sctp_gap_ack_block_t; |
367 | 367 | ||
368 | typedef __be32 sctp_dup_tsn_t; | 368 | typedef __be32 sctp_dup_tsn_t; |
369 | 369 | ||
@@ -378,12 +378,12 @@ typedef struct sctp_sackhdr { | |||
378 | __be16 num_gap_ack_blocks; | 378 | __be16 num_gap_ack_blocks; |
379 | __be16 num_dup_tsns; | 379 | __be16 num_dup_tsns; |
380 | sctp_sack_variable_t variable[0]; | 380 | sctp_sack_variable_t variable[0]; |
381 | } __attribute__((packed)) sctp_sackhdr_t; | 381 | } __packed sctp_sackhdr_t; |
382 | 382 | ||
383 | typedef struct sctp_sack_chunk { | 383 | typedef struct sctp_sack_chunk { |
384 | sctp_chunkhdr_t chunk_hdr; | 384 | sctp_chunkhdr_t chunk_hdr; |
385 | sctp_sackhdr_t sack_hdr; | 385 | sctp_sackhdr_t sack_hdr; |
386 | } __attribute__((packed)) sctp_sack_chunk_t; | 386 | } __packed sctp_sack_chunk_t; |
387 | 387 | ||
388 | 388 | ||
389 | /* RFC 2960. Section 3.3.5 Heartbeat Request (HEARTBEAT) (4): | 389 | /* RFC 2960. Section 3.3.5 Heartbeat Request (HEARTBEAT) (4): |
@@ -395,12 +395,12 @@ typedef struct sctp_sack_chunk { | |||
395 | 395 | ||
396 | typedef struct sctp_heartbeathdr { | 396 | typedef struct sctp_heartbeathdr { |
397 | sctp_paramhdr_t info; | 397 | sctp_paramhdr_t info; |
398 | } __attribute__((packed)) sctp_heartbeathdr_t; | 398 | } __packed sctp_heartbeathdr_t; |
399 | 399 | ||
400 | typedef struct sctp_heartbeat_chunk { | 400 | typedef struct sctp_heartbeat_chunk { |
401 | sctp_chunkhdr_t chunk_hdr; | 401 | sctp_chunkhdr_t chunk_hdr; |
402 | sctp_heartbeathdr_t hb_hdr; | 402 | sctp_heartbeathdr_t hb_hdr; |
403 | } __attribute__((packed)) sctp_heartbeat_chunk_t; | 403 | } __packed sctp_heartbeat_chunk_t; |
404 | 404 | ||
405 | 405 | ||
406 | /* For the abort and shutdown ACK we must carry the init tag in the | 406 | /* For the abort and shutdown ACK we must carry the init tag in the |
@@ -409,7 +409,7 @@ typedef struct sctp_heartbeat_chunk { | |||
409 | */ | 409 | */ |
410 | typedef struct sctp_abort_chunk { | 410 | typedef struct sctp_abort_chunk { |
411 | sctp_chunkhdr_t uh; | 411 | sctp_chunkhdr_t uh; |
412 | } __attribute__((packed)) sctp_abort_chunk_t; | 412 | } __packed sctp_abort_chunk_t; |
413 | 413 | ||
414 | 414 | ||
415 | /* For the graceful shutdown we must carry the tag (in common header) | 415 | /* For the graceful shutdown we must carry the tag (in common header) |
@@ -417,12 +417,12 @@ typedef struct sctp_abort_chunk { | |||
417 | */ | 417 | */ |
418 | typedef struct sctp_shutdownhdr { | 418 | typedef struct sctp_shutdownhdr { |
419 | __be32 cum_tsn_ack; | 419 | __be32 cum_tsn_ack; |
420 | } __attribute__((packed)) sctp_shutdownhdr_t; | 420 | } __packed sctp_shutdownhdr_t; |
421 | 421 | ||
422 | struct sctp_shutdown_chunk_t { | 422 | struct sctp_shutdown_chunk_t { |
423 | sctp_chunkhdr_t chunk_hdr; | 423 | sctp_chunkhdr_t chunk_hdr; |
424 | sctp_shutdownhdr_t shutdown_hdr; | 424 | sctp_shutdownhdr_t shutdown_hdr; |
425 | } __attribute__ ((packed)); | 425 | } __packed; |
426 | 426 | ||
427 | /* RFC 2960. Section 3.3.10 Operation Error (ERROR) (9) */ | 427 | /* RFC 2960. Section 3.3.10 Operation Error (ERROR) (9) */ |
428 | 428 | ||
@@ -430,12 +430,12 @@ typedef struct sctp_errhdr { | |||
430 | __be16 cause; | 430 | __be16 cause; |
431 | __be16 length; | 431 | __be16 length; |
432 | __u8 variable[0]; | 432 | __u8 variable[0]; |
433 | } __attribute__((packed)) sctp_errhdr_t; | 433 | } __packed sctp_errhdr_t; |
434 | 434 | ||
435 | typedef struct sctp_operr_chunk { | 435 | typedef struct sctp_operr_chunk { |
436 | sctp_chunkhdr_t chunk_hdr; | 436 | sctp_chunkhdr_t chunk_hdr; |
437 | sctp_errhdr_t err_hdr; | 437 | sctp_errhdr_t err_hdr; |
438 | } __attribute__((packed)) sctp_operr_chunk_t; | 438 | } __packed sctp_operr_chunk_t; |
439 | 439 | ||
440 | /* RFC 2960 3.3.10 - Operation Error | 440 | /* RFC 2960 3.3.10 - Operation Error |
441 | * | 441 | * |
@@ -525,7 +525,7 @@ typedef struct sctp_ecnehdr { | |||
525 | typedef struct sctp_ecne_chunk { | 525 | typedef struct sctp_ecne_chunk { |
526 | sctp_chunkhdr_t chunk_hdr; | 526 | sctp_chunkhdr_t chunk_hdr; |
527 | sctp_ecnehdr_t ence_hdr; | 527 | sctp_ecnehdr_t ence_hdr; |
528 | } __attribute__((packed)) sctp_ecne_chunk_t; | 528 | } __packed sctp_ecne_chunk_t; |
529 | 529 | ||
530 | /* RFC 2960. Appendix A. Explicit Congestion Notification. | 530 | /* RFC 2960. Appendix A. Explicit Congestion Notification. |
531 | * Congestion Window Reduced (CWR) (13) | 531 | * Congestion Window Reduced (CWR) (13) |
@@ -537,7 +537,7 @@ typedef struct sctp_cwrhdr { | |||
537 | typedef struct sctp_cwr_chunk { | 537 | typedef struct sctp_cwr_chunk { |
538 | sctp_chunkhdr_t chunk_hdr; | 538 | sctp_chunkhdr_t chunk_hdr; |
539 | sctp_cwrhdr_t cwr_hdr; | 539 | sctp_cwrhdr_t cwr_hdr; |
540 | } __attribute__((packed)) sctp_cwr_chunk_t; | 540 | } __packed sctp_cwr_chunk_t; |
541 | 541 | ||
542 | /* PR-SCTP | 542 | /* PR-SCTP |
543 | * 3.2 Forward Cumulative TSN Chunk Definition (FORWARD TSN) | 543 | * 3.2 Forward Cumulative TSN Chunk Definition (FORWARD TSN) |
@@ -588,17 +588,17 @@ typedef struct sctp_cwr_chunk { | |||
588 | struct sctp_fwdtsn_skip { | 588 | struct sctp_fwdtsn_skip { |
589 | __be16 stream; | 589 | __be16 stream; |
590 | __be16 ssn; | 590 | __be16 ssn; |
591 | } __attribute__((packed)); | 591 | } __packed; |
592 | 592 | ||
593 | struct sctp_fwdtsn_hdr { | 593 | struct sctp_fwdtsn_hdr { |
594 | __be32 new_cum_tsn; | 594 | __be32 new_cum_tsn; |
595 | struct sctp_fwdtsn_skip skip[0]; | 595 | struct sctp_fwdtsn_skip skip[0]; |
596 | } __attribute((packed)); | 596 | } __packed; |
597 | 597 | ||
598 | struct sctp_fwdtsn_chunk { | 598 | struct sctp_fwdtsn_chunk { |
599 | struct sctp_chunkhdr chunk_hdr; | 599 | struct sctp_chunkhdr chunk_hdr; |
600 | struct sctp_fwdtsn_hdr fwdtsn_hdr; | 600 | struct sctp_fwdtsn_hdr fwdtsn_hdr; |
601 | } __attribute((packed)); | 601 | } __packed; |
602 | 602 | ||
603 | 603 | ||
604 | /* ADDIP | 604 | /* ADDIP |
@@ -636,17 +636,17 @@ struct sctp_fwdtsn_chunk { | |||
636 | typedef struct sctp_addip_param { | 636 | typedef struct sctp_addip_param { |
637 | sctp_paramhdr_t param_hdr; | 637 | sctp_paramhdr_t param_hdr; |
638 | __be32 crr_id; | 638 | __be32 crr_id; |
639 | } __attribute__((packed)) sctp_addip_param_t; | 639 | } __packed sctp_addip_param_t; |
640 | 640 | ||
641 | typedef struct sctp_addiphdr { | 641 | typedef struct sctp_addiphdr { |
642 | __be32 serial; | 642 | __be32 serial; |
643 | __u8 params[0]; | 643 | __u8 params[0]; |
644 | } __attribute__((packed)) sctp_addiphdr_t; | 644 | } __packed sctp_addiphdr_t; |
645 | 645 | ||
646 | typedef struct sctp_addip_chunk { | 646 | typedef struct sctp_addip_chunk { |
647 | sctp_chunkhdr_t chunk_hdr; | 647 | sctp_chunkhdr_t chunk_hdr; |
648 | sctp_addiphdr_t addip_hdr; | 648 | sctp_addiphdr_t addip_hdr; |
649 | } __attribute__((packed)) sctp_addip_chunk_t; | 649 | } __packed sctp_addip_chunk_t; |
650 | 650 | ||
651 | /* AUTH | 651 | /* AUTH |
652 | * Section 4.1 Authentication Chunk (AUTH) | 652 | * Section 4.1 Authentication Chunk (AUTH) |
@@ -701,11 +701,11 @@ typedef struct sctp_authhdr { | |||
701 | __be16 shkey_id; | 701 | __be16 shkey_id; |
702 | __be16 hmac_id; | 702 | __be16 hmac_id; |
703 | __u8 hmac[0]; | 703 | __u8 hmac[0]; |
704 | } __attribute__((packed)) sctp_authhdr_t; | 704 | } __packed sctp_authhdr_t; |
705 | 705 | ||
706 | typedef struct sctp_auth_chunk { | 706 | typedef struct sctp_auth_chunk { |
707 | sctp_chunkhdr_t chunk_hdr; | 707 | sctp_chunkhdr_t chunk_hdr; |
708 | sctp_authhdr_t auth_hdr; | 708 | sctp_authhdr_t auth_hdr; |
709 | } __attribute__((packed)) sctp_auth_chunk_t; | 709 | } __packed sctp_auth_chunk_t; |
710 | 710 | ||
711 | #endif /* __LINUX_SCTP_H__ */ | 711 | #endif /* __LINUX_SCTP_H__ */ |
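Note: the sctp.h hunks above are mechanical; the open-coded GCC attribute is replaced by the kernel's __packed shorthand, which is defined in include/linux/compiler-gcc.h essentially as "#define __packed __attribute__((packed))". A minimal standalone sketch of the equivalence (struct names here are illustrative only, not from the patch):

/* Illustration only -- not part of this patch. */
#include <stdint.h>

#define __packed __attribute__((packed))   /* as in include/linux/compiler-gcc.h */

struct old_style { uint8_t type; uint16_t length; } __attribute__((packed));
struct new_style { uint8_t type; uint16_t length; } __packed;

/* Both spellings request the same unpadded layout. */
_Static_assert(sizeof(struct old_style) == 3, "no padding");
_Static_assert(sizeof(struct new_style) == 3, "no padding");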
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index bf243fc54959..645e78d395fd 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -2129,7 +2129,8 @@ static inline bool skb_warn_if_lro(const struct sk_buff *skb) | |||
2129 | /* LRO sets gso_size but not gso_type, whereas if GSO is really | 2129 | /* LRO sets gso_size but not gso_type, whereas if GSO is really |
2130 | * wanted then gso_type will be set. */ | 2130 | * wanted then gso_type will be set. */ |
2131 | struct skb_shared_info *shinfo = skb_shinfo(skb); | 2131 | struct skb_shared_info *shinfo = skb_shinfo(skb); |
2132 | if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) { | 2132 | if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 && |
2133 | unlikely(shinfo->gso_type == 0)) { | ||
2133 | __skb_warn_lro_forwarding(skb); | 2134 | __skb_warn_lro_forwarding(skb); |
2134 | return true; | 2135 | return true; |
2135 | } | 2136 | } |
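For context on the skb_warn_if_lro() change above: the new skb_is_nonlinear() test means the gso_size/gso_type fields are only consulted for buffers that actually carry paged or chained data. The helper is roughly the following (hedged reconstruction of include/linux/skbuff.h from this era):

/* Context only -- not part of this patch. */
static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;	/* non-zero iff payload lives in frags/frag_list */
}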
diff --git a/include/linux/snmp.h b/include/linux/snmp.h index 52797714ade7..ebb0c80ffd6e 100644 --- a/include/linux/snmp.h +++ b/include/linux/snmp.h | |||
@@ -229,6 +229,7 @@ enum | |||
229 | LINUX_MIB_TCPBACKLOGDROP, | 229 | LINUX_MIB_TCPBACKLOGDROP, |
230 | LINUX_MIB_TCPMINTTLDROP, /* RFC 5082 */ | 230 | LINUX_MIB_TCPMINTTLDROP, /* RFC 5082 */ |
231 | LINUX_MIB_TCPDEFERACCEPTDROP, | 231 | LINUX_MIB_TCPDEFERACCEPTDROP, |
232 | LINUX_MIB_IPRPFILTER, /* IP Reverse Path Filter (rp_filter) */ | ||
232 | __LINUX_MIB_MAX | 233 | __LINUX_MIB_MAX |
233 | }; | 234 | }; |
234 | 235 | ||
diff --git a/include/linux/wlp.h b/include/linux/wlp.h index ac95ce6606ac..c76fe2392506 100644 --- a/include/linux/wlp.h +++ b/include/linux/wlp.h | |||
@@ -300,7 +300,7 @@ struct wlp_ie { | |||
300 | __le16 cycle_param; | 300 | __le16 cycle_param; |
301 | __le16 acw_anchor_addr; | 301 | __le16 acw_anchor_addr; |
302 | u8 wssid_hash_list[]; | 302 | u8 wssid_hash_list[]; |
303 | } __attribute__((packed)); | 303 | } __packed; |
304 | 304 | ||
305 | static inline int wlp_ie_hash_length(struct wlp_ie *ie) | 305 | static inline int wlp_ie_hash_length(struct wlp_ie *ie) |
306 | { | 306 | { |
@@ -324,7 +324,7 @@ static inline void wlp_ie_set_hash_length(struct wlp_ie *ie, int hash_length) | |||
324 | */ | 324 | */ |
325 | struct wlp_nonce { | 325 | struct wlp_nonce { |
326 | u8 data[16]; | 326 | u8 data[16]; |
327 | } __attribute__((packed)); | 327 | } __packed; |
328 | 328 | ||
329 | /** | 329 | /** |
330 | * WLP UUID | 330 | * WLP UUID |
@@ -336,7 +336,7 @@ struct wlp_nonce { | |||
336 | */ | 336 | */ |
337 | struct wlp_uuid { | 337 | struct wlp_uuid { |
338 | u8 data[16]; | 338 | u8 data[16]; |
339 | } __attribute__((packed)); | 339 | } __packed; |
340 | 340 | ||
341 | 341 | ||
342 | /** | 342 | /** |
@@ -348,7 +348,7 @@ struct wlp_dev_type { | |||
348 | u8 OUI[3]; | 348 | u8 OUI[3]; |
349 | u8 OUIsubdiv; | 349 | u8 OUIsubdiv; |
350 | __le16 subID; | 350 | __le16 subID; |
351 | } __attribute__((packed)); | 351 | } __packed; |
352 | 352 | ||
353 | /** | 353 | /** |
354 | * WLP frame header | 354 | * WLP frame header |
@@ -357,7 +357,7 @@ struct wlp_dev_type { | |||
357 | struct wlp_frame_hdr { | 357 | struct wlp_frame_hdr { |
358 | __le16 mux_hdr; /* WLP_PROTOCOL_ID */ | 358 | __le16 mux_hdr; /* WLP_PROTOCOL_ID */ |
359 | enum wlp_frame_type type:8; | 359 | enum wlp_frame_type type:8; |
360 | } __attribute__((packed)); | 360 | } __packed; |
361 | 361 | ||
362 | /** | 362 | /** |
363 | * WLP attribute field header | 363 | * WLP attribute field header |
@@ -368,7 +368,7 @@ struct wlp_frame_hdr { | |||
368 | struct wlp_attr_hdr { | 368 | struct wlp_attr_hdr { |
369 | __le16 type; | 369 | __le16 type; |
370 | __le16 length; | 370 | __le16 length; |
371 | } __attribute__((packed)); | 371 | } __packed; |
372 | 372 | ||
373 | /** | 373 | /** |
374 | * Device information commonly used together | 374 | * Device information commonly used together |
@@ -401,13 +401,13 @@ struct wlp_device_info { | |||
401 | struct wlp_attr_##name { \ | 401 | struct wlp_attr_##name { \ |
402 | struct wlp_attr_hdr hdr; \ | 402 | struct wlp_attr_hdr hdr; \ |
403 | type name; \ | 403 | type name; \ |
404 | } __attribute__((packed)); | 404 | } __packed; |
405 | 405 | ||
406 | #define wlp_attr_array(type, name) \ | 406 | #define wlp_attr_array(type, name) \ |
407 | struct wlp_attr_##name { \ | 407 | struct wlp_attr_##name { \ |
408 | struct wlp_attr_hdr hdr; \ | 408 | struct wlp_attr_hdr hdr; \ |
409 | type name[]; \ | 409 | type name[]; \ |
410 | } __attribute__((packed)); | 410 | } __packed; |
411 | 411 | ||
412 | /** | 412 | /** |
413 | * WLP association attribute fields | 413 | * WLP association attribute fields |
@@ -483,7 +483,7 @@ struct wlp_wss_info { | |||
483 | struct wlp_attr_accept_enrl accept; | 483 | struct wlp_attr_accept_enrl accept; |
484 | struct wlp_attr_wss_sec_status sec_stat; | 484 | struct wlp_attr_wss_sec_status sec_stat; |
485 | struct wlp_attr_wss_bcast bcast; | 485 | struct wlp_attr_wss_bcast bcast; |
486 | } __attribute__((packed)); | 486 | } __packed; |
487 | 487 | ||
488 | /* WLP WSS Information */ | 488 | /* WLP WSS Information */ |
489 | wlp_attr_array(struct wlp_wss_info, wss_info) | 489 | wlp_attr_array(struct wlp_wss_info, wss_info) |
@@ -520,7 +520,7 @@ wlp_attr(u8, wlp_assc_err) | |||
520 | struct wlp_frame_std_abbrv_hdr { | 520 | struct wlp_frame_std_abbrv_hdr { |
521 | struct wlp_frame_hdr hdr; | 521 | struct wlp_frame_hdr hdr; |
522 | u8 tag; | 522 | u8 tag; |
523 | } __attribute__((packed)); | 523 | } __packed; |
524 | 524 | ||
525 | /** | 525 | /** |
526 | * WLP association frames | 526 | * WLP association frames |
@@ -533,7 +533,7 @@ struct wlp_frame_assoc { | |||
533 | struct wlp_attr_version version; | 533 | struct wlp_attr_version version; |
534 | struct wlp_attr_msg_type msg_type; | 534 | struct wlp_attr_msg_type msg_type; |
535 | u8 attr[]; | 535 | u8 attr[]; |
536 | } __attribute__((packed)); | 536 | } __packed; |
537 | 537 | ||
538 | /* Ethernet to dev address mapping */ | 538 | /* Ethernet to dev address mapping */ |
539 | struct wlp_eda { | 539 | struct wlp_eda { |
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index b44a2e5321a3..e7ebeb8bdf71 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h | |||
@@ -1330,26 +1330,15 @@ struct wiphy { | |||
1330 | char priv[0] __attribute__((__aligned__(NETDEV_ALIGN))); | 1330 | char priv[0] __attribute__((__aligned__(NETDEV_ALIGN))); |
1331 | }; | 1331 | }; |
1332 | 1332 | ||
1333 | #ifdef CONFIG_NET_NS | ||
1334 | static inline struct net *wiphy_net(struct wiphy *wiphy) | ||
1335 | { | ||
1336 | return wiphy->_net; | ||
1337 | } | ||
1338 | |||
1339 | static inline void wiphy_net_set(struct wiphy *wiphy, struct net *net) | ||
1340 | { | ||
1341 | wiphy->_net = net; | ||
1342 | } | ||
1343 | #else | ||
1344 | static inline struct net *wiphy_net(struct wiphy *wiphy) | 1333 | static inline struct net *wiphy_net(struct wiphy *wiphy) |
1345 | { | 1334 | { |
1346 | return &init_net; | 1335 | return read_pnet(&wiphy->_net); |
1347 | } | 1336 | } |
1348 | 1337 | ||
1349 | static inline void wiphy_net_set(struct wiphy *wiphy, struct net *net) | 1338 | static inline void wiphy_net_set(struct wiphy *wiphy, struct net *net) |
1350 | { | 1339 | { |
1340 | write_pnet(&wiphy->_net, net); | ||
1351 | } | 1341 | } |
1352 | #endif | ||
1353 | 1342 | ||
1354 | /** | 1343 | /** |
1355 | * wiphy_priv - return priv from wiphy | 1344 | * wiphy_priv - return priv from wiphy |
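The wiphy_net()/wiphy_net_set() rewrite above (and the matching genl_info_net, nf_ct_net and sock_net hunks further down) folds the CONFIG_NET_NS #ifdef into the read_pnet()/write_pnet() helpers instead of open-coding it at every accessor. For reference, those helpers look approximately like this (hedged reconstruction of include/net/net_namespace.h for this kernel):

/* Hedged reconstruction -- the real definitions live in include/net/net_namespace.h. */
static inline void write_pnet(struct net **pnet, struct net *net)
{
#ifdef CONFIG_NET_NS
	*pnet = net;			/* store the namespace pointer */
#endif					/* !CONFIG_NET_NS: pointer not kept */
}

static inline struct net *read_pnet(struct net * const *pnet)
{
#ifdef CONFIG_NET_NS
	return *pnet;
#else
	return &init_net;		/* single-namespace kernels */
#endif
}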
diff --git a/include/net/dn_dev.h b/include/net/dn_dev.h index 511a459ec10f..0916bbf3bdff 100644 --- a/include/net/dn_dev.h +++ b/include/net/dn_dev.h | |||
@@ -101,7 +101,7 @@ struct dn_short_packet { | |||
101 | __le16 dstnode; | 101 | __le16 dstnode; |
102 | __le16 srcnode; | 102 | __le16 srcnode; |
103 | __u8 forward; | 103 | __u8 forward; |
104 | } __attribute__((packed)); | 104 | } __packed; |
105 | 105 | ||
106 | struct dn_long_packet { | 106 | struct dn_long_packet { |
107 | __u8 msgflg; | 107 | __u8 msgflg; |
@@ -115,7 +115,7 @@ struct dn_long_packet { | |||
115 | __u8 visit_ct; | 115 | __u8 visit_ct; |
116 | __u8 s_class; | 116 | __u8 s_class; |
117 | __u8 pt; | 117 | __u8 pt; |
118 | } __attribute__((packed)); | 118 | } __packed; |
119 | 119 | ||
120 | /*------------------------- DRP - Routing messages ---------------------*/ | 120 | /*------------------------- DRP - Routing messages ---------------------*/ |
121 | 121 | ||
@@ -132,7 +132,7 @@ struct endnode_hello_message { | |||
132 | __u8 mpd; | 132 | __u8 mpd; |
133 | __u8 datalen; | 133 | __u8 datalen; |
134 | __u8 data[2]; | 134 | __u8 data[2]; |
135 | } __attribute__((packed)); | 135 | } __packed; |
136 | 136 | ||
137 | struct rtnode_hello_message { | 137 | struct rtnode_hello_message { |
138 | __u8 msgflg; | 138 | __u8 msgflg; |
@@ -144,7 +144,7 @@ struct rtnode_hello_message { | |||
144 | __u8 area; | 144 | __u8 area; |
145 | __le16 timer; | 145 | __le16 timer; |
146 | __u8 mpd; | 146 | __u8 mpd; |
147 | } __attribute__((packed)); | 147 | } __packed; |
148 | 148 | ||
149 | 149 | ||
150 | extern void dn_dev_init(void); | 150 | extern void dn_dev_init(void); |
diff --git a/include/net/dn_nsp.h b/include/net/dn_nsp.h index 17d43d2db5ec..e43a2893f132 100644 --- a/include/net/dn_nsp.h +++ b/include/net/dn_nsp.h | |||
@@ -74,18 +74,18 @@ struct nsp_data_seg_msg { | |||
74 | __u8 msgflg; | 74 | __u8 msgflg; |
75 | __le16 dstaddr; | 75 | __le16 dstaddr; |
76 | __le16 srcaddr; | 76 | __le16 srcaddr; |
77 | } __attribute__((packed)); | 77 | } __packed; |
78 | 78 | ||
79 | struct nsp_data_opt_msg { | 79 | struct nsp_data_opt_msg { |
80 | __le16 acknum; | 80 | __le16 acknum; |
81 | __le16 segnum; | 81 | __le16 segnum; |
82 | __le16 lsflgs; | 82 | __le16 lsflgs; |
83 | } __attribute__((packed)); | 83 | } __packed; |
84 | 84 | ||
85 | struct nsp_data_opt_msg1 { | 85 | struct nsp_data_opt_msg1 { |
86 | __le16 acknum; | 86 | __le16 acknum; |
87 | __le16 segnum; | 87 | __le16 segnum; |
88 | } __attribute__((packed)); | 88 | } __packed; |
89 | 89 | ||
90 | 90 | ||
91 | /* Acknowledgment Message (data/other data) */ | 91 | /* Acknowledgment Message (data/other data) */ |
@@ -94,13 +94,13 @@ struct nsp_data_ack_msg { | |||
94 | __le16 dstaddr; | 94 | __le16 dstaddr; |
95 | __le16 srcaddr; | 95 | __le16 srcaddr; |
96 | __le16 acknum; | 96 | __le16 acknum; |
97 | } __attribute__((packed)); | 97 | } __packed; |
98 | 98 | ||
99 | /* Connect Acknowledgment Message */ | 99 | /* Connect Acknowledgment Message */ |
100 | struct nsp_conn_ack_msg { | 100 | struct nsp_conn_ack_msg { |
101 | __u8 msgflg; | 101 | __u8 msgflg; |
102 | __le16 dstaddr; | 102 | __le16 dstaddr; |
103 | } __attribute__((packed)); | 103 | } __packed; |
104 | 104 | ||
105 | 105 | ||
106 | /* Connect Initiate/Retransmit Initiate/Connect Confirm */ | 106 | /* Connect Initiate/Retransmit Initiate/Connect Confirm */ |
@@ -117,7 +117,7 @@ struct nsp_conn_init_msg { | |||
117 | #define NSP_FC_MASK 0x0c /* FC type mask */ | 117 | #define NSP_FC_MASK 0x0c /* FC type mask */ |
118 | __u8 info; | 118 | __u8 info; |
119 | __le16 segsize; | 119 | __le16 segsize; |
120 | } __attribute__((packed)); | 120 | } __packed; |
121 | 121 | ||
122 | /* Disconnect Initiate/Disconnect Confirm */ | 122 | /* Disconnect Initiate/Disconnect Confirm */ |
123 | struct nsp_disconn_init_msg { | 123 | struct nsp_disconn_init_msg { |
@@ -125,7 +125,7 @@ struct nsp_disconn_init_msg { | |||
125 | __le16 dstaddr; | 125 | __le16 dstaddr; |
126 | __le16 srcaddr; | 126 | __le16 srcaddr; |
127 | __le16 reason; | 127 | __le16 reason; |
128 | } __attribute__((packed)); | 128 | } __packed; |
129 | 129 | ||
130 | 130 | ||
131 | 131 | ||
@@ -135,7 +135,7 @@ struct srcobj_fmt { | |||
135 | __le16 grpcode; | 135 | __le16 grpcode; |
136 | __le16 usrcode; | 136 | __le16 usrcode; |
137 | __u8 dlen; | 137 | __u8 dlen; |
138 | } __attribute__((packed)); | 138 | } __packed; |
139 | 139 | ||
140 | /* | 140 | /* |
141 | * A collection of functions for manipulating the sequence | 141 | * A collection of functions for manipulating the sequence |
diff --git a/include/net/genetlink.h b/include/net/genetlink.h index eb551baafc04..f7dcd2c70412 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h | |||
@@ -68,26 +68,15 @@ struct genl_info { | |||
68 | #endif | 68 | #endif |
69 | }; | 69 | }; |
70 | 70 | ||
71 | #ifdef CONFIG_NET_NS | ||
72 | static inline struct net *genl_info_net(struct genl_info *info) | 71 | static inline struct net *genl_info_net(struct genl_info *info) |
73 | { | 72 | { |
74 | return info->_net; | 73 | return read_pnet(&info->_net); |
75 | } | 74 | } |
76 | 75 | ||
77 | static inline void genl_info_net_set(struct genl_info *info, struct net *net) | 76 | static inline void genl_info_net_set(struct genl_info *info, struct net *net) |
78 | { | 77 | { |
79 | info->_net = net; | 78 | write_pnet(&info->_net, net); |
80 | } | 79 | } |
81 | #else | ||
82 | static inline struct net *genl_info_net(struct genl_info *info) | ||
83 | { | ||
84 | return &init_net; | ||
85 | } | ||
86 | |||
87 | static inline void genl_info_net_set(struct genl_info *info, struct net *net) | ||
88 | { | ||
89 | } | ||
90 | #endif | ||
91 | 80 | ||
92 | /** | 81 | /** |
93 | * struct genl_ops - generic netlink operations | 82 | * struct genl_ops - generic netlink operations |
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index fbf9d1cda27b..fc94ec568a50 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h | |||
@@ -27,6 +27,6 @@ struct ipv6_tlv_tnl_enc_lim { | |||
27 | __u8 type; /* type-code for option */ | 27 | __u8 type; /* type-code for option */ |
28 | __u8 length; /* option length */ | 28 | __u8 length; /* option length */ |
29 | __u8 encap_limit; /* tunnel encapsulation limit */ | 29 | __u8 encap_limit; /* tunnel encapsulation limit */ |
30 | } __attribute__ ((packed)); | 30 | } __packed; |
31 | 31 | ||
32 | #endif | 32 | #endif |
diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 2600b69757b8..f5808d596aab 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h | |||
@@ -551,6 +551,10 @@ extern int ipv6_ext_hdr(u8 nexthdr); | |||
551 | 551 | ||
552 | extern int ipv6_find_tlv(struct sk_buff *skb, int offset, int type); | 552 | extern int ipv6_find_tlv(struct sk_buff *skb, int offset, int type); |
553 | 553 | ||
554 | extern struct in6_addr *fl6_update_dst(struct flowi *fl, | ||
555 | const struct ipv6_txoptions *opt, | ||
556 | struct in6_addr *orig); | ||
557 | |||
554 | /* | 558 | /* |
555 | * socket options (ipv6_sockglue.c) | 559 | * socket options (ipv6_sockglue.c) |
556 | */ | 560 | */ |
diff --git a/include/net/ipx.h b/include/net/ipx.h index ef51a668ba19..05d7e4a88b49 100644 --- a/include/net/ipx.h +++ b/include/net/ipx.h | |||
@@ -27,9 +27,9 @@ struct ipx_address { | |||
27 | #define IPX_MAX_PPROP_HOPS 8 | 27 | #define IPX_MAX_PPROP_HOPS 8 |
28 | 28 | ||
29 | struct ipxhdr { | 29 | struct ipxhdr { |
30 | __be16 ipx_checksum __attribute__ ((packed)); | 30 | __be16 ipx_checksum __packed; |
31 | #define IPX_NO_CHECKSUM cpu_to_be16(0xFFFF) | 31 | #define IPX_NO_CHECKSUM cpu_to_be16(0xFFFF) |
32 | __be16 ipx_pktsize __attribute__ ((packed)); | 32 | __be16 ipx_pktsize __packed; |
33 | __u8 ipx_tctrl; | 33 | __u8 ipx_tctrl; |
34 | __u8 ipx_type; | 34 | __u8 ipx_type; |
35 | #define IPX_TYPE_UNKNOWN 0x00 | 35 | #define IPX_TYPE_UNKNOWN 0x00 |
@@ -38,8 +38,8 @@ struct ipxhdr { | |||
38 | #define IPX_TYPE_SPX 0x05 /* SPX protocol */ | 38 | #define IPX_TYPE_SPX 0x05 /* SPX protocol */ |
39 | #define IPX_TYPE_NCP 0x11 /* $lots for docs on this (SPIT) */ | 39 | #define IPX_TYPE_NCP 0x11 /* $lots for docs on this (SPIT) */ |
40 | #define IPX_TYPE_PPROP 0x14 /* complicated flood fill brdcast */ | 40 | #define IPX_TYPE_PPROP 0x14 /* complicated flood fill brdcast */ |
41 | struct ipx_address ipx_dest __attribute__ ((packed)); | 41 | struct ipx_address ipx_dest __packed; |
42 | struct ipx_address ipx_source __attribute__ ((packed)); | 42 | struct ipx_address ipx_source __packed; |
43 | }; | 43 | }; |
44 | 44 | ||
45 | static __inline__ struct ipxhdr *ipx_hdr(struct sk_buff *skb) | 45 | static __inline__ struct ipxhdr *ipx_hdr(struct sk_buff *skb) |
diff --git a/include/net/mip6.h b/include/net/mip6.h index a83ad1982a90..26ba99b5a4b1 100644 --- a/include/net/mip6.h +++ b/include/net/mip6.h | |||
@@ -39,7 +39,7 @@ struct ip6_mh { | |||
39 | __u16 ip6mh_cksum; | 39 | __u16 ip6mh_cksum; |
40 | /* Followed by type specific messages */ | 40 | /* Followed by type specific messages */ |
41 | __u8 data[0]; | 41 | __u8 data[0]; |
42 | } __attribute__ ((__packed__)); | 42 | } __packed; |
43 | 43 | ||
44 | #define IP6_MH_TYPE_BRR 0 /* Binding Refresh Request */ | 44 | #define IP6_MH_TYPE_BRR 0 /* Binding Refresh Request */ |
45 | #define IP6_MH_TYPE_HOTI 1 /* HOTI Message */ | 45 | #define IP6_MH_TYPE_HOTI 1 /* HOTI Message */ |
diff --git a/include/net/ndisc.h b/include/net/ndisc.h index f76f22d05721..895997bc2ead 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h | |||
@@ -82,7 +82,7 @@ struct ra_msg { | |||
82 | struct nd_opt_hdr { | 82 | struct nd_opt_hdr { |
83 | __u8 nd_opt_type; | 83 | __u8 nd_opt_type; |
84 | __u8 nd_opt_len; | 84 | __u8 nd_opt_len; |
85 | } __attribute__((__packed__)); | 85 | } __packed; |
86 | 86 | ||
87 | 87 | ||
88 | extern int ndisc_init(void); | 88 | extern int ndisc_init(void); |
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index bde095f7e845..bbfdd9453087 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h | |||
@@ -152,11 +152,7 @@ extern struct net init_net; | |||
152 | 152 | ||
153 | static inline struct net *nf_ct_net(const struct nf_conn *ct) | 153 | static inline struct net *nf_ct_net(const struct nf_conn *ct) |
154 | { | 154 | { |
155 | #ifdef CONFIG_NET_NS | 155 | return read_pnet(&ct->ct_net); |
156 | return ct->ct_net; | ||
157 | #else | ||
158 | return &init_net; | ||
159 | #endif | ||
160 | } | 156 | } |
161 | 157 | ||
162 | /* Alter reply tuple (maybe alter helper). */ | 158 | /* Alter reply tuple (maybe alter helper). */ |
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 9d4d87cc970e..d9549af6929a 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h | |||
@@ -95,7 +95,7 @@ extern void __qdisc_run(struct Qdisc *q); | |||
95 | 95 | ||
96 | static inline void qdisc_run(struct Qdisc *q) | 96 | static inline void qdisc_run(struct Qdisc *q) |
97 | { | 97 | { |
98 | if (!test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) | 98 | if (qdisc_run_begin(q)) |
99 | __qdisc_run(q); | 99 | __qdisc_run(q); |
100 | } | 100 | } |
101 | 101 | ||
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 03ca5d826757..b35301b0c7b6 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h | |||
@@ -23,11 +23,17 @@ struct qdisc_rate_table { | |||
23 | }; | 23 | }; |
24 | 24 | ||
25 | enum qdisc_state_t { | 25 | enum qdisc_state_t { |
26 | __QDISC_STATE_RUNNING, | ||
27 | __QDISC_STATE_SCHED, | 26 | __QDISC_STATE_SCHED, |
28 | __QDISC_STATE_DEACTIVATED, | 27 | __QDISC_STATE_DEACTIVATED, |
29 | }; | 28 | }; |
30 | 29 | ||
30 | /* | ||
31 | * following bits are only changed while qdisc lock is held | ||
32 | */ | ||
33 | enum qdisc___state_t { | ||
34 | __QDISC___STATE_RUNNING, | ||
35 | }; | ||
36 | |||
31 | struct qdisc_size_table { | 37 | struct qdisc_size_table { |
32 | struct list_head list; | 38 | struct list_head list; |
33 | struct tc_sizespec szopts; | 39 | struct tc_sizespec szopts; |
@@ -72,10 +78,27 @@ struct Qdisc { | |||
72 | unsigned long state; | 78 | unsigned long state; |
73 | struct sk_buff_head q; | 79 | struct sk_buff_head q; |
74 | struct gnet_stats_basic_packed bstats; | 80 | struct gnet_stats_basic_packed bstats; |
81 | unsigned long __state; | ||
75 | struct gnet_stats_queue qstats; | 82 | struct gnet_stats_queue qstats; |
76 | struct rcu_head rcu_head; | 83 | struct rcu_head rcu_head; |
84 | spinlock_t busylock; | ||
77 | }; | 85 | }; |
78 | 86 | ||
87 | static inline bool qdisc_is_running(struct Qdisc *qdisc) | ||
88 | { | ||
89 | return test_bit(__QDISC___STATE_RUNNING, &qdisc->__state); | ||
90 | } | ||
91 | |||
92 | static inline bool qdisc_run_begin(struct Qdisc *qdisc) | ||
93 | { | ||
94 | return !__test_and_set_bit(__QDISC___STATE_RUNNING, &qdisc->__state); | ||
95 | } | ||
96 | |||
97 | static inline void qdisc_run_end(struct Qdisc *qdisc) | ||
98 | { | ||
99 | __clear_bit(__QDISC___STATE_RUNNING, &qdisc->__state); | ||
100 | } | ||
101 | |||
79 | struct Qdisc_class_ops { | 102 | struct Qdisc_class_ops { |
80 | /* Child qdisc manipulation */ | 103 | /* Child qdisc manipulation */ |
81 | struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *); | 104 | struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *); |
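Taken together with the pkt_sched.h hunk above, the RUNNING flag moves out of the atomic q->state word into a separate q->__state word that, per the new comment, is only changed while the qdisc root lock is held; that is what makes the cheaper non-atomic __test_and_set_bit()/__clear_bit() variants safe. A minimal sketch of the ownership pattern (assumes include/net/sch_generic.h as patched above; in the patch itself __qdisc_run() is presumably the one that ends with qdisc_run_end(), which is why qdisc_run() does not call it explicitly):

/* Sketch only: the first caller wins ownership of __QDISC___STATE_RUNNING,
 * later callers back off; whoever wins must release it when done. */
static void example_run(struct Qdisc *q)
{
	if (!qdisc_run_begin(q))	/* someone else is already dequeueing */
		return;

	/* ... dequeue/transmit work goes here ... */

	qdisc_run_end(q);		/* release ownership */
}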
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 4b860116e096..f9e7473613bd 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h | |||
@@ -443,7 +443,7 @@ struct sctp_signed_cookie { | |||
443 | __u8 signature[SCTP_SECRET_SIZE]; | 443 | __u8 signature[SCTP_SECRET_SIZE]; |
444 | __u32 __pad; /* force sctp_cookie alignment to 64 bits */ | 444 | __u32 __pad; /* force sctp_cookie alignment to 64 bits */ |
445 | struct sctp_cookie c; | 445 | struct sctp_cookie c; |
446 | } __attribute__((packed)); | 446 | } __packed; |
447 | 447 | ||
448 | /* This is another convenience type to allocate memory for address | 448 | /* This is another convenience type to allocate memory for address |
449 | * params for the maximum size and pass such structures around | 449 | * params for the maximum size and pass such structures around |
@@ -488,7 +488,7 @@ typedef struct sctp_sender_hb_info { | |||
488 | union sctp_addr daddr; | 488 | union sctp_addr daddr; |
489 | unsigned long sent_at; | 489 | unsigned long sent_at; |
490 | __u64 hb_nonce; | 490 | __u64 hb_nonce; |
491 | } __attribute__((packed)) sctp_sender_hb_info_t; | 491 | } __packed sctp_sender_hb_info_t; |
492 | 492 | ||
493 | /* | 493 | /* |
494 | * RFC 2960 1.3.2 Sequenced Delivery within Streams | 494 | * RFC 2960 1.3.2 Sequenced Delivery within Streams |
diff --git a/include/net/sock.h b/include/net/sock.h index 731150d52799..f8acf38f092f 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -1711,19 +1711,13 @@ static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_e | |||
1711 | static inline | 1711 | static inline |
1712 | struct net *sock_net(const struct sock *sk) | 1712 | struct net *sock_net(const struct sock *sk) |
1713 | { | 1713 | { |
1714 | #ifdef CONFIG_NET_NS | 1714 | return read_pnet(&sk->sk_net); |
1715 | return sk->sk_net; | ||
1716 | #else | ||
1717 | return &init_net; | ||
1718 | #endif | ||
1719 | } | 1715 | } |
1720 | 1716 | ||
1721 | static inline | 1717 | static inline |
1722 | void sock_net_set(struct sock *sk, struct net *net) | 1718 | void sock_net_set(struct sock *sk, struct net *net) |
1723 | { | 1719 | { |
1724 | #ifdef CONFIG_NET_NS | 1720 | write_pnet(&sk->sk_net, net); |
1725 | sk->sk_net = net; | ||
1726 | #endif | ||
1727 | } | 1721 | } |
1728 | 1722 | ||
1729 | /* | 1723 | /* |
diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h index b69e6e173ea1..9b2c30897e50 100644 --- a/include/rxrpc/packet.h +++ b/include/rxrpc/packet.h | |||
@@ -65,7 +65,7 @@ struct rxrpc_header { | |||
65 | }; | 65 | }; |
66 | __be16 serviceId; /* service ID */ | 66 | __be16 serviceId; /* service ID */ |
67 | 67 | ||
68 | } __attribute__((packed)); | 68 | } __packed; |
69 | 69 | ||
70 | #define __rxrpc_header_off(X) offsetof(struct rxrpc_header,X) | 70 | #define __rxrpc_header_off(X) offsetof(struct rxrpc_header,X) |
71 | 71 | ||
@@ -120,7 +120,7 @@ struct rxrpc_ackpacket { | |||
120 | #define RXRPC_ACK_TYPE_NACK 0 | 120 | #define RXRPC_ACK_TYPE_NACK 0 |
121 | #define RXRPC_ACK_TYPE_ACK 1 | 121 | #define RXRPC_ACK_TYPE_ACK 1 |
122 | 122 | ||
123 | } __attribute__((packed)); | 123 | } __packed; |
124 | 124 | ||
125 | /* | 125 | /* |
126 | * ACK packets can have a further piece of information tagged on the end | 126 | * ACK packets can have a further piece of information tagged on the end |
@@ -141,7 +141,7 @@ struct rxkad_challenge { | |||
141 | __be32 nonce; /* encrypted random number */ | 141 | __be32 nonce; /* encrypted random number */ |
142 | __be32 min_level; /* minimum security level */ | 142 | __be32 min_level; /* minimum security level */ |
143 | __be32 __padding; /* padding to 8-byte boundary */ | 143 | __be32 __padding; /* padding to 8-byte boundary */ |
144 | } __attribute__((packed)); | 144 | } __packed; |
145 | 145 | ||
146 | /*****************************************************************************/ | 146 | /*****************************************************************************/ |
147 | /* | 147 | /* |
@@ -164,7 +164,7 @@ struct rxkad_response { | |||
164 | 164 | ||
165 | __be32 kvno; /* Kerberos key version number */ | 165 | __be32 kvno; /* Kerberos key version number */ |
166 | __be32 ticket_len; /* Kerberos ticket length */ | 166 | __be32 ticket_len; /* Kerberos ticket length */ |
167 | } __attribute__((packed)); | 167 | } __packed; |
168 | 168 | ||
169 | /*****************************************************************************/ | 169 | /*****************************************************************************/ |
170 | /* | 170 | /* |
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h index 0d9e506f5d5a..70672544db86 100644 --- a/net/bluetooth/bnep/bnep.h +++ b/net/bluetooth/bnep/bnep.h | |||
@@ -86,26 +86,26 @@ struct bnep_setup_conn_req { | |||
86 | __u8 ctrl; | 86 | __u8 ctrl; |
87 | __u8 uuid_size; | 87 | __u8 uuid_size; |
88 | __u8 service[0]; | 88 | __u8 service[0]; |
89 | } __attribute__((packed)); | 89 | } __packed; |
90 | 90 | ||
91 | struct bnep_set_filter_req { | 91 | struct bnep_set_filter_req { |
92 | __u8 type; | 92 | __u8 type; |
93 | __u8 ctrl; | 93 | __u8 ctrl; |
94 | __be16 len; | 94 | __be16 len; |
95 | __u8 list[0]; | 95 | __u8 list[0]; |
96 | } __attribute__((packed)); | 96 | } __packed; |
97 | 97 | ||
98 | struct bnep_control_rsp { | 98 | struct bnep_control_rsp { |
99 | __u8 type; | 99 | __u8 type; |
100 | __u8 ctrl; | 100 | __u8 ctrl; |
101 | __be16 resp; | 101 | __be16 resp; |
102 | } __attribute__((packed)); | 102 | } __packed; |
103 | 103 | ||
104 | struct bnep_ext_hdr { | 104 | struct bnep_ext_hdr { |
105 | __u8 type; | 105 | __u8 type; |
106 | __u8 len; | 106 | __u8 len; |
107 | __u8 data[0]; | 107 | __u8 data[0]; |
108 | } __attribute__((packed)); | 108 | } __packed; |
109 | 109 | ||
110 | /* BNEP ioctl defines */ | 110 | /* BNEP ioctl defines */ |
111 | #define BNEPCONNADD _IOW('B', 200, int) | 111 | #define BNEPCONNADD _IOW('B', 200, int) |
diff --git a/net/bridge/br.c b/net/bridge/br.c index 76357b547752..c8436fa31344 100644 --- a/net/bridge/br.c +++ b/net/bridge/br.c | |||
@@ -63,7 +63,6 @@ static int __init br_init(void) | |||
63 | goto err_out4; | 63 | goto err_out4; |
64 | 64 | ||
65 | brioctl_set(br_ioctl_deviceless_stub); | 65 | brioctl_set(br_ioctl_deviceless_stub); |
66 | br_handle_frame_hook = br_handle_frame; | ||
67 | 66 | ||
68 | #if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE) | 67 | #if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE) |
69 | br_fdb_test_addr_hook = br_fdb_test_addr; | 68 | br_fdb_test_addr_hook = br_fdb_test_addr; |
@@ -100,7 +99,6 @@ static void __exit br_deinit(void) | |||
100 | br_fdb_test_addr_hook = NULL; | 99 | br_fdb_test_addr_hook = NULL; |
101 | #endif | 100 | #endif |
102 | 101 | ||
103 | br_handle_frame_hook = NULL; | ||
104 | br_fdb_fini(); | 102 | br_fdb_fini(); |
105 | } | 103 | } |
106 | 104 | ||
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 18b245e2c00e..d9242342837e 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c | |||
@@ -147,6 +147,7 @@ static void del_nbp(struct net_bridge_port *p) | |||
147 | 147 | ||
148 | list_del_rcu(&p->list); | 148 | list_del_rcu(&p->list); |
149 | 149 | ||
150 | netdev_rx_handler_unregister(dev); | ||
150 | rcu_assign_pointer(dev->br_port, NULL); | 151 | rcu_assign_pointer(dev->br_port, NULL); |
151 | 152 | ||
152 | br_multicast_del_port(p); | 153 | br_multicast_del_port(p); |
@@ -429,6 +430,11 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) | |||
429 | goto err2; | 430 | goto err2; |
430 | 431 | ||
431 | rcu_assign_pointer(dev->br_port, p); | 432 | rcu_assign_pointer(dev->br_port, p); |
433 | |||
434 | err = netdev_rx_handler_register(dev, br_handle_frame); | ||
435 | if (err) | ||
436 | goto err3; | ||
437 | |||
432 | dev_disable_lro(dev); | 438 | dev_disable_lro(dev); |
433 | 439 | ||
434 | list_add_rcu(&p->list, &br->port_list); | 440 | list_add_rcu(&p->list, &br->port_list); |
@@ -451,6 +457,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) | |||
451 | br_netpoll_enable(br, dev); | 457 | br_netpoll_enable(br, dev); |
452 | 458 | ||
453 | return 0; | 459 | return 0; |
460 | err3: | ||
461 | rcu_assign_pointer(dev->br_port, NULL); | ||
454 | err2: | 462 | err2: |
455 | br_fdb_delete_by_port(br, p, 1); | 463 | br_fdb_delete_by_port(br, p, 1); |
456 | err1: | 464 | err1: |
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index d36e700f7a26..99647d8f95c8 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c | |||
@@ -131,15 +131,19 @@ static inline int is_link_local(const unsigned char *dest) | |||
131 | } | 131 | } |
132 | 132 | ||
133 | /* | 133 | /* |
134 | * Called via br_handle_frame_hook. | ||
135 | * Return NULL if skb is handled | 134 | * Return NULL if skb is handled |
136 | * note: already called with rcu_read_lock (preempt_disabled) | 135 | * note: already called with rcu_read_lock (preempt_disabled) from |
136 | * netif_receive_skb | ||
137 | */ | 137 | */ |
138 | struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb) | 138 | struct sk_buff *br_handle_frame(struct sk_buff *skb) |
139 | { | 139 | { |
140 | struct net_bridge_port *p; | ||
140 | const unsigned char *dest = eth_hdr(skb)->h_dest; | 141 | const unsigned char *dest = eth_hdr(skb)->h_dest; |
141 | int (*rhook)(struct sk_buff *skb); | 142 | int (*rhook)(struct sk_buff *skb); |
142 | 143 | ||
144 | if (skb->pkt_type == PACKET_LOOPBACK) | ||
145 | return skb; | ||
146 | |||
143 | if (!is_valid_ether_addr(eth_hdr(skb)->h_source)) | 147 | if (!is_valid_ether_addr(eth_hdr(skb)->h_source)) |
144 | goto drop; | 148 | goto drop; |
145 | 149 | ||
@@ -147,6 +151,8 @@ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb) | |||
147 | if (!skb) | 151 | if (!skb) |
148 | return NULL; | 152 | return NULL; |
149 | 153 | ||
154 | p = rcu_dereference(skb->dev->br_port); | ||
155 | |||
150 | if (unlikely(is_link_local(dest))) { | 156 | if (unlikely(is_link_local(dest))) { |
151 | /* Pause frames shouldn't be passed up by driver anyway */ | 157 | /* Pause frames shouldn't be passed up by driver anyway */ |
152 | if (skb->protocol == htons(ETH_P_PAUSE)) | 158 | if (skb->protocol == htons(ETH_P_PAUSE)) |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 0f4a74bc6a9b..c83519b555bb 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
@@ -331,8 +331,7 @@ extern void br_features_recompute(struct net_bridge *br); | |||
331 | 331 | ||
332 | /* br_input.c */ | 332 | /* br_input.c */ |
333 | extern int br_handle_frame_finish(struct sk_buff *skb); | 333 | extern int br_handle_frame_finish(struct sk_buff *skb); |
334 | extern struct sk_buff *br_handle_frame(struct net_bridge_port *p, | 334 | extern struct sk_buff *br_handle_frame(struct sk_buff *skb); |
335 | struct sk_buff *skb); | ||
336 | 335 | ||
337 | /* br_ioctl.c */ | 336 | /* br_ioctl.c */ |
338 | extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | 337 | extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
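The bridge hunks above replace the single global br_handle_frame_hook with per-device registration through netdev_rx_handler_register()/netdev_rx_handler_unregister(), and br_handle_frame() now derives its port from skb->dev under RCU. A hedged sketch of the handler contract as it appears in these hunks (prototype taken from the new br_handle_frame(); the example_* names are hypothetical; assumes <linux/netdevice.h> and <linux/if_packet.h>):

/* Returning NULL means the frame was consumed; returning the skb hands it
 * back to the normal netif_receive_skb() path. */
static struct sk_buff *example_rx_handler(struct sk_buff *skb)
{
	if (skb->pkt_type == PACKET_LOOPBACK)
		return skb;		/* not ours: let the stack keep it */

	/* ... claim the frame here ... */
	kfree_skb(skb);			/* placeholder for real processing */
	return NULL;			/* consumed */
}

static int example_attach(struct net_device *dev)
{
	/* mirrors br_add_if(): register, caller backs out on failure */
	return netdev_rx_handler_register(dev, example_rx_handler);
}

static void example_detach(struct net_device *dev)
{
	/* mirrors del_nbp() */
	netdev_rx_handler_unregister(dev);
}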
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 3d0e09584fae..791249316ef3 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c | |||
@@ -874,8 +874,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr, | |||
874 | sk_stream_kill_queues(&cf_sk->sk); | 874 | sk_stream_kill_queues(&cf_sk->sk); |
875 | 875 | ||
876 | err = -EINVAL; | 876 | err = -EINVAL; |
877 | if (addr_len != sizeof(struct sockaddr_caif) || | 877 | if (addr_len != sizeof(struct sockaddr_caif)) |
878 | !uaddr) | ||
879 | goto out; | 878 | goto out; |
880 | 879 | ||
881 | memcpy(&cf_sk->conn_req.sockaddr, uaddr, | 880 | memcpy(&cf_sk->conn_req.sockaddr, uaddr, |
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c index df43f264d9fb..7c81974a45c4 100644 --- a/net/caif/cfcnfg.c +++ b/net/caif/cfcnfg.c | |||
@@ -308,19 +308,15 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv, | |||
308 | caif_assert(cnfg != NULL); | 308 | caif_assert(cnfg != NULL); |
309 | caif_assert(phyid != 0); | 309 | caif_assert(phyid != 0); |
310 | phyinfo = &cnfg->phy_layers[phyid]; | 310 | phyinfo = &cnfg->phy_layers[phyid]; |
311 | caif_assert(phyinfo != NULL); | ||
312 | caif_assert(phyinfo->id == phyid); | 311 | caif_assert(phyinfo->id == phyid); |
313 | caif_assert(phyinfo->phy_layer != NULL); | 312 | caif_assert(phyinfo->phy_layer != NULL); |
314 | caif_assert(phyinfo->phy_layer->id == phyid); | 313 | caif_assert(phyinfo->phy_layer->id == phyid); |
315 | 314 | ||
316 | if (phyinfo != NULL && | 315 | phyinfo->phy_ref_count++; |
317 | phyinfo->phy_ref_count++ == 0 && | 316 | if (phyinfo->phy_ref_count == 1 && |
318 | phyinfo->phy_layer != NULL && | ||
319 | phyinfo->phy_layer->modemcmd != NULL) { | 317 | phyinfo->phy_layer->modemcmd != NULL) { |
320 | caif_assert(phyinfo->phy_layer->id == phyid); | ||
321 | phyinfo->phy_layer->modemcmd(phyinfo->phy_layer, | 318 | phyinfo->phy_layer->modemcmd(phyinfo->phy_layer, |
322 | _CAIF_MODEMCMD_PHYIF_USEFULL); | 319 | _CAIF_MODEMCMD_PHYIF_USEFULL); |
323 | |||
324 | } | 320 | } |
325 | adapt_layer->id = channel_id; | 321 | adapt_layer->id = channel_id; |
326 | 322 | ||
diff --git a/net/can/raw.c b/net/can/raw.c index da99cf153b33..ccfe633eec8e 100644 --- a/net/can/raw.c +++ b/net/can/raw.c | |||
@@ -436,14 +436,9 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, | |||
436 | 436 | ||
437 | if (count > 1) { | 437 | if (count > 1) { |
438 | /* filter does not fit into dfilter => alloc space */ | 438 | /* filter does not fit into dfilter => alloc space */ |
439 | filter = kmalloc(optlen, GFP_KERNEL); | 439 | filter = memdup_user(optval, optlen); |
440 | if (!filter) | 440 | if (IS_ERR(filter)) |
441 | return -ENOMEM; | 441 | return PTR_ERR(filter); |
442 | |||
443 | if (copy_from_user(filter, optval, optlen)) { | ||
444 | kfree(filter); | ||
445 | return -EFAULT; | ||
446 | } | ||
447 | } else if (count == 1) { | 442 | } else if (count == 1) { |
448 | if (copy_from_user(&sfilter, optval, sizeof(sfilter))) | 443 | if (copy_from_user(&sfilter, optval, sizeof(sfilter))) |
449 | return -EFAULT; | 444 | return -EFAULT; |
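The raw_setsockopt() change above swaps an open-coded kmalloc()+copy_from_user() pair for memdup_user(), which returns the freshly allocated copy or an ERR_PTR() on failure. Roughly, it bundles exactly the pattern it replaces (hedged reconstruction of mm/util.c; the real helper may differ in allocator details):

/* Hedged reconstruction -- the real implementation lives in mm/util.c. */
void *memdup_user(const void __user *src, size_t len)
{
	void *p = kmalloc(len, GFP_KERNEL);

	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}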
diff --git a/net/compat.c b/net/compat.c index ec24d9edb025..63d260e81472 100644 --- a/net/compat.c +++ b/net/compat.c | |||
@@ -81,7 +81,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov, | |||
81 | int tot_len; | 81 | int tot_len; |
82 | 82 | ||
83 | if (kern_msg->msg_namelen) { | 83 | if (kern_msg->msg_namelen) { |
84 | if (mode==VERIFY_READ) { | 84 | if (mode == VERIFY_READ) { |
85 | int err = move_addr_to_kernel(kern_msg->msg_name, | 85 | int err = move_addr_to_kernel(kern_msg->msg_name, |
86 | kern_msg->msg_namelen, | 86 | kern_msg->msg_namelen, |
87 | kern_address); | 87 | kern_address); |
@@ -354,7 +354,7 @@ static int do_set_attach_filter(struct socket *sock, int level, int optname, | |||
354 | static int do_set_sock_timeout(struct socket *sock, int level, | 354 | static int do_set_sock_timeout(struct socket *sock, int level, |
355 | int optname, char __user *optval, unsigned int optlen) | 355 | int optname, char __user *optval, unsigned int optlen) |
356 | { | 356 | { |
357 | struct compat_timeval __user *up = (struct compat_timeval __user *) optval; | 357 | struct compat_timeval __user *up = (struct compat_timeval __user *)optval; |
358 | struct timeval ktime; | 358 | struct timeval ktime; |
359 | mm_segment_t old_fs; | 359 | mm_segment_t old_fs; |
360 | int err; | 360 | int err; |
@@ -367,7 +367,7 @@ static int do_set_sock_timeout(struct socket *sock, int level, | |||
367 | return -EFAULT; | 367 | return -EFAULT; |
368 | old_fs = get_fs(); | 368 | old_fs = get_fs(); |
369 | set_fs(KERNEL_DS); | 369 | set_fs(KERNEL_DS); |
370 | err = sock_setsockopt(sock, level, optname, (char *) &ktime, sizeof(ktime)); | 370 | err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime)); |
371 | set_fs(old_fs); | 371 | set_fs(old_fs); |
372 | 372 | ||
373 | return err; | 373 | return err; |
@@ -389,11 +389,10 @@ asmlinkage long compat_sys_setsockopt(int fd, int level, int optname, | |||
389 | char __user *optval, unsigned int optlen) | 389 | char __user *optval, unsigned int optlen) |
390 | { | 390 | { |
391 | int err; | 391 | int err; |
392 | struct socket *sock; | 392 | struct socket *sock = sockfd_lookup(fd, &err); |
393 | 393 | ||
394 | if ((sock = sockfd_lookup(fd, &err))!=NULL) | 394 | if (sock) { |
395 | { | 395 | err = security_socket_setsockopt(sock, level, optname); |
396 | err = security_socket_setsockopt(sock,level,optname); | ||
397 | if (err) { | 396 | if (err) { |
398 | sockfd_put(sock); | 397 | sockfd_put(sock); |
399 | return err; | 398 | return err; |
@@ -453,7 +452,7 @@ static int compat_sock_getsockopt(struct socket *sock, int level, int optname, | |||
453 | int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp) | 452 | int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp) |
454 | { | 453 | { |
455 | struct compat_timeval __user *ctv = | 454 | struct compat_timeval __user *ctv = |
456 | (struct compat_timeval __user*) userstamp; | 455 | (struct compat_timeval __user *) userstamp; |
457 | int err = -ENOENT; | 456 | int err = -ENOENT; |
458 | struct timeval tv; | 457 | struct timeval tv; |
459 | 458 | ||
@@ -477,7 +476,7 @@ EXPORT_SYMBOL(compat_sock_get_timestamp); | |||
477 | int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp) | 476 | int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp) |
478 | { | 477 | { |
479 | struct compat_timespec __user *ctv = | 478 | struct compat_timespec __user *ctv = |
480 | (struct compat_timespec __user*) userstamp; | 479 | (struct compat_timespec __user *) userstamp; |
481 | int err = -ENOENT; | 480 | int err = -ENOENT; |
482 | struct timespec ts; | 481 | struct timespec ts; |
483 | 482 | ||
@@ -502,12 +501,10 @@ asmlinkage long compat_sys_getsockopt(int fd, int level, int optname, | |||
502 | char __user *optval, int __user *optlen) | 501 | char __user *optval, int __user *optlen) |
503 | { | 502 | { |
504 | int err; | 503 | int err; |
505 | struct socket *sock; | 504 | struct socket *sock = sockfd_lookup(fd, &err); |
506 | 505 | ||
507 | if ((sock = sockfd_lookup(fd, &err))!=NULL) | 506 | if (sock) { |
508 | { | 507 | err = security_socket_getsockopt(sock, level, optname); |
509 | err = security_socket_getsockopt(sock, level, | ||
510 | optname); | ||
511 | if (err) { | 508 | if (err) { |
512 | sockfd_put(sock); | 509 | sockfd_put(sock); |
513 | return err; | 510 | return err; |
@@ -531,7 +528,7 @@ struct compat_group_req { | |||
531 | __u32 gr_interface; | 528 | __u32 gr_interface; |
532 | struct __kernel_sockaddr_storage gr_group | 529 | struct __kernel_sockaddr_storage gr_group |
533 | __attribute__ ((aligned(4))); | 530 | __attribute__ ((aligned(4))); |
534 | } __attribute__ ((packed)); | 531 | } __packed; |
535 | 532 | ||
536 | struct compat_group_source_req { | 533 | struct compat_group_source_req { |
537 | __u32 gsr_interface; | 534 | __u32 gsr_interface; |
@@ -539,7 +536,7 @@ struct compat_group_source_req { | |||
539 | __attribute__ ((aligned(4))); | 536 | __attribute__ ((aligned(4))); |
540 | struct __kernel_sockaddr_storage gsr_source | 537 | struct __kernel_sockaddr_storage gsr_source |
541 | __attribute__ ((aligned(4))); | 538 | __attribute__ ((aligned(4))); |
542 | } __attribute__ ((packed)); | 539 | } __packed; |
543 | 540 | ||
544 | struct compat_group_filter { | 541 | struct compat_group_filter { |
545 | __u32 gf_interface; | 542 | __u32 gf_interface; |
@@ -549,7 +546,7 @@ struct compat_group_filter { | |||
549 | __u32 gf_numsrc; | 546 | __u32 gf_numsrc; |
550 | struct __kernel_sockaddr_storage gf_slist[1] | 547 | struct __kernel_sockaddr_storage gf_slist[1] |
551 | __attribute__ ((aligned(4))); | 548 | __attribute__ ((aligned(4))); |
552 | } __attribute__ ((packed)); | 549 | } __packed; |
553 | 550 | ||
554 | #define __COMPAT_GF0_SIZE (sizeof(struct compat_group_filter) - \ | 551 | #define __COMPAT_GF0_SIZE (sizeof(struct compat_group_filter) - \ |
555 | sizeof(struct __kernel_sockaddr_storage)) | 552 | sizeof(struct __kernel_sockaddr_storage)) |
@@ -557,7 +554,7 @@ struct compat_group_filter { | |||
557 | 554 | ||
558 | int compat_mc_setsockopt(struct sock *sock, int level, int optname, | 555 | int compat_mc_setsockopt(struct sock *sock, int level, int optname, |
559 | char __user *optval, unsigned int optlen, | 556 | char __user *optval, unsigned int optlen, |
560 | int (*setsockopt)(struct sock *,int,int,char __user *,unsigned int)) | 557 | int (*setsockopt)(struct sock *, int, int, char __user *, unsigned int)) |
561 | { | 558 | { |
562 | char __user *koptval = optval; | 559 | char __user *koptval = optval; |
563 | int koptlen = optlen; | 560 | int koptlen = optlen; |
@@ -640,12 +637,11 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname, | |||
640 | } | 637 | } |
641 | return setsockopt(sock, level, optname, koptval, koptlen); | 638 | return setsockopt(sock, level, optname, koptval, koptlen); |
642 | } | 639 | } |
643 | |||
644 | EXPORT_SYMBOL(compat_mc_setsockopt); | 640 | EXPORT_SYMBOL(compat_mc_setsockopt); |
645 | 641 | ||
646 | int compat_mc_getsockopt(struct sock *sock, int level, int optname, | 642 | int compat_mc_getsockopt(struct sock *sock, int level, int optname, |
647 | char __user *optval, int __user *optlen, | 643 | char __user *optval, int __user *optlen, |
648 | int (*getsockopt)(struct sock *,int,int,char __user *,int __user *)) | 644 | int (*getsockopt)(struct sock *, int, int, char __user *, int __user *)) |
649 | { | 645 | { |
650 | struct compat_group_filter __user *gf32 = (void *)optval; | 646 | struct compat_group_filter __user *gf32 = (void *)optval; |
651 | struct group_filter __user *kgf; | 647 | struct group_filter __user *kgf; |
@@ -681,7 +677,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname, | |||
681 | __put_user(interface, &kgf->gf_interface) || | 677 | __put_user(interface, &kgf->gf_interface) || |
682 | __put_user(fmode, &kgf->gf_fmode) || | 678 | __put_user(fmode, &kgf->gf_fmode) || |
683 | __put_user(numsrc, &kgf->gf_numsrc) || | 679 | __put_user(numsrc, &kgf->gf_numsrc) || |
684 | copy_in_user(&kgf->gf_group,&gf32->gf_group,sizeof(kgf->gf_group))) | 680 | copy_in_user(&kgf->gf_group, &gf32->gf_group, sizeof(kgf->gf_group))) |
685 | return -EFAULT; | 681 | return -EFAULT; |
686 | 682 | ||
687 | err = getsockopt(sock, level, optname, (char __user *)kgf, koptlen); | 683 | err = getsockopt(sock, level, optname, (char __user *)kgf, koptlen); |
@@ -714,21 +710,22 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname, | |||
714 | copylen = numsrc * sizeof(gf32->gf_slist[0]); | 710 | copylen = numsrc * sizeof(gf32->gf_slist[0]); |
715 | if (copylen > klen) | 711 | if (copylen > klen) |
716 | copylen = klen; | 712 | copylen = klen; |
717 | if (copy_in_user(gf32->gf_slist, kgf->gf_slist, copylen)) | 713 | if (copy_in_user(gf32->gf_slist, kgf->gf_slist, copylen)) |
718 | return -EFAULT; | 714 | return -EFAULT; |
719 | } | 715 | } |
720 | return err; | 716 | return err; |
721 | } | 717 | } |
722 | |||
723 | EXPORT_SYMBOL(compat_mc_getsockopt); | 718 | EXPORT_SYMBOL(compat_mc_getsockopt); |
724 | 719 | ||
725 | 720 | ||
726 | /* Argument list sizes for compat_sys_socketcall */ | 721 | /* Argument list sizes for compat_sys_socketcall */ |
727 | #define AL(x) ((x) * sizeof(u32)) | 722 | #define AL(x) ((x) * sizeof(u32)) |
728 | static unsigned char nas[20]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3), | 723 | static unsigned char nas[20] = { |
729 | AL(3),AL(3),AL(4),AL(4),AL(4),AL(6), | 724 | AL(0), AL(3), AL(3), AL(3), AL(2), AL(3), |
730 | AL(6),AL(2),AL(5),AL(5),AL(3),AL(3), | 725 | AL(3), AL(3), AL(4), AL(4), AL(4), AL(6), |
731 | AL(4),AL(5)}; | 726 | AL(6), AL(2), AL(5), AL(5), AL(3), AL(3), |
727 | AL(4), AL(5) | ||
728 | }; | ||
732 | #undef AL | 729 | #undef AL |
733 | 730 | ||
734 | asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags) | 731 | asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags) |
@@ -827,7 +824,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args) | |||
827 | compat_ptr(a[4]), compat_ptr(a[5])); | 824 | compat_ptr(a[4]), compat_ptr(a[5])); |
828 | break; | 825 | break; |
829 | case SYS_SHUTDOWN: | 826 | case SYS_SHUTDOWN: |
830 | ret = sys_shutdown(a0,a1); | 827 | ret = sys_shutdown(a0, a1); |
831 | break; | 828 | break; |
832 | case SYS_SETSOCKOPT: | 829 | case SYS_SETSOCKOPT: |
833 | ret = compat_sys_setsockopt(a0, a1, a[2], | 830 | ret = compat_sys_setsockopt(a0, a1, a[2], |
diff --git a/net/core/dev.c b/net/core/dev.c index d03470f5260a..b65347c2cf2a 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -1577,7 +1577,9 @@ EXPORT_SYMBOL(__netif_schedule); | |||
1577 | 1577 | ||
1578 | void dev_kfree_skb_irq(struct sk_buff *skb) | 1578 | void dev_kfree_skb_irq(struct sk_buff *skb) |
1579 | { | 1579 | { |
1580 | if (atomic_dec_and_test(&skb->users)) { | 1580 | if (!skb->destructor) |
1581 | dev_kfree_skb(skb); | ||
1582 | else if (atomic_dec_and_test(&skb->users)) { | ||
1581 | struct softnet_data *sd; | 1583 | struct softnet_data *sd; |
1582 | unsigned long flags; | 1584 | unsigned long flags; |
1583 | 1585 | ||
@@ -2038,14 +2040,24 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, | |||
2038 | struct netdev_queue *txq) | 2040 | struct netdev_queue *txq) |
2039 | { | 2041 | { |
2040 | spinlock_t *root_lock = qdisc_lock(q); | 2042 | spinlock_t *root_lock = qdisc_lock(q); |
2043 | bool contended = qdisc_is_running(q); | ||
2041 | int rc; | 2044 | int rc; |
2042 | 2045 | ||
2046 | /* | ||
2047 | * Heuristic to force contended enqueues to serialize on a | ||
2048 | * separate lock before trying to get qdisc main lock. | ||
2049 | * This permits __QDISC_STATE_RUNNING owner to get the lock more often | ||
2050 | * and dequeue packets faster. | ||
2051 | */ | ||
2052 | if (unlikely(contended)) | ||
2053 | spin_lock(&q->busylock); | ||
2054 | |||
2043 | spin_lock(root_lock); | 2055 | spin_lock(root_lock); |
2044 | if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { | 2056 | if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { |
2045 | kfree_skb(skb); | 2057 | kfree_skb(skb); |
2046 | rc = NET_XMIT_DROP; | 2058 | rc = NET_XMIT_DROP; |
2047 | } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && | 2059 | } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && |
2048 | !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) { | 2060 | qdisc_run_begin(q)) { |
2049 | /* | 2061 | /* |
2050 | * This is a work-conserving queue; there are no old skbs | 2062 | * This is a work-conserving queue; there are no old skbs |
2051 | * waiting to be sent out; and the qdisc is not running - | 2063 | * waiting to be sent out; and the qdisc is not running - |
@@ -2054,19 +2066,30 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, | |||
2054 | if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE)) | 2066 | if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE)) |
2055 | skb_dst_force(skb); | 2067 | skb_dst_force(skb); |
2056 | __qdisc_update_bstats(q, skb->len); | 2068 | __qdisc_update_bstats(q, skb->len); |
2057 | if (sch_direct_xmit(skb, q, dev, txq, root_lock)) | 2069 | if (sch_direct_xmit(skb, q, dev, txq, root_lock)) { |
2070 | if (unlikely(contended)) { | ||
2071 | spin_unlock(&q->busylock); | ||
2072 | contended = false; | ||
2073 | } | ||
2058 | __qdisc_run(q); | 2074 | __qdisc_run(q); |
2059 | else | 2075 | } else |
2060 | clear_bit(__QDISC_STATE_RUNNING, &q->state); | 2076 | qdisc_run_end(q); |
2061 | 2077 | ||
2062 | rc = NET_XMIT_SUCCESS; | 2078 | rc = NET_XMIT_SUCCESS; |
2063 | } else { | 2079 | } else { |
2064 | skb_dst_force(skb); | 2080 | skb_dst_force(skb); |
2065 | rc = qdisc_enqueue_root(skb, q); | 2081 | rc = qdisc_enqueue_root(skb, q); |
2066 | qdisc_run(q); | 2082 | if (qdisc_run_begin(q)) { |
2083 | if (unlikely(contended)) { | ||
2084 | spin_unlock(&q->busylock); | ||
2085 | contended = false; | ||
2086 | } | ||
2087 | __qdisc_run(q); | ||
2088 | } | ||
2067 | } | 2089 | } |
2068 | spin_unlock(root_lock); | 2090 | spin_unlock(root_lock); |
2069 | 2091 | if (unlikely(contended)) | |
2092 | spin_unlock(&q->busylock); | ||
2070 | return rc; | 2093 | return rc; |
2071 | } | 2094 | } |
2072 | 2095 | ||
@@ -2080,9 +2103,10 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, | |||
2080 | static inline int skb_needs_linearize(struct sk_buff *skb, | 2103 | static inline int skb_needs_linearize(struct sk_buff *skb, |
2081 | struct net_device *dev) | 2104 | struct net_device *dev) |
2082 | { | 2105 | { |
2083 | return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) || | 2106 | return skb_is_nonlinear(skb) && |
2084 | (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) || | 2107 | ((skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) || |
2085 | illegal_highdma(dev, skb))); | 2108 | (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) || |
2109 | illegal_highdma(dev, skb)))); | ||
2086 | } | 2110 | } |
2087 | 2111 | ||
2088 | /** | 2112 | /** |
@@ -2581,70 +2605,14 @@ static inline int deliver_skb(struct sk_buff *skb, | |||
2581 | return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); | 2605 | return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); |
2582 | } | 2606 | } |
2583 | 2607 | ||
2584 | #if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE) | 2608 | #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \ |
2585 | 2609 | (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)) | |
2586 | #if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE) | ||
2587 | /* This hook is defined here for ATM LANE */ | 2610 | /* This hook is defined here for ATM LANE */ |
2588 | int (*br_fdb_test_addr_hook)(struct net_device *dev, | 2611 | int (*br_fdb_test_addr_hook)(struct net_device *dev, |
2589 | unsigned char *addr) __read_mostly; | 2612 | unsigned char *addr) __read_mostly; |
2590 | EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); | 2613 | EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); |
2591 | #endif | 2614 | #endif |
2592 | 2615 | ||
2593 | /* | ||
2594 | * If bridge module is loaded call bridging hook. | ||
2595 | * returns NULL if packet was consumed. | ||
2596 | */ | ||
2597 | struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p, | ||
2598 | struct sk_buff *skb) __read_mostly; | ||
2599 | EXPORT_SYMBOL_GPL(br_handle_frame_hook); | ||
2600 | |||
2601 | static inline struct sk_buff *handle_bridge(struct sk_buff *skb, | ||
2602 | struct packet_type **pt_prev, int *ret, | ||
2603 | struct net_device *orig_dev) | ||
2604 | { | ||
2605 | struct net_bridge_port *port; | ||
2606 | |||
2607 | if (skb->pkt_type == PACKET_LOOPBACK || | ||
2608 | (port = rcu_dereference(skb->dev->br_port)) == NULL) | ||
2609 | return skb; | ||
2610 | |||
2611 | if (*pt_prev) { | ||
2612 | *ret = deliver_skb(skb, *pt_prev, orig_dev); | ||
2613 | *pt_prev = NULL; | ||
2614 | } | ||
2615 | |||
2616 | return br_handle_frame_hook(port, skb); | ||
2617 | } | ||
2618 | #else | ||
2619 | #define handle_bridge(skb, pt_prev, ret, orig_dev) (skb) | ||
2620 | #endif | ||
2621 | |||
2622 | #if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE) | ||
2623 | struct sk_buff *(*macvlan_handle_frame_hook)(struct macvlan_port *p, | ||
2624 | struct sk_buff *skb) __read_mostly; | ||
2625 | EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook); | ||
2626 | |||
2627 | static inline struct sk_buff *handle_macvlan(struct sk_buff *skb, | ||
2628 | struct packet_type **pt_prev, | ||
2629 | int *ret, | ||
2630 | struct net_device *orig_dev) | ||
2631 | { | ||
2632 | struct macvlan_port *port; | ||
2633 | |||
2634 | port = rcu_dereference(skb->dev->macvlan_port); | ||
2635 | if (!port) | ||
2636 | return skb; | ||
2637 | |||
2638 | if (*pt_prev) { | ||
2639 | *ret = deliver_skb(skb, *pt_prev, orig_dev); | ||
2640 | *pt_prev = NULL; | ||
2641 | } | ||
2642 | return macvlan_handle_frame_hook(port, skb); | ||
2643 | } | ||
2644 | #else | ||
2645 | #define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb) | ||
2646 | #endif | ||
2647 | |||
2648 | #ifdef CONFIG_NET_CLS_ACT | 2616 | #ifdef CONFIG_NET_CLS_ACT |
2649 | /* TODO: Maybe we should just force sch_ingress to be compiled in | 2617 | /* TODO: Maybe we should just force sch_ingress to be compiled in |
2650 | * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions | 2618 | * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions |
@@ -2740,6 +2708,47 @@ void netif_nit_deliver(struct sk_buff *skb) | |||
2740 | rcu_read_unlock(); | 2708 | rcu_read_unlock(); |
2741 | } | 2709 | } |
2742 | 2710 | ||
2711 | /** | ||
2712 | * netdev_rx_handler_register - register receive handler | ||
2713 | * @dev: device to register a handler for | ||
2714 | * @rx_handler: receive handler to register | ||
2715 | * | ||
2716 | * Register a receive handler for a device. This handler will then be | ||
2717 | * called from __netif_receive_skb. A negative errno code is returned | ||
2718 | * on a failure. | ||
2719 | * | ||
2720 | * The caller must hold the rtnl_mutex. | ||
2721 | */ | ||
2722 | int netdev_rx_handler_register(struct net_device *dev, | ||
2723 | rx_handler_func_t *rx_handler) | ||
2724 | { | ||
2725 | ASSERT_RTNL(); | ||
2726 | |||
2727 | if (dev->rx_handler) | ||
2728 | return -EBUSY; | ||
2729 | |||
2730 | rcu_assign_pointer(dev->rx_handler, rx_handler); | ||
2731 | |||
2732 | return 0; | ||
2733 | } | ||
2734 | EXPORT_SYMBOL_GPL(netdev_rx_handler_register); | ||
2735 | |||
2736 | /** | ||
2737 | * netdev_rx_handler_unregister - unregister receive handler | ||
2738 | * @dev: device to unregister a handler from | ||
2739 | * | ||
2740 | * Unregister a receive handler from a device. | ||
2741 | * | ||
2742 | * The caller must hold the rtnl_mutex. | ||
2743 | */ | ||
2744 | void netdev_rx_handler_unregister(struct net_device *dev) | ||
2745 | { | ||
2746 | |||
2747 | ASSERT_RTNL(); | ||
2748 | rcu_assign_pointer(dev->rx_handler, NULL); | ||
2749 | } | ||
2750 | EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); | ||
2751 | |||
2743 | static inline void skb_bond_set_mac_by_master(struct sk_buff *skb, | 2752 | static inline void skb_bond_set_mac_by_master(struct sk_buff *skb, |
2744 | struct net_device *master) | 2753 | struct net_device *master) |
2745 | { | 2754 | { |
@@ -2792,6 +2801,7 @@ EXPORT_SYMBOL(__skb_bond_should_drop); | |||
2792 | static int __netif_receive_skb(struct sk_buff *skb) | 2801 | static int __netif_receive_skb(struct sk_buff *skb) |
2793 | { | 2802 | { |
2794 | struct packet_type *ptype, *pt_prev; | 2803 | struct packet_type *ptype, *pt_prev; |
2804 | rx_handler_func_t *rx_handler; | ||
2795 | struct net_device *orig_dev; | 2805 | struct net_device *orig_dev; |
2796 | struct net_device *master; | 2806 | struct net_device *master; |
2797 | struct net_device *null_or_orig; | 2807 | struct net_device *null_or_orig; |
@@ -2822,8 +2832,7 @@ static int __netif_receive_skb(struct sk_buff *skb) | |||
2822 | skb->dev = master; | 2832 | skb->dev = master; |
2823 | } | 2833 | } |
2824 | 2834 | ||
2825 | __get_cpu_var(softnet_data).processed++; | 2835 | __this_cpu_inc(softnet_data.processed); |
2826 | |||
2827 | skb_reset_network_header(skb); | 2836 | skb_reset_network_header(skb); |
2828 | skb_reset_transport_header(skb); | 2837 | skb_reset_transport_header(skb); |
2829 | skb->mac_len = skb->network_header - skb->mac_header; | 2838 | skb->mac_len = skb->network_header - skb->mac_header; |
@@ -2855,12 +2864,17 @@ static int __netif_receive_skb(struct sk_buff *skb) | |||
2855 | ncls: | 2864 | ncls: |
2856 | #endif | 2865 | #endif |
2857 | 2866 | ||
2858 | skb = handle_bridge(skb, &pt_prev, &ret, orig_dev); | 2867 | /* Handle special case of bridge or macvlan */ |
2859 | if (!skb) | 2868 | rx_handler = rcu_dereference(skb->dev->rx_handler); |
2860 | goto out; | 2869 | if (rx_handler) { |
2861 | skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev); | 2870 | if (pt_prev) { |
2862 | if (!skb) | 2871 | ret = deliver_skb(skb, pt_prev, orig_dev); |
2863 | goto out; | 2872 | pt_prev = NULL; |
2873 | } | ||
2874 | skb = rx_handler(skb); | ||
2875 | if (!skb) | ||
2876 | goto out; | ||
2877 | } | ||
2864 | 2878 | ||
2865 | /* | 2879 | /* |
2866 | * Make sure frames received on VLAN interfaces stacked on | 2880 | * Make sure frames received on VLAN interfaces stacked on |
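
The net/core/dev.c hunks above replace the hard-wired bridge and macvlan hooks with a single per-device rx_handler. A minimal sketch of how a module might use the new registration calls, assuming the rx_handler_func_t prototype implied by the hunk (the handler takes the skb and returns it, or NULL once the frame is consumed); example_rx_handler(), example_attach() and example_detach() are hypothetical names:

    #include <linux/netdevice.h>
    #include <linux/rtnetlink.h>

    /* hypothetical handler: inspect the frame, return NULL if consumed */
    static struct sk_buff *example_rx_handler(struct sk_buff *skb)
    {
        return skb;                 /* let __netif_receive_skb() continue */
    }

    static int example_attach(struct net_device *dev)
    {
        int err;

        rtnl_lock();                /* caller must hold the rtnl_mutex */
        err = netdev_rx_handler_register(dev, example_rx_handler);
        rtnl_unlock();
        return err;                 /* -EBUSY if a handler is already set */
    }

    static void example_detach(struct net_device *dev)
    {
        rtnl_lock();
        netdev_rx_handler_unregister(dev);
        rtnl_unlock();
    }
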
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 94825b109551..e034342c819c 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
@@ -49,7 +49,6 @@ static atomic_t trapped; | |||
49 | (MAX_UDP_CHUNK + sizeof(struct udphdr) + \ | 49 | (MAX_UDP_CHUNK + sizeof(struct udphdr) + \ |
50 | sizeof(struct iphdr) + sizeof(struct ethhdr)) | 50 | sizeof(struct iphdr) + sizeof(struct ethhdr)) |
51 | 51 | ||
52 | static void zap_completion_queue(void); | ||
53 | static void arp_reply(struct sk_buff *skb); | 52 | static void arp_reply(struct sk_buff *skb); |
54 | 53 | ||
55 | static unsigned int carrier_timeout = 4; | 54 | static unsigned int carrier_timeout = 4; |
@@ -197,7 +196,6 @@ void netpoll_poll_dev(struct net_device *dev) | |||
197 | 196 | ||
198 | service_arp_queue(dev->npinfo); | 197 | service_arp_queue(dev->npinfo); |
199 | 198 | ||
200 | zap_completion_queue(); | ||
201 | } | 199 | } |
202 | 200 | ||
203 | void netpoll_poll(struct netpoll *np) | 201 | void netpoll_poll(struct netpoll *np) |
@@ -221,40 +219,11 @@ static void refill_skbs(void) | |||
221 | spin_unlock_irqrestore(&skb_pool.lock, flags); | 219 | spin_unlock_irqrestore(&skb_pool.lock, flags); |
222 | } | 220 | } |
223 | 221 | ||
224 | static void zap_completion_queue(void) | ||
225 | { | ||
226 | unsigned long flags; | ||
227 | struct softnet_data *sd = &get_cpu_var(softnet_data); | ||
228 | |||
229 | if (sd->completion_queue) { | ||
230 | struct sk_buff *clist; | ||
231 | |||
232 | local_irq_save(flags); | ||
233 | clist = sd->completion_queue; | ||
234 | sd->completion_queue = NULL; | ||
235 | local_irq_restore(flags); | ||
236 | |||
237 | while (clist != NULL) { | ||
238 | struct sk_buff *skb = clist; | ||
239 | clist = clist->next; | ||
240 | if (skb->destructor) { | ||
241 | atomic_inc(&skb->users); | ||
242 | dev_kfree_skb_any(skb); /* put this one back */ | ||
243 | } else { | ||
244 | __kfree_skb(skb); | ||
245 | } | ||
246 | } | ||
247 | } | ||
248 | |||
249 | put_cpu_var(softnet_data); | ||
250 | } | ||
251 | |||
252 | static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve) | 222 | static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve) |
253 | { | 223 | { |
254 | int count = 0; | 224 | int count = 0; |
255 | struct sk_buff *skb; | 225 | struct sk_buff *skb; |
256 | 226 | ||
257 | zap_completion_queue(); | ||
258 | refill_skbs(); | 227 | refill_skbs(); |
259 | repeat: | 228 | repeat: |
260 | 229 | ||
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 091698899594..6e3f32575df7 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c | |||
@@ -248,7 +248,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req, | |||
248 | struct ipv6_pinfo *np = inet6_sk(sk); | 248 | struct ipv6_pinfo *np = inet6_sk(sk); |
249 | struct sk_buff *skb; | 249 | struct sk_buff *skb; |
250 | struct ipv6_txoptions *opt = NULL; | 250 | struct ipv6_txoptions *opt = NULL; |
251 | struct in6_addr *final_p = NULL, final; | 251 | struct in6_addr *final_p, final; |
252 | struct flowi fl; | 252 | struct flowi fl; |
253 | int err = -1; | 253 | int err = -1; |
254 | struct dst_entry *dst; | 254 | struct dst_entry *dst; |
@@ -265,13 +265,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req, | |||
265 | 265 | ||
266 | opt = np->opt; | 266 | opt = np->opt; |
267 | 267 | ||
268 | if (opt != NULL && opt->srcrt != NULL) { | 268 | final_p = fl6_update_dst(&fl, opt, &final); |
269 | const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt; | ||
270 | |||
271 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
272 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
273 | final_p = &final; | ||
274 | } | ||
275 | 269 | ||
276 | err = ip6_dst_lookup(sk, &dst, &fl); | 270 | err = ip6_dst_lookup(sk, &dst, &fl); |
277 | if (err) | 271 | if (err) |
@@ -545,19 +539,13 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk, | |||
545 | goto out_overflow; | 539 | goto out_overflow; |
546 | 540 | ||
547 | if (dst == NULL) { | 541 | if (dst == NULL) { |
548 | struct in6_addr *final_p = NULL, final; | 542 | struct in6_addr *final_p, final; |
549 | struct flowi fl; | 543 | struct flowi fl; |
550 | 544 | ||
551 | memset(&fl, 0, sizeof(fl)); | 545 | memset(&fl, 0, sizeof(fl)); |
552 | fl.proto = IPPROTO_DCCP; | 546 | fl.proto = IPPROTO_DCCP; |
553 | ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr); | 547 | ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr); |
554 | if (opt != NULL && opt->srcrt != NULL) { | 548 | final_p = fl6_update_dst(&fl, opt, &final); |
555 | const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt; | ||
556 | |||
557 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
558 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
559 | final_p = &final; | ||
560 | } | ||
561 | ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr); | 549 | ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr); |
562 | fl.oif = sk->sk_bound_dev_if; | 550 | fl.oif = sk->sk_bound_dev_if; |
563 | fl.fl_ip_dport = inet_rsk(req)->rmt_port; | 551 | fl.fl_ip_dport = inet_rsk(req)->rmt_port; |
@@ -885,7 +873,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
885 | struct inet_sock *inet = inet_sk(sk); | 873 | struct inet_sock *inet = inet_sk(sk); |
886 | struct ipv6_pinfo *np = inet6_sk(sk); | 874 | struct ipv6_pinfo *np = inet6_sk(sk); |
887 | struct dccp_sock *dp = dccp_sk(sk); | 875 | struct dccp_sock *dp = dccp_sk(sk); |
888 | struct in6_addr *saddr = NULL, *final_p = NULL, final; | 876 | struct in6_addr *saddr = NULL, *final_p, final; |
889 | struct flowi fl; | 877 | struct flowi fl; |
890 | struct dst_entry *dst; | 878 | struct dst_entry *dst; |
891 | int addr_type; | 879 | int addr_type; |
@@ -988,13 +976,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
988 | fl.fl_ip_sport = inet->inet_sport; | 976 | fl.fl_ip_sport = inet->inet_sport; |
989 | security_sk_classify_flow(sk, &fl); | 977 | security_sk_classify_flow(sk, &fl); |
990 | 978 | ||
991 | if (np->opt != NULL && np->opt->srcrt != NULL) { | 979 | final_p = fl6_update_dst(&fl, np->opt, &final); |
992 | const struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt; | ||
993 | |||
994 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
995 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
996 | final_p = &final; | ||
997 | } | ||
998 | 980 | ||
999 | err = ip6_dst_lookup(sk, &dst, &fl); | 981 | err = ip6_dst_lookup(sk, &dst, &fl); |
1000 | if (err) | 982 | if (err) |
diff --git a/net/dccp/proto.c b/net/dccp/proto.c index b03ecf6b2bb0..f79bcef5088f 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c | |||
@@ -473,14 +473,9 @@ static int dccp_setsockopt_ccid(struct sock *sk, int type, | |||
473 | if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS) | 473 | if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS) |
474 | return -EINVAL; | 474 | return -EINVAL; |
475 | 475 | ||
476 | val = kmalloc(optlen, GFP_KERNEL); | 476 | val = memdup_user(optval, optlen); |
477 | if (val == NULL) | 477 | if (IS_ERR(val)) |
478 | return -ENOMEM; | 478 | return PTR_ERR(val); |
479 | |||
480 | if (copy_from_user(val, optval, optlen)) { | ||
481 | kfree(val); | ||
482 | return -EFAULT; | ||
483 | } | ||
484 | 479 | ||
485 | lock_sock(sk); | 480 | lock_sock(sk); |
486 | if (type == DCCP_SOCKOPT_TX_CCID || type == DCCP_SOCKOPT_CCID) | 481 | if (type == DCCP_SOCKOPT_TX_CCID || type == DCCP_SOCKOPT_CCID) |
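
The dccp_setsockopt_ccid() change collapses the kmalloc()/copy_from_user()/kfree() error handling into a single memdup_user() call. A minimal sketch of the idiom, with example_set_option() as a hypothetical caller and the buffer assumed small enough for a GFP_KERNEL allocation:

    #include <linux/err.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    static int example_set_option(void __user *optval, unsigned int optlen)
    {
        u8 *val = memdup_user(optval, optlen);

        if (IS_ERR(val))
            return PTR_ERR(val);    /* -ENOMEM or -EFAULT */
        /* ... consume val ... */
        kfree(val);
        return 0;
    }
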
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index f094b75810db..917d2d66162e 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c | |||
@@ -545,10 +545,10 @@ static inline int arp_fwd_proxy(struct in_device *in_dev, | |||
545 | 545 | ||
546 | /* place to check for proxy_arp for routes */ | 546 | /* place to check for proxy_arp for routes */ |
547 | 547 | ||
548 | if ((out_dev = in_dev_get(rt->u.dst.dev)) != NULL) { | 548 | out_dev = __in_dev_get_rcu(rt->u.dst.dev); |
549 | if (out_dev) | ||
549 | omi = IN_DEV_MEDIUM_ID(out_dev); | 550 | omi = IN_DEV_MEDIUM_ID(out_dev); |
550 | in_dev_put(out_dev); | 551 | |
551 | } | ||
552 | return (omi != imi && omi != -1); | 552 | return (omi != imi && omi != -1); |
553 | } | 553 | } |
554 | 554 | ||
@@ -741,7 +741,7 @@ void arp_send(int type, int ptype, __be32 dest_ip, | |||
741 | static int arp_process(struct sk_buff *skb) | 741 | static int arp_process(struct sk_buff *skb) |
742 | { | 742 | { |
743 | struct net_device *dev = skb->dev; | 743 | struct net_device *dev = skb->dev; |
744 | struct in_device *in_dev = in_dev_get(dev); | 744 | struct in_device *in_dev = __in_dev_get_rcu(dev); |
745 | struct arphdr *arp; | 745 | struct arphdr *arp; |
746 | unsigned char *arp_ptr; | 746 | unsigned char *arp_ptr; |
747 | struct rtable *rt; | 747 | struct rtable *rt; |
@@ -890,7 +890,6 @@ static int arp_process(struct sk_buff *skb) | |||
890 | arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha); | 890 | arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha); |
891 | } else { | 891 | } else { |
892 | pneigh_enqueue(&arp_tbl, in_dev->arp_parms, skb); | 892 | pneigh_enqueue(&arp_tbl, in_dev->arp_parms, skb); |
893 | in_dev_put(in_dev); | ||
894 | return 0; | 893 | return 0; |
895 | } | 894 | } |
896 | goto out; | 895 | goto out; |
@@ -936,8 +935,6 @@ static int arp_process(struct sk_buff *skb) | |||
936 | } | 935 | } |
937 | 936 | ||
938 | out: | 937 | out: |
939 | if (in_dev) | ||
940 | in_dev_put(in_dev); | ||
941 | consume_skb(skb); | 938 | consume_skb(skb); |
942 | return 0; | 939 | return 0; |
943 | } | 940 | } |
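
arp_fwd_proxy() and arp_process() now borrow the struct in_device under RCU instead of taking and dropping a reference. A minimal sketch of the pattern for a caller that is not already inside an RCU read-side section; example_uses_in_dev() and the IN_DEV_FORWARD() accessor are only illustrative:

    #include <linux/inetdevice.h>
    #include <linux/rcupdate.h>

    static int example_uses_in_dev(struct net_device *dev)
    {
        struct in_device *in_dev;
        int forwarding = 0;

        rcu_read_lock();
        in_dev = __in_dev_get_rcu(dev);        /* may be NULL */
        if (in_dev)
            forwarding = IN_DEV_FORWARD(in_dev);
        rcu_read_unlock();                     /* no in_dev_put() needed */
        return forwarding;
    }
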
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 382bc768ed56..da14c49284f4 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
@@ -1081,6 +1081,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, | |||
1081 | } | 1081 | } |
1082 | ip_mc_up(in_dev); | 1082 | ip_mc_up(in_dev); |
1083 | /* fall through */ | 1083 | /* fall through */ |
1084 | case NETDEV_NOTIFY_PEERS: | ||
1084 | case NETDEV_CHANGEADDR: | 1085 | case NETDEV_CHANGEADDR: |
1085 | /* Send gratuitous ARP to notify of link change */ | 1086 | /* Send gratuitous ARP to notify of link change */ |
1086 | if (IN_DEV_ARP_NOTIFY(in_dev)) { | 1087 | if (IN_DEV_ARP_NOTIFY(in_dev)) { |
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 4f0ed458c883..e830f7a123bd 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
@@ -284,7 +284,7 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif, | |||
284 | if (no_addr) | 284 | if (no_addr) |
285 | goto last_resort; | 285 | goto last_resort; |
286 | if (rpf == 1) | 286 | if (rpf == 1) |
287 | goto e_inval; | 287 | goto e_rpf; |
288 | fl.oif = dev->ifindex; | 288 | fl.oif = dev->ifindex; |
289 | 289 | ||
290 | ret = 0; | 290 | ret = 0; |
@@ -299,7 +299,7 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif, | |||
299 | 299 | ||
300 | last_resort: | 300 | last_resort: |
301 | if (rpf) | 301 | if (rpf) |
302 | goto e_inval; | 302 | goto e_rpf; |
303 | *spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE); | 303 | *spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE); |
304 | *itag = 0; | 304 | *itag = 0; |
305 | return 0; | 305 | return 0; |
@@ -308,6 +308,8 @@ e_inval_res: | |||
308 | fib_res_put(&res); | 308 | fib_res_put(&res); |
309 | e_inval: | 309 | e_inval: |
310 | return -EINVAL; | 310 | return -EINVAL; |
311 | e_rpf: | ||
312 | return -EXDEV; | ||
311 | } | 313 | } |
312 | 314 | ||
313 | static inline __be32 sk_extract_addr(struct sockaddr *addr) | 315 | static inline __be32 sk_extract_addr(struct sockaddr *addr) |
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 5fff865a4fa7..250cb5e1af48 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
@@ -1646,8 +1646,7 @@ static int sf_setstate(struct ip_mc_list *pmc) | |||
1646 | if (dpsf->sf_inaddr == psf->sf_inaddr) | 1646 | if (dpsf->sf_inaddr == psf->sf_inaddr) |
1647 | break; | 1647 | break; |
1648 | if (!dpsf) { | 1648 | if (!dpsf) { |
1649 | dpsf = (struct ip_sf_list *) | 1649 | dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC); |
1650 | kmalloc(sizeof(*dpsf), GFP_ATOMIC); | ||
1651 | if (!dpsf) | 1650 | if (!dpsf) |
1652 | continue; | 1651 | continue; |
1653 | *dpsf = *psf; | 1652 | *dpsf = *psf; |
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index d930dc5e4d85..d52c9da644cf 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c | |||
@@ -340,6 +340,9 @@ static int ip_rcv_finish(struct sk_buff *skb) | |||
340 | else if (err == -ENETUNREACH) | 340 | else if (err == -ENETUNREACH) |
341 | IP_INC_STATS_BH(dev_net(skb->dev), | 341 | IP_INC_STATS_BH(dev_net(skb->dev), |
342 | IPSTATS_MIB_INNOROUTES); | 342 | IPSTATS_MIB_INNOROUTES); |
343 | else if (err == -EXDEV) | ||
344 | NET_INC_STATS_BH(dev_net(skb->dev), | ||
345 | LINUX_MIB_IPRPFILTER); | ||
343 | goto drop; | 346 | goto drop; |
344 | } | 347 | } |
345 | } | 348 | } |
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index b9d84e800cf4..3a6e1ec5e9ae 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c | |||
@@ -665,6 +665,13 @@ ic_dhcp_init_options(u8 *options) | |||
665 | memcpy(e, ic_req_params, sizeof(ic_req_params)); | 665 | memcpy(e, ic_req_params, sizeof(ic_req_params)); |
666 | e += sizeof(ic_req_params); | 666 | e += sizeof(ic_req_params); |
667 | 667 | ||
668 | if (ic_host_name_set) { | ||
669 | *e++ = 12; /* host-name */ | ||
670 | len = strlen(utsname()->nodename); | ||
671 | *e++ = len; | ||
672 | memcpy(e, utsname()->nodename, len); | ||
673 | e += len; | ||
674 | } | ||
668 | if (*vendor_class_identifier) { | 675 | if (*vendor_class_identifier) { |
669 | printk(KERN_INFO "DHCP: sending class identifier \"%s\"\n", | 676 | printk(KERN_INFO "DHCP: sending class identifier \"%s\"\n", |
670 | vendor_class_identifier); | 677 | vendor_class_identifier); |
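
The ipconfig hunk appends the user-supplied hostname as DHCP option 12 in the usual type/length/value layout. A minimal sketch of that layout as a hypothetical helper, assuming the caller guarantees enough room in the options area and a value short enough for the one-byte length field:

    #include <linux/string.h>
    #include <linux/types.h>

    /* append one DHCP TLV option; returns the new write position */
    static u8 *example_dhcp_put_option(u8 *e, u8 code, const void *val, u8 len)
    {
        *e++ = code;        /* 12 == host-name */
        *e++ = len;
        memcpy(e, val, len);
        return e + len;
    }

With such a helper the new block would reduce to e = example_dhcp_put_option(e, 12, utsname()->nodename, strlen(utsname()->nodename)).
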
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 3dc9914c1dce..e320ca6b3ef3 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c | |||
@@ -252,6 +252,7 @@ static const struct snmp_mib snmp4_net_list[] = { | |||
252 | SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP), | 252 | SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP), |
253 | SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP), | 253 | SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP), |
254 | SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP), | 254 | SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP), |
255 | SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER), | ||
255 | SNMP_MIB_SENTINEL | 256 | SNMP_MIB_SENTINEL |
256 | }; | 257 | }; |
257 | 258 | ||
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 560acc677ce4..7b8eacd5ac26 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -253,8 +253,7 @@ static unsigned rt_hash_mask __read_mostly; | |||
253 | static unsigned int rt_hash_log __read_mostly; | 253 | static unsigned int rt_hash_log __read_mostly; |
254 | 254 | ||
255 | static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); | 255 | static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); |
256 | #define RT_CACHE_STAT_INC(field) \ | 256 | #define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field) |
257 | (__raw_get_cpu_var(rt_cache_stat).field++) | ||
258 | 257 | ||
259 | static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx, | 258 | static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx, |
260 | int genid) | 259 | int genid) |
@@ -1844,14 +1843,16 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag) | |||
1844 | rt->rt_type = res->type; | 1843 | rt->rt_type = res->type; |
1845 | } | 1844 | } |
1846 | 1845 | ||
1846 | /* called in rcu_read_lock() section */ | ||
1847 | static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, | 1847 | static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, |
1848 | u8 tos, struct net_device *dev, int our) | 1848 | u8 tos, struct net_device *dev, int our) |
1849 | { | 1849 | { |
1850 | unsigned hash; | 1850 | unsigned int hash; |
1851 | struct rtable *rth; | 1851 | struct rtable *rth; |
1852 | __be32 spec_dst; | 1852 | __be32 spec_dst; |
1853 | struct in_device *in_dev = in_dev_get(dev); | 1853 | struct in_device *in_dev = __in_dev_get_rcu(dev); |
1854 | u32 itag = 0; | 1854 | u32 itag = 0; |
1855 | int err; | ||
1855 | 1856 | ||
1856 | /* Primary sanity checks. */ | 1857 | /* Primary sanity checks. */ |
1857 | 1858 | ||
@@ -1866,10 +1867,12 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
1866 | if (!ipv4_is_local_multicast(daddr)) | 1867 | if (!ipv4_is_local_multicast(daddr)) |
1867 | goto e_inval; | 1868 | goto e_inval; |
1868 | spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK); | 1869 | spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK); |
1869 | } else if (fib_validate_source(saddr, 0, tos, 0, | 1870 | } else { |
1870 | dev, &spec_dst, &itag, 0) < 0) | 1871 | err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst, |
1871 | goto e_inval; | 1872 | &itag, 0); |
1872 | 1873 | if (err < 0) | |
1874 | goto e_err; | ||
1875 | } | ||
1873 | rth = dst_alloc(&ipv4_dst_ops); | 1876 | rth = dst_alloc(&ipv4_dst_ops); |
1874 | if (!rth) | 1877 | if (!rth) |
1875 | goto e_nobufs; | 1878 | goto e_nobufs; |
@@ -1912,17 +1915,15 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
1912 | #endif | 1915 | #endif |
1913 | RT_CACHE_STAT_INC(in_slow_mc); | 1916 | RT_CACHE_STAT_INC(in_slow_mc); |
1914 | 1917 | ||
1915 | in_dev_put(in_dev); | ||
1916 | hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev))); | 1918 | hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev))); |
1917 | return rt_intern_hash(hash, rth, NULL, skb, dev->ifindex); | 1919 | return rt_intern_hash(hash, rth, NULL, skb, dev->ifindex); |
1918 | 1920 | ||
1919 | e_nobufs: | 1921 | e_nobufs: |
1920 | in_dev_put(in_dev); | ||
1921 | return -ENOBUFS; | 1922 | return -ENOBUFS; |
1922 | |||
1923 | e_inval: | 1923 | e_inval: |
1924 | in_dev_put(in_dev); | ||
1925 | return -EINVAL; | 1924 | return -EINVAL; |
1925 | e_err: | ||
1926 | return err; | ||
1926 | } | 1927 | } |
1927 | 1928 | ||
1928 | 1929 | ||
@@ -1956,22 +1957,22 @@ static void ip_handle_martian_source(struct net_device *dev, | |||
1956 | #endif | 1957 | #endif |
1957 | } | 1958 | } |
1958 | 1959 | ||
1960 | /* called in rcu_read_lock() section */ | ||
1959 | static int __mkroute_input(struct sk_buff *skb, | 1961 | static int __mkroute_input(struct sk_buff *skb, |
1960 | struct fib_result *res, | 1962 | struct fib_result *res, |
1961 | struct in_device *in_dev, | 1963 | struct in_device *in_dev, |
1962 | __be32 daddr, __be32 saddr, u32 tos, | 1964 | __be32 daddr, __be32 saddr, u32 tos, |
1963 | struct rtable **result) | 1965 | struct rtable **result) |
1964 | { | 1966 | { |
1965 | |||
1966 | struct rtable *rth; | 1967 | struct rtable *rth; |
1967 | int err; | 1968 | int err; |
1968 | struct in_device *out_dev; | 1969 | struct in_device *out_dev; |
1969 | unsigned flags = 0; | 1970 | unsigned int flags = 0; |
1970 | __be32 spec_dst; | 1971 | __be32 spec_dst; |
1971 | u32 itag; | 1972 | u32 itag; |
1972 | 1973 | ||
1973 | /* get a working reference to the output device */ | 1974 | /* get a working reference to the output device */ |
1974 | out_dev = in_dev_get(FIB_RES_DEV(*res)); | 1975 | out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res)); |
1975 | if (out_dev == NULL) { | 1976 | if (out_dev == NULL) { |
1976 | if (net_ratelimit()) | 1977 | if (net_ratelimit()) |
1977 | printk(KERN_CRIT "Bug in ip_route_input" \ | 1978 | printk(KERN_CRIT "Bug in ip_route_input" \ |
@@ -1986,7 +1987,6 @@ static int __mkroute_input(struct sk_buff *skb, | |||
1986 | ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr, | 1987 | ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr, |
1987 | saddr); | 1988 | saddr); |
1988 | 1989 | ||
1989 | err = -EINVAL; | ||
1990 | goto cleanup; | 1990 | goto cleanup; |
1991 | } | 1991 | } |
1992 | 1992 | ||
@@ -2053,8 +2053,6 @@ static int __mkroute_input(struct sk_buff *skb, | |||
2053 | *result = rth; | 2053 | *result = rth; |
2054 | err = 0; | 2054 | err = 0; |
2055 | cleanup: | 2055 | cleanup: |
2056 | /* release the working reference to the output device */ | ||
2057 | in_dev_put(out_dev); | ||
2058 | return err; | 2056 | return err; |
2059 | } | 2057 | } |
2060 | 2058 | ||
@@ -2098,7 +2096,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
2098 | u8 tos, struct net_device *dev) | 2096 | u8 tos, struct net_device *dev) |
2099 | { | 2097 | { |
2100 | struct fib_result res; | 2098 | struct fib_result res; |
2101 | struct in_device *in_dev = in_dev_get(dev); | 2099 | struct in_device *in_dev = __in_dev_get_rcu(dev); |
2102 | struct flowi fl = { .nl_u = { .ip4_u = | 2100 | struct flowi fl = { .nl_u = { .ip4_u = |
2103 | { .daddr = daddr, | 2101 | { .daddr = daddr, |
2104 | .saddr = saddr, | 2102 | .saddr = saddr, |
@@ -2158,13 +2156,12 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
2158 | goto brd_input; | 2156 | goto brd_input; |
2159 | 2157 | ||
2160 | if (res.type == RTN_LOCAL) { | 2158 | if (res.type == RTN_LOCAL) { |
2161 | int result; | 2159 | err = fib_validate_source(saddr, daddr, tos, |
2162 | result = fib_validate_source(saddr, daddr, tos, | ||
2163 | net->loopback_dev->ifindex, | 2160 | net->loopback_dev->ifindex, |
2164 | dev, &spec_dst, &itag, skb->mark); | 2161 | dev, &spec_dst, &itag, skb->mark); |
2165 | if (result < 0) | 2162 | if (err < 0) |
2166 | goto martian_source; | 2163 | goto martian_source_keep_err; |
2167 | if (result) | 2164 | if (err) |
2168 | flags |= RTCF_DIRECTSRC; | 2165 | flags |= RTCF_DIRECTSRC; |
2169 | spec_dst = daddr; | 2166 | spec_dst = daddr; |
2170 | goto local_input; | 2167 | goto local_input; |
@@ -2177,7 +2174,6 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
2177 | 2174 | ||
2178 | err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos); | 2175 | err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos); |
2179 | done: | 2176 | done: |
2180 | in_dev_put(in_dev); | ||
2181 | if (free_res) | 2177 | if (free_res) |
2182 | fib_res_put(&res); | 2178 | fib_res_put(&res); |
2183 | out: return err; | 2179 | out: return err; |
@@ -2192,7 +2188,7 @@ brd_input: | |||
2192 | err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst, | 2188 | err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst, |
2193 | &itag, skb->mark); | 2189 | &itag, skb->mark); |
2194 | if (err < 0) | 2190 | if (err < 0) |
2195 | goto martian_source; | 2191 | goto martian_source_keep_err; |
2196 | if (err) | 2192 | if (err) |
2197 | flags |= RTCF_DIRECTSRC; | 2193 | flags |= RTCF_DIRECTSRC; |
2198 | } | 2194 | } |
@@ -2273,8 +2269,10 @@ e_nobufs: | |||
2273 | goto done; | 2269 | goto done; |
2274 | 2270 | ||
2275 | martian_source: | 2271 | martian_source: |
2272 | err = -EINVAL; | ||
2273 | martian_source_keep_err: | ||
2276 | ip_handle_martian_source(dev, in_dev, skb, daddr, saddr); | 2274 | ip_handle_martian_source(dev, in_dev, skb, daddr, saddr); |
2277 | goto e_inval; | 2275 | goto done; |
2278 | } | 2276 | } |
2279 | 2277 | ||
2280 | int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr, | 2278 | int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr, |
@@ -2284,16 +2282,18 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
2284 | unsigned hash; | 2282 | unsigned hash; |
2285 | int iif = dev->ifindex; | 2283 | int iif = dev->ifindex; |
2286 | struct net *net; | 2284 | struct net *net; |
2285 | int res; | ||
2287 | 2286 | ||
2288 | net = dev_net(dev); | 2287 | net = dev_net(dev); |
2289 | 2288 | ||
2289 | rcu_read_lock(); | ||
2290 | |||
2290 | if (!rt_caching(net)) | 2291 | if (!rt_caching(net)) |
2291 | goto skip_cache; | 2292 | goto skip_cache; |
2292 | 2293 | ||
2293 | tos &= IPTOS_RT_MASK; | 2294 | tos &= IPTOS_RT_MASK; |
2294 | hash = rt_hash(daddr, saddr, iif, rt_genid(net)); | 2295 | hash = rt_hash(daddr, saddr, iif, rt_genid(net)); |
2295 | 2296 | ||
2296 | rcu_read_lock(); | ||
2297 | for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; | 2297 | for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; |
2298 | rth = rcu_dereference(rth->u.dst.rt_next)) { | 2298 | rth = rcu_dereference(rth->u.dst.rt_next)) { |
2299 | if ((((__force u32)rth->fl.fl4_dst ^ (__force u32)daddr) | | 2299 | if ((((__force u32)rth->fl.fl4_dst ^ (__force u32)daddr) | |
@@ -2317,7 +2317,6 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
2317 | } | 2317 | } |
2318 | RT_CACHE_STAT_INC(in_hlist_search); | 2318 | RT_CACHE_STAT_INC(in_hlist_search); |
2319 | } | 2319 | } |
2320 | rcu_read_unlock(); | ||
2321 | 2320 | ||
2322 | skip_cache: | 2321 | skip_cache: |
2323 | /* Multicast recognition logic is moved from route cache to here. | 2322 | /* Multicast recognition logic is moved from route cache to here. |
@@ -2332,12 +2331,11 @@ skip_cache: | |||
2332 | route cache entry is created eventually. | 2331 | route cache entry is created eventually. |
2333 | */ | 2332 | */ |
2334 | if (ipv4_is_multicast(daddr)) { | 2333 | if (ipv4_is_multicast(daddr)) { |
2335 | struct in_device *in_dev; | 2334 | struct in_device *in_dev = __in_dev_get_rcu(dev); |
2336 | 2335 | ||
2337 | rcu_read_lock(); | 2336 | if (in_dev) { |
2338 | if ((in_dev = __in_dev_get_rcu(dev)) != NULL) { | ||
2339 | int our = ip_check_mc(in_dev, daddr, saddr, | 2337 | int our = ip_check_mc(in_dev, daddr, saddr, |
2340 | ip_hdr(skb)->protocol); | 2338 | ip_hdr(skb)->protocol); |
2341 | if (our | 2339 | if (our |
2342 | #ifdef CONFIG_IP_MROUTE | 2340 | #ifdef CONFIG_IP_MROUTE |
2343 | || | 2341 | || |
@@ -2345,15 +2343,18 @@ skip_cache: | |||
2345 | IN_DEV_MFORWARD(in_dev)) | 2343 | IN_DEV_MFORWARD(in_dev)) |
2346 | #endif | 2344 | #endif |
2347 | ) { | 2345 | ) { |
2346 | int res = ip_route_input_mc(skb, daddr, saddr, | ||
2347 | tos, dev, our); | ||
2348 | rcu_read_unlock(); | 2348 | rcu_read_unlock(); |
2349 | return ip_route_input_mc(skb, daddr, saddr, | 2349 | return res; |
2350 | tos, dev, our); | ||
2351 | } | 2350 | } |
2352 | } | 2351 | } |
2353 | rcu_read_unlock(); | 2352 | rcu_read_unlock(); |
2354 | return -EINVAL; | 2353 | return -EINVAL; |
2355 | } | 2354 | } |
2356 | return ip_route_input_slow(skb, daddr, saddr, tos, dev); | 2355 | res = ip_route_input_slow(skb, daddr, saddr, tos, dev); |
2356 | rcu_read_unlock(); | ||
2357 | return res; | ||
2357 | } | 2358 | } |
2358 | EXPORT_SYMBOL(ip_route_input_common); | 2359 | EXPORT_SYMBOL(ip_route_input_common); |
2359 | 2360 | ||
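
Two themes run through the route.c hunks: RT_CACHE_STAT_INC() now expands to __this_cpu_inc(), and the whole input path runs under one rcu_read_lock()/rcu_read_unlock() pair so the in_dev_get()/in_dev_put() references can be dropped. A minimal sketch of the per-cpu counter idiom, with struct example_stat as a hypothetical stand-in for rt_cache_stat:

    #include <linux/percpu.h>

    struct example_stat {
        unsigned long in_slow_mc;
    };
    static DEFINE_PER_CPU(struct example_stat, example_stat);

    static void example_count(void)
    {
        /* no lock and no get_cpu_var()/put_cpu_var() pair required */
        __this_cpu_inc(example_stat.in_slow_mc);
    }
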
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 9f6b22206c52..5c48124332de 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c | |||
@@ -138,23 +138,23 @@ static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr, | |||
138 | } | 138 | } |
139 | 139 | ||
140 | /* | 140 | /* |
141 | * This table has to be sorted and terminated with (__u16)-1. | 141 | * MSS Values are taken from the 2009 paper |
142 | * XXX generate a better table. | 142 | * 'Measuring TCP Maximum Segment Size' by S. Alcock and R. Nelson: |
143 | * Unresolved Issues: HIPPI with a 64k MSS is not well supported. | 143 | * - values 1440 to 1460 accounted for 80% of observed mss values |
144 | * - values outside the 536-1460 range are rare (<0.2%). | ||
145 | * | ||
146 | * Table must be sorted. | ||
144 | */ | 147 | */ |
145 | static __u16 const msstab[] = { | 148 | static __u16 const msstab[] = { |
146 | 64 - 1, | 149 | 64, |
147 | 256 - 1, | 150 | 512, |
148 | 512 - 1, | 151 | 536, |
149 | 536 - 1, | 152 | 1024, |
150 | 1024 - 1, | 153 | 1440, |
151 | 1440 - 1, | 154 | 1460, |
152 | 1460 - 1, | 155 | 4312, |
153 | 4312 - 1, | 156 | 8960, |
154 | (__u16)-1 | ||
155 | }; | 157 | }; |
156 | /* The number doesn't include the -1 terminator */ | ||
157 | #define NUM_MSS (ARRAY_SIZE(msstab) - 1) | ||
158 | 158 | ||
159 | /* | 159 | /* |
160 | * Generate a syncookie. mssp points to the mss, which is returned | 160 | * Generate a syncookie. mssp points to the mss, which is returned |
@@ -169,10 +169,10 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp) | |||
169 | 169 | ||
170 | tcp_synq_overflow(sk); | 170 | tcp_synq_overflow(sk); |
171 | 171 | ||
172 | /* XXX sort msstab[] by probability? Binary search? */ | 172 | for (mssind = ARRAY_SIZE(msstab) - 1; mssind ; mssind--) |
173 | for (mssind = 0; mss > msstab[mssind + 1]; mssind++) | 173 | if (mss >= msstab[mssind]) |
174 | ; | 174 | break; |
175 | *mssp = msstab[mssind] + 1; | 175 | *mssp = msstab[mssind]; |
176 | 176 | ||
177 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); | 177 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); |
178 | 178 | ||
@@ -202,7 +202,7 @@ static inline int cookie_check(struct sk_buff *skb, __u32 cookie) | |||
202 | jiffies / (HZ * 60), | 202 | jiffies / (HZ * 60), |
203 | COUNTER_TRIES); | 203 | COUNTER_TRIES); |
204 | 204 | ||
205 | return mssind < NUM_MSS ? msstab[mssind] + 1 : 0; | 205 | return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0; |
206 | } | 206 | } |
207 | 207 | ||
208 | static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb, | 208 | static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb, |
@@ -266,7 +266,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, | |||
266 | struct rtable *rt; | 266 | struct rtable *rt; |
267 | __u8 rcv_wscale; | 267 | __u8 rcv_wscale; |
268 | 268 | ||
269 | if (!sysctl_tcp_syncookies || !th->ack) | 269 | if (!sysctl_tcp_syncookies || !th->ack || th->rst) |
270 | goto out; | 270 | goto out; |
271 | 271 | ||
272 | if (tcp_synq_no_recent_overflow(sk) || | 272 | if (tcp_synq_no_recent_overflow(sk) || |
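
The rewritten msstab[] stores real MSS values and is scanned from the top down, so the cookie now encodes the largest table entry that does not exceed the peer's advertised MSS. A minimal sketch of just the selection step, assuming the msstab[] defined in the hunk above:

    #include <linux/kernel.h>    /* ARRAY_SIZE */

    static __u16 example_cookie_mss(__u16 mss)
    {
        int mssind;

        for (mssind = ARRAY_SIZE(msstab) - 1; mssind; mssind--)
            if (mss >= msstab[mssind])
                break;
        /* e.g. 1460 -> 1460, 1400 -> 1024, 100 -> 64 */
        return msstab[mssind];
    }
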
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 6596b4feeddc..49d0d2b8900c 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -2999,6 +2999,7 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp, | |||
2999 | const unsigned head_data_len = skb_headlen(skb) > header_len ? | 2999 | const unsigned head_data_len = skb_headlen(skb) > header_len ? |
3000 | skb_headlen(skb) - header_len : 0; | 3000 | skb_headlen(skb) - header_len : 0; |
3001 | const struct skb_shared_info *shi = skb_shinfo(skb); | 3001 | const struct skb_shared_info *shi = skb_shinfo(skb); |
3002 | struct sk_buff *frag_iter; | ||
3002 | 3003 | ||
3003 | sg_init_table(&sg, 1); | 3004 | sg_init_table(&sg, 1); |
3004 | 3005 | ||
@@ -3013,6 +3014,10 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp, | |||
3013 | return 1; | 3014 | return 1; |
3014 | } | 3015 | } |
3015 | 3016 | ||
3017 | skb_walk_frags(skb, frag_iter) | ||
3018 | if (tcp_md5_hash_skb_data(hp, frag_iter, 0)) | ||
3019 | return 1; | ||
3020 | |||
3016 | return 0; | 3021 | return 0; |
3017 | } | 3022 | } |
3018 | 3023 | ||
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index fe193e53af44..acdc4c989853 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -793,19 +793,20 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req) | |||
793 | kfree(inet_rsk(req)->opt); | 793 | kfree(inet_rsk(req)->opt); |
794 | } | 794 | } |
795 | 795 | ||
796 | #ifdef CONFIG_SYN_COOKIES | 796 | static void syn_flood_warning(const struct sk_buff *skb) |
797 | static void syn_flood_warning(struct sk_buff *skb) | ||
798 | { | 797 | { |
799 | static unsigned long warntime; | 798 | const char *msg; |
800 | 799 | ||
801 | if (time_after(jiffies, (warntime + HZ * 60))) { | 800 | #ifdef CONFIG_SYN_COOKIES |
802 | warntime = jiffies; | 801 | if (sysctl_tcp_syncookies) |
803 | printk(KERN_INFO | 802 | msg = "Sending cookies"; |
804 | "possible SYN flooding on port %d. Sending cookies.\n", | 803 | else |
805 | ntohs(tcp_hdr(skb)->dest)); | ||
806 | } | ||
807 | } | ||
808 | #endif | 804 | #endif |
805 | msg = "Dropping request"; | ||
806 | |||
807 | pr_info("TCP: Possible SYN flooding on port %d. %s.\n", | ||
808 | ntohs(tcp_hdr(skb)->dest), msg); | ||
809 | } | ||
809 | 810 | ||
810 | /* | 811 | /* |
811 | * Save and compile IPv4 options into the request_sock if needed. | 812 | * Save and compile IPv4 options into the request_sock if needed. |
@@ -1243,6 +1244,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1243 | * evidently real one. | 1244 | * evidently real one. |
1244 | */ | 1245 | */ |
1245 | if (inet_csk_reqsk_queue_is_full(sk) && !isn) { | 1246 | if (inet_csk_reqsk_queue_is_full(sk) && !isn) { |
1247 | if (net_ratelimit()) | ||
1248 | syn_flood_warning(skb); | ||
1246 | #ifdef CONFIG_SYN_COOKIES | 1249 | #ifdef CONFIG_SYN_COOKIES |
1247 | if (sysctl_tcp_syncookies) { | 1250 | if (sysctl_tcp_syncookies) { |
1248 | want_cookie = 1; | 1251 | want_cookie = 1; |
@@ -1328,7 +1331,6 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1328 | 1331 | ||
1329 | if (want_cookie) { | 1332 | if (want_cookie) { |
1330 | #ifdef CONFIG_SYN_COOKIES | 1333 | #ifdef CONFIG_SYN_COOKIES |
1331 | syn_flood_warning(skb); | ||
1332 | req->cookie_ts = tmp_opt.tstamp_ok; | 1334 | req->cookie_ts = tmp_opt.tstamp_ok; |
1333 | #endif | 1335 | #endif |
1334 | isn = cookie_v4_init_sequence(sk, skb, &req->mss); | 1336 | isn = cookie_v4_init_sequence(sk, skb, &req->mss); |
@@ -1504,7 +1506,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) | |||
1504 | } | 1506 | } |
1505 | 1507 | ||
1506 | #ifdef CONFIG_SYN_COOKIES | 1508 | #ifdef CONFIG_SYN_COOKIES |
1507 | if (!th->rst && !th->syn && th->ack) | 1509 | if (!th->syn) |
1508 | sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt)); | 1510 | sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt)); |
1509 | #endif | 1511 | #endif |
1510 | return sk; | 1512 | return sk; |
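
syn_flood_warning() is now shared by the cookie and non-cookie builds and is called from the listen-overflow check, gated by net_ratelimit() so a flood cannot also flood the log. A minimal sketch of that ratelimit idiom, with example_warn() as a hypothetical name and the port already in host byte order:

    #include <linux/kernel.h>
    #include <linux/net.h>

    static void example_warn(int port)
    {
        if (net_ratelimit())
            pr_info("TCP: possible SYN flooding on port %d\n", port);
    }
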
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c index 8c4348cb1950..f0e774cea386 100644 --- a/net/ipv6/addrlabel.c +++ b/net/ipv6/addrlabel.c | |||
@@ -53,11 +53,7 @@ static struct ip6addrlbl_table | |||
53 | static inline | 53 | static inline |
54 | struct net *ip6addrlbl_net(const struct ip6addrlbl_entry *lbl) | 54 | struct net *ip6addrlbl_net(const struct ip6addrlbl_entry *lbl) |
55 | { | 55 | { |
56 | #ifdef CONFIG_NET_NS | 56 | return read_pnet(&lbl->lbl_net); |
57 | return lbl->lbl_net; | ||
58 | #else | ||
59 | return &init_net; | ||
60 | #endif | ||
61 | } | 57 | } |
62 | 58 | ||
63 | /* | 59 | /* |
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index e733942dafe1..94b1b9c954bf 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -651,7 +651,7 @@ int inet6_sk_rebuild_header(struct sock *sk) | |||
651 | 651 | ||
652 | if (dst == NULL) { | 652 | if (dst == NULL) { |
653 | struct inet_sock *inet = inet_sk(sk); | 653 | struct inet_sock *inet = inet_sk(sk); |
654 | struct in6_addr *final_p = NULL, final; | 654 | struct in6_addr *final_p, final; |
655 | struct flowi fl; | 655 | struct flowi fl; |
656 | 656 | ||
657 | memset(&fl, 0, sizeof(fl)); | 657 | memset(&fl, 0, sizeof(fl)); |
@@ -665,12 +665,7 @@ int inet6_sk_rebuild_header(struct sock *sk) | |||
665 | fl.fl_ip_sport = inet->inet_sport; | 665 | fl.fl_ip_sport = inet->inet_sport; |
666 | security_sk_classify_flow(sk, &fl); | 666 | security_sk_classify_flow(sk, &fl); |
667 | 667 | ||
668 | if (np->opt && np->opt->srcrt) { | 668 | final_p = fl6_update_dst(&fl, np->opt, &final); |
669 | struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt; | ||
670 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
671 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
672 | final_p = &final; | ||
673 | } | ||
674 | 669 | ||
675 | err = ip6_dst_lookup(sk, &dst, &fl); | 670 | err = ip6_dst_lookup(sk, &dst, &fl); |
676 | if (err) { | 671 | if (err) { |
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 712684687c9a..7d929a22cbc2 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c | |||
@@ -38,10 +38,11 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
38 | struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; | 38 | struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; |
39 | struct inet_sock *inet = inet_sk(sk); | 39 | struct inet_sock *inet = inet_sk(sk); |
40 | struct ipv6_pinfo *np = inet6_sk(sk); | 40 | struct ipv6_pinfo *np = inet6_sk(sk); |
41 | struct in6_addr *daddr, *final_p = NULL, final; | 41 | struct in6_addr *daddr, *final_p, final; |
42 | struct dst_entry *dst; | 42 | struct dst_entry *dst; |
43 | struct flowi fl; | 43 | struct flowi fl; |
44 | struct ip6_flowlabel *flowlabel = NULL; | 44 | struct ip6_flowlabel *flowlabel = NULL; |
45 | struct ipv6_txoptions *opt; | ||
45 | int addr_type; | 46 | int addr_type; |
46 | int err; | 47 | int err; |
47 | 48 | ||
@@ -155,19 +156,8 @@ ipv4_connected: | |||
155 | 156 | ||
156 | security_sk_classify_flow(sk, &fl); | 157 | security_sk_classify_flow(sk, &fl); |
157 | 158 | ||
158 | if (flowlabel) { | 159 | opt = flowlabel ? flowlabel->opt : np->opt; |
159 | if (flowlabel->opt && flowlabel->opt->srcrt) { | 160 | final_p = fl6_update_dst(&fl, opt, &final); |
160 | struct rt0_hdr *rt0 = (struct rt0_hdr *) flowlabel->opt->srcrt; | ||
161 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
162 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
163 | final_p = &final; | ||
164 | } | ||
165 | } else if (np->opt && np->opt->srcrt) { | ||
166 | struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt; | ||
167 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
168 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
169 | final_p = &final; | ||
170 | } | ||
171 | 161 | ||
172 | err = ip6_dst_lookup(sk, &dst, &fl); | 162 | err = ip6_dst_lookup(sk, &dst, &fl); |
173 | if (err) | 163 | if (err) |
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 8a659f92d17a..853a633a94d4 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c | |||
@@ -874,3 +874,27 @@ struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, | |||
874 | return opt; | 874 | return opt; |
875 | } | 875 | } |
876 | 876 | ||
877 | /** | ||
878 | * fl6_update_dst - update flowi destination address with info given | ||
879 | * by srcrt option, if any. | ||
880 | * | ||
881 | * @fl: flowi for which fl6_dst is to be updated | ||
882 | * @opt: struct ipv6_txoptions in which to look for srcrt opt | ||
883 | * @orig: copy of original fl6_dst address if modified | ||
884 | * | ||
885 | * Returns NULL if no txoptions or no srcrt, otherwise returns orig | ||
886 | * and initial value of fl->fl6_dst set in orig | ||
887 | */ | ||
888 | struct in6_addr *fl6_update_dst(struct flowi *fl, | ||
889 | const struct ipv6_txoptions *opt, | ||
890 | struct in6_addr *orig) | ||
891 | { | ||
892 | if (!opt || !opt->srcrt) | ||
893 | return NULL; | ||
894 | |||
895 | ipv6_addr_copy(orig, &fl->fl6_dst); | ||
896 | ipv6_addr_copy(&fl->fl6_dst, ((struct rt0_hdr *)opt->srcrt)->addr); | ||
897 | return orig; | ||
898 | } | ||
899 | |||
900 | EXPORT_SYMBOL_GPL(fl6_update_dst); | ||
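
fl6_update_dst() centralizes the srcrt rewrite that each IPv6 caller used to open-code: it swaps the routing-header hop into fl6_dst and hands back the original destination so it can be restored after the route lookup. A minimal sketch of the calling pattern used by the converted sites, with example_route() as a hypothetical wrapper and the usual IPv6 headers assumed:

    static int example_route(struct sock *sk, struct flowi *fl,
                             struct ipv6_txoptions *opt,
                             struct dst_entry **dst)
    {
        struct in6_addr final, *final_p;
        int err;

        final_p = fl6_update_dst(fl, opt, &final);    /* NULL if no srcrt */
        err = ip6_dst_lookup(sk, dst, fl);
        if (err)
            return err;
        if (final_p)        /* put the real destination back */
            ipv6_addr_copy(&fl->fl6_dst, final_p);
        return 0;
    }
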
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index 0c5e3c3b7fd5..8a1628023bd1 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c | |||
@@ -185,7 +185,7 @@ int inet6_csk_xmit(struct sk_buff *skb) | |||
185 | struct ipv6_pinfo *np = inet6_sk(sk); | 185 | struct ipv6_pinfo *np = inet6_sk(sk); |
186 | struct flowi fl; | 186 | struct flowi fl; |
187 | struct dst_entry *dst; | 187 | struct dst_entry *dst; |
188 | struct in6_addr *final_p = NULL, final; | 188 | struct in6_addr *final_p, final; |
189 | 189 | ||
190 | memset(&fl, 0, sizeof(fl)); | 190 | memset(&fl, 0, sizeof(fl)); |
191 | fl.proto = sk->sk_protocol; | 191 | fl.proto = sk->sk_protocol; |
@@ -199,12 +199,7 @@ int inet6_csk_xmit(struct sk_buff *skb) | |||
199 | fl.fl_ip_dport = inet->inet_dport; | 199 | fl.fl_ip_dport = inet->inet_dport; |
200 | security_sk_classify_flow(sk, &fl); | 200 | security_sk_classify_flow(sk, &fl); |
201 | 201 | ||
202 | if (np->opt && np->opt->srcrt) { | 202 | final_p = fl6_update_dst(&fl, np->opt, &final); |
203 | struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt; | ||
204 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
205 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
206 | final_p = &final; | ||
207 | } | ||
208 | 203 | ||
209 | dst = __inet6_csk_dst_check(sk, np->dst_cookie); | 204 | dst = __inet6_csk_dst_check(sk, np->dst_cookie); |
210 | 205 | ||
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index ab1622d7d409..8752e8084806 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
@@ -1998,8 +1998,7 @@ static int sf_setstate(struct ifmcaddr6 *pmc) | |||
1998 | &psf->sf_addr)) | 1998 | &psf->sf_addr)) |
1999 | break; | 1999 | break; |
2000 | if (!dpsf) { | 2000 | if (!dpsf) { |
2001 | dpsf = (struct ip6_sf_list *) | 2001 | dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC); |
2002 | kmalloc(sizeof(*dpsf), GFP_ATOMIC); | ||
2003 | if (!dpsf) | 2002 | if (!dpsf) |
2004 | continue; | 2003 | continue; |
2005 | *dpsf = *psf; | 2004 | *dpsf = *psf; |
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 4a4dcbe4f8b2..864eb8e03b1b 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
@@ -725,7 +725,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
725 | { | 725 | { |
726 | struct ipv6_txoptions opt_space; | 726 | struct ipv6_txoptions opt_space; |
727 | struct sockaddr_in6 * sin6 = (struct sockaddr_in6 *) msg->msg_name; | 727 | struct sockaddr_in6 * sin6 = (struct sockaddr_in6 *) msg->msg_name; |
728 | struct in6_addr *daddr, *final_p = NULL, final; | 728 | struct in6_addr *daddr, *final_p, final; |
729 | struct inet_sock *inet = inet_sk(sk); | 729 | struct inet_sock *inet = inet_sk(sk); |
730 | struct ipv6_pinfo *np = inet6_sk(sk); | 730 | struct ipv6_pinfo *np = inet6_sk(sk); |
731 | struct raw6_sock *rp = raw6_sk(sk); | 731 | struct raw6_sock *rp = raw6_sk(sk); |
@@ -847,13 +847,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
847 | if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr)) | 847 | if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr)) |
848 | ipv6_addr_copy(&fl.fl6_src, &np->saddr); | 848 | ipv6_addr_copy(&fl.fl6_src, &np->saddr); |
849 | 849 | ||
850 | /* merge ip6_build_xmit from ip6_output */ | 850 | final_p = fl6_update_dst(&fl, opt, &final); |
851 | if (opt && opt->srcrt) { | ||
852 | struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt; | ||
853 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
854 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
855 | final_p = &final; | ||
856 | } | ||
857 | 851 | ||
858 | if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) | 852 | if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) |
859 | fl.oif = np->mcast_oif; | 853 | fl.oif = np->mcast_oif; |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index e51e650ea80b..702c532ec21e 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -249,8 +249,6 @@ failed: | |||
249 | return NULL; | 249 | return NULL; |
250 | } | 250 | } |
251 | 251 | ||
252 | static DEFINE_SPINLOCK(ipip6_prl_lock); | ||
253 | |||
254 | #define for_each_prl_rcu(start) \ | 252 | #define for_each_prl_rcu(start) \ |
255 | for (prl = rcu_dereference(start); \ | 253 | for (prl = rcu_dereference(start); \ |
256 | prl; \ | 254 | prl; \ |
@@ -340,7 +338,7 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg) | |||
340 | if (a->addr == htonl(INADDR_ANY)) | 338 | if (a->addr == htonl(INADDR_ANY)) |
341 | return -EINVAL; | 339 | return -EINVAL; |
342 | 340 | ||
343 | spin_lock(&ipip6_prl_lock); | 341 | ASSERT_RTNL(); |
344 | 342 | ||
345 | for (p = t->prl; p; p = p->next) { | 343 | for (p = t->prl; p; p = p->next) { |
346 | if (p->addr == a->addr) { | 344 | if (p->addr == a->addr) { |
@@ -370,7 +368,6 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg) | |||
370 | t->prl_count++; | 368 | t->prl_count++; |
371 | rcu_assign_pointer(t->prl, p); | 369 | rcu_assign_pointer(t->prl, p); |
372 | out: | 370 | out: |
373 | spin_unlock(&ipip6_prl_lock); | ||
374 | return err; | 371 | return err; |
375 | } | 372 | } |
376 | 373 | ||
@@ -397,7 +394,7 @@ ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a) | |||
397 | struct ip_tunnel_prl_entry *x, **p; | 394 | struct ip_tunnel_prl_entry *x, **p; |
398 | int err = 0; | 395 | int err = 0; |
399 | 396 | ||
400 | spin_lock(&ipip6_prl_lock); | 397 | ASSERT_RTNL(); |
401 | 398 | ||
402 | if (a && a->addr != htonl(INADDR_ANY)) { | 399 | if (a && a->addr != htonl(INADDR_ANY)) { |
403 | for (p = &t->prl; *p; p = &(*p)->next) { | 400 | for (p = &t->prl; *p; p = &(*p)->next) { |
@@ -419,7 +416,6 @@ ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a) | |||
419 | } | 416 | } |
420 | } | 417 | } |
421 | out: | 418 | out: |
422 | spin_unlock(&ipip6_prl_lock); | ||
423 | return err; | 419 | return err; |
424 | } | 420 | } |
425 | 421 | ||
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 34d1f0690d7e..70d330f8c990 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c | |||
@@ -27,28 +27,17 @@ extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS]; | |||
27 | #define COOKIEBITS 24 /* Upper bits store count */ | 27 | #define COOKIEBITS 24 /* Upper bits store count */ |
28 | #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) | 28 | #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) |
29 | 29 | ||
30 | /* | 30 | /* Table must be sorted. */ |
31 | * This table has to be sorted and terminated with (__u16)-1. | ||
32 | * XXX generate a better table. | ||
33 | * Unresolved Issues: HIPPI with a 64k MSS is not well supported. | ||
34 | * | ||
35 | * Taken directly from ipv4 implementation. | ||
36 | * Should this list be modified for ipv6 use or is it close enough? | ||
37 | * rfc 2460 8.3 suggests mss values 20 bytes less than ipv4 counterpart | ||
38 | */ | ||
39 | static __u16 const msstab[] = { | 31 | static __u16 const msstab[] = { |
40 | 64 - 1, | 32 | 64, |
41 | 256 - 1, | 33 | 512, |
42 | 512 - 1, | 34 | 536, |
43 | 536 - 1, | 35 | 1280 - 60, |
44 | 1024 - 1, | 36 | 1480 - 60, |
45 | 1440 - 1, | 37 | 1500 - 60, |
46 | 1460 - 1, | 38 | 4460 - 60, |
47 | 4312 - 1, | 39 | 9000 - 60, |
48 | (__u16)-1 | ||
49 | }; | 40 | }; |
50 | /* The number doesn't include the -1 terminator */ | ||
51 | #define NUM_MSS (ARRAY_SIZE(msstab) - 1) | ||
52 | 41 | ||
53 | /* | 42 | /* |
54 | * This (misnamed) value is the age of syncookie which is permitted. | 43 | * This (misnamed) value is the age of syncookie which is permitted. |
@@ -134,9 +123,11 @@ __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp) | |||
134 | 123 | ||
135 | tcp_synq_overflow(sk); | 124 | tcp_synq_overflow(sk); |
136 | 125 | ||
137 | for (mssind = 0; mss > msstab[mssind + 1]; mssind++) | 126 | for (mssind = ARRAY_SIZE(msstab) - 1; mssind ; mssind--) |
138 | ; | 127 | if (mss >= msstab[mssind]) |
139 | *mssp = msstab[mssind] + 1; | 128 | break; |
129 | |||
130 | *mssp = msstab[mssind]; | ||
140 | 131 | ||
141 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); | 132 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); |
142 | 133 | ||
@@ -154,7 +145,7 @@ static inline int cookie_check(struct sk_buff *skb, __u32 cookie) | |||
154 | th->source, th->dest, seq, | 145 | th->source, th->dest, seq, |
155 | jiffies / (HZ * 60), COUNTER_TRIES); | 146 | jiffies / (HZ * 60), COUNTER_TRIES); |
156 | 147 | ||
157 | return mssind < NUM_MSS ? msstab[mssind] + 1 : 0; | 148 | return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0; |
158 | } | 149 | } |
159 | 150 | ||
160 | struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) | 151 | struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) |
@@ -174,7 +165,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) | |||
174 | struct dst_entry *dst; | 165 | struct dst_entry *dst; |
175 | __u8 rcv_wscale; | 166 | __u8 rcv_wscale; |
176 | 167 | ||
177 | if (!sysctl_tcp_syncookies || !th->ack) | 168 | if (!sysctl_tcp_syncookies || !th->ack || th->rst) |
178 | goto out; | 169 | goto out; |
179 | 170 | ||
180 | if (tcp_synq_no_recent_overflow(sk) || | 171 | if (tcp_synq_no_recent_overflow(sk) || |
@@ -240,17 +231,12 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) | |||
240 | * me if there is a preferred way. | 231 | * me if there is a preferred way. |
241 | */ | 232 | */ |
242 | { | 233 | { |
243 | struct in6_addr *final_p = NULL, final; | 234 | struct in6_addr *final_p, final; |
244 | struct flowi fl; | 235 | struct flowi fl; |
245 | memset(&fl, 0, sizeof(fl)); | 236 | memset(&fl, 0, sizeof(fl)); |
246 | fl.proto = IPPROTO_TCP; | 237 | fl.proto = IPPROTO_TCP; |
247 | ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr); | 238 | ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr); |
248 | if (np->opt && np->opt->srcrt) { | 239 | final_p = fl6_update_dst(&fl, np->opt, &final); |
249 | struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt; | ||
250 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
251 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
252 | final_p = &final; | ||
253 | } | ||
254 | ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr); | 240 | ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr); |
255 | fl.oif = sk->sk_bound_dev_if; | 241 | fl.oif = sk->sk_bound_dev_if; |
256 | fl.mark = sk->sk_mark; | 242 | fl.mark = sk->sk_mark; |
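The syncookie table rewrite above replaces the old "value minus one, terminated by (__u16)-1" encoding with plain MSS values derived from common MTUs minus 60 bytes of IPv6/TCP header, and the index is now found by scanning downward for the largest entry not exceeding the peer's advertised MSS. A minimal userspace sketch of that selection logic (the helper name mss_to_index and the sample values are illustrative only, not part of the patch):

    #include <stdio.h>

    /* Mirrors the new msstab from the hunk above. */
    static const unsigned short msstab[] = {
            64, 512, 536, 1280 - 60, 1480 - 60, 1500 - 60, 4460 - 60, 9000 - 60,
    };
    #define TABLE_SIZE (sizeof(msstab) / sizeof(msstab[0]))

    /* Pick the largest entry <= the advertised MSS; index 0 is the floor. */
    static unsigned int mss_to_index(unsigned short mss)
    {
            unsigned int i;

            for (i = TABLE_SIZE - 1; i; i--)
                    if (mss >= msstab[i])
                            break;
            return i;
    }

    int main(void)
    {
            unsigned short samples[] = { 100, 536, 1440, 4400, 9000 };
            unsigned int k;

            for (k = 0; k < sizeof(samples) / sizeof(samples[0]); k++)
                    printf("advertised %u -> index %u (encoded mss %u)\n",
                           samples[k], mss_to_index(samples[k]),
                           msstab[mss_to_index(samples[k])]);
            return 0;
    }

Because only the index travels inside the cookie, the decoder (cookie_check) simply bounds-checks it against ARRAY_SIZE(msstab) and returns the table value directly, with no +1/-1 adjustment left over from the old encoding; the same hunks also tighten validation by rejecting segments with RST set.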
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 2b7c3a100e2c..5887141ad641 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -129,7 +129,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
129 | struct inet_connection_sock *icsk = inet_csk(sk); | 129 | struct inet_connection_sock *icsk = inet_csk(sk); |
130 | struct ipv6_pinfo *np = inet6_sk(sk); | 130 | struct ipv6_pinfo *np = inet6_sk(sk); |
131 | struct tcp_sock *tp = tcp_sk(sk); | 131 | struct tcp_sock *tp = tcp_sk(sk); |
132 | struct in6_addr *saddr = NULL, *final_p = NULL, final; | 132 | struct in6_addr *saddr = NULL, *final_p, final; |
133 | struct flowi fl; | 133 | struct flowi fl; |
134 | struct dst_entry *dst; | 134 | struct dst_entry *dst; |
135 | int addr_type; | 135 | int addr_type; |
@@ -250,12 +250,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
250 | fl.fl_ip_dport = usin->sin6_port; | 250 | fl.fl_ip_dport = usin->sin6_port; |
251 | fl.fl_ip_sport = inet->inet_sport; | 251 | fl.fl_ip_sport = inet->inet_sport; |
252 | 252 | ||
253 | if (np->opt && np->opt->srcrt) { | 253 | final_p = fl6_update_dst(&fl, np->opt, &final); |
254 | struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt; | ||
255 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
256 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
257 | final_p = &final; | ||
258 | } | ||
259 | 254 | ||
260 | security_sk_classify_flow(sk, &fl); | 255 | security_sk_classify_flow(sk, &fl); |
261 | 256 | ||
@@ -477,7 +472,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, | |||
477 | struct ipv6_pinfo *np = inet6_sk(sk); | 472 | struct ipv6_pinfo *np = inet6_sk(sk); |
478 | struct sk_buff * skb; | 473 | struct sk_buff * skb; |
479 | struct ipv6_txoptions *opt = NULL; | 474 | struct ipv6_txoptions *opt = NULL; |
480 | struct in6_addr * final_p = NULL, final; | 475 | struct in6_addr * final_p, final; |
481 | struct flowi fl; | 476 | struct flowi fl; |
482 | struct dst_entry *dst; | 477 | struct dst_entry *dst; |
483 | int err = -1; | 478 | int err = -1; |
@@ -494,12 +489,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, | |||
494 | security_req_classify_flow(req, &fl); | 489 | security_req_classify_flow(req, &fl); |
495 | 490 | ||
496 | opt = np->opt; | 491 | opt = np->opt; |
497 | if (opt && opt->srcrt) { | 492 | final_p = fl6_update_dst(&fl, opt, &final); |
498 | struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt; | ||
499 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
500 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
501 | final_p = &final; | ||
502 | } | ||
503 | 493 | ||
504 | err = ip6_dst_lookup(sk, &dst, &fl); | 494 | err = ip6_dst_lookup(sk, &dst, &fl); |
505 | if (err) | 495 | if (err) |
@@ -1167,7 +1157,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) | |||
1167 | } | 1157 | } |
1168 | 1158 | ||
1169 | #ifdef CONFIG_SYN_COOKIES | 1159 | #ifdef CONFIG_SYN_COOKIES |
1170 | if (!th->rst && !th->syn && th->ack) | 1160 | if (!th->syn) |
1171 | sk = cookie_v6_check(sk, skb); | 1161 | sk = cookie_v6_check(sk, skb); |
1172 | #endif | 1162 | #endif |
1173 | return sk; | 1163 | return sk; |
@@ -1392,18 +1382,13 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1392 | goto out_overflow; | 1382 | goto out_overflow; |
1393 | 1383 | ||
1394 | if (dst == NULL) { | 1384 | if (dst == NULL) { |
1395 | struct in6_addr *final_p = NULL, final; | 1385 | struct in6_addr *final_p, final; |
1396 | struct flowi fl; | 1386 | struct flowi fl; |
1397 | 1387 | ||
1398 | memset(&fl, 0, sizeof(fl)); | 1388 | memset(&fl, 0, sizeof(fl)); |
1399 | fl.proto = IPPROTO_TCP; | 1389 | fl.proto = IPPROTO_TCP; |
1400 | ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr); | 1390 | ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr); |
1401 | if (opt && opt->srcrt) { | 1391 | final_p = fl6_update_dst(&fl, opt, &final); |
1402 | struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt; | ||
1403 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
1404 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
1405 | final_p = &final; | ||
1406 | } | ||
1407 | ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr); | 1392 | ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr); |
1408 | fl.oif = sk->sk_bound_dev_if; | 1393 | fl.oif = sk->sk_bound_dev_if; |
1409 | fl.mark = sk->sk_mark; | 1394 | fl.mark = sk->sk_mark; |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 87be58673b55..1dd1affdead2 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -927,7 +927,7 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
927 | struct inet_sock *inet = inet_sk(sk); | 927 | struct inet_sock *inet = inet_sk(sk); |
928 | struct ipv6_pinfo *np = inet6_sk(sk); | 928 | struct ipv6_pinfo *np = inet6_sk(sk); |
929 | struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name; | 929 | struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name; |
930 | struct in6_addr *daddr, *final_p = NULL, final; | 930 | struct in6_addr *daddr, *final_p, final; |
931 | struct ipv6_txoptions *opt = NULL; | 931 | struct ipv6_txoptions *opt = NULL; |
932 | struct ip6_flowlabel *flowlabel = NULL; | 932 | struct ip6_flowlabel *flowlabel = NULL; |
933 | struct flowi fl; | 933 | struct flowi fl; |
@@ -1097,14 +1097,9 @@ do_udp_sendmsg: | |||
1097 | ipv6_addr_copy(&fl.fl6_src, &np->saddr); | 1097 | ipv6_addr_copy(&fl.fl6_src, &np->saddr); |
1098 | fl.fl_ip_sport = inet->inet_sport; | 1098 | fl.fl_ip_sport = inet->inet_sport; |
1099 | 1099 | ||
1100 | /* merge ip6_build_xmit from ip6_output */ | 1100 | final_p = fl6_update_dst(&fl, opt, &final); |
1101 | if (opt && opt->srcrt) { | 1101 | if (final_p) |
1102 | struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt; | ||
1103 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
1104 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
1105 | final_p = &final; | ||
1106 | connected = 0; | 1102 | connected = 0; |
1107 | } | ||
1108 | 1103 | ||
1109 | if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) { | 1104 | if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) { |
1110 | fl.oif = np->mcast_oif; | 1105 | fl.oif = np->mcast_oif; |
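The syncookies, TCP and UDP hunks above all collapse the same open-coded routing-header handling into fl6_update_dst(). Judging from the code they remove, the helper presumably does no more than the pattern it replaces: when the socket's IPv6 tx options carry a type-0 routing header, it saves the original destination in *final and points the flow at the first routing-header hop, returning the saved address (or NULL when there is nothing to do). A sketch under that assumption:

    /* Sketch reconstructed from the removed call sites; the real helper
     * lives elsewhere in net/ipv6 and may differ in detail. */
    struct in6_addr *fl6_update_dst(struct flowi *fl,
                                    const struct ipv6_txoptions *opt,
                                    struct in6_addr *orig)
    {
            if (!opt || !opt->srcrt)
                    return NULL;

            ipv6_addr_copy(orig, &fl->fl6_dst);
            ipv6_addr_copy(&fl->fl6_dst,
                           ((struct rt0_hdr *)opt->srcrt)->addr);
            return orig;
    }

Callers keep the old behaviour by testing the returned pointer: the UDP path, for instance, clears its "connected" fast path only when the helper actually rewrote the destination, exactly as the removed block did.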
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index f28ad2cc8428..499c045d6910 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c | |||
@@ -1463,7 +1463,7 @@ struct iucv_path_pending { | |||
1463 | u32 res3; | 1463 | u32 res3; |
1464 | u8 ippollfg; | 1464 | u8 ippollfg; |
1465 | u8 res4[3]; | 1465 | u8 res4[3]; |
1466 | } __attribute__ ((packed)); | 1466 | } __packed; |
1467 | 1467 | ||
1468 | static void iucv_path_pending(struct iucv_irq_data *data) | 1468 | static void iucv_path_pending(struct iucv_irq_data *data) |
1469 | { | 1469 | { |
@@ -1524,7 +1524,7 @@ struct iucv_path_complete { | |||
1524 | u32 res3; | 1524 | u32 res3; |
1525 | u8 ippollfg; | 1525 | u8 ippollfg; |
1526 | u8 res4[3]; | 1526 | u8 res4[3]; |
1527 | } __attribute__ ((packed)); | 1527 | } __packed; |
1528 | 1528 | ||
1529 | static void iucv_path_complete(struct iucv_irq_data *data) | 1529 | static void iucv_path_complete(struct iucv_irq_data *data) |
1530 | { | 1530 | { |
@@ -1554,7 +1554,7 @@ struct iucv_path_severed { | |||
1554 | u32 res4; | 1554 | u32 res4; |
1555 | u8 ippollfg; | 1555 | u8 ippollfg; |
1556 | u8 res5[3]; | 1556 | u8 res5[3]; |
1557 | } __attribute__ ((packed)); | 1557 | } __packed; |
1558 | 1558 | ||
1559 | static void iucv_path_severed(struct iucv_irq_data *data) | 1559 | static void iucv_path_severed(struct iucv_irq_data *data) |
1560 | { | 1560 | { |
@@ -1590,7 +1590,7 @@ struct iucv_path_quiesced { | |||
1590 | u32 res4; | 1590 | u32 res4; |
1591 | u8 ippollfg; | 1591 | u8 ippollfg; |
1592 | u8 res5[3]; | 1592 | u8 res5[3]; |
1593 | } __attribute__ ((packed)); | 1593 | } __packed; |
1594 | 1594 | ||
1595 | static void iucv_path_quiesced(struct iucv_irq_data *data) | 1595 | static void iucv_path_quiesced(struct iucv_irq_data *data) |
1596 | { | 1596 | { |
@@ -1618,7 +1618,7 @@ struct iucv_path_resumed { | |||
1618 | u32 res4; | 1618 | u32 res4; |
1619 | u8 ippollfg; | 1619 | u8 ippollfg; |
1620 | u8 res5[3]; | 1620 | u8 res5[3]; |
1621 | } __attribute__ ((packed)); | 1621 | } __packed; |
1622 | 1622 | ||
1623 | static void iucv_path_resumed(struct iucv_irq_data *data) | 1623 | static void iucv_path_resumed(struct iucv_irq_data *data) |
1624 | { | 1624 | { |
@@ -1649,7 +1649,7 @@ struct iucv_message_complete { | |||
1649 | u32 ipbfln2f; | 1649 | u32 ipbfln2f; |
1650 | u8 ippollfg; | 1650 | u8 ippollfg; |
1651 | u8 res2[3]; | 1651 | u8 res2[3]; |
1652 | } __attribute__ ((packed)); | 1652 | } __packed; |
1653 | 1653 | ||
1654 | static void iucv_message_complete(struct iucv_irq_data *data) | 1654 | static void iucv_message_complete(struct iucv_irq_data *data) |
1655 | { | 1655 | { |
@@ -1694,7 +1694,7 @@ struct iucv_message_pending { | |||
1694 | u32 ipbfln2f; | 1694 | u32 ipbfln2f; |
1695 | u8 ippollfg; | 1695 | u8 ippollfg; |
1696 | u8 res2[3]; | 1696 | u8 res2[3]; |
1697 | } __attribute__ ((packed)); | 1697 | } __packed; |
1698 | 1698 | ||
1699 | static void iucv_message_pending(struct iucv_irq_data *data) | 1699 | static void iucv_message_pending(struct iucv_irq_data *data) |
1700 | { | 1700 | { |
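The __attribute__ ((packed)) to __packed substitutions above, and the matching ones in the mac80211 and sctp hunks below, are purely mechanical: the kernel's compiler headers define __packed as shorthand for the same GCC attribute, so struct layouts are untouched. A standalone sketch (the struct names are invented for illustration) showing that the two spellings behave identically:

    #include <stdio.h>
    #include <stddef.h>

    /* Same expansion the kernel's compiler headers are assumed to use. */
    #define __packed __attribute__((packed))

    struct demo_plain  { unsigned char a; unsigned int b; };
    struct demo_packed { unsigned char a; unsigned int b; } __packed;

    int main(void)
    {
            printf("plain:  size %zu, offsetof(b) %zu\n",
                   sizeof(struct demo_plain), offsetof(struct demo_plain, b));
            printf("packed: size %zu, offsetof(b) %zu\n",
                   sizeof(struct demo_packed), offsetof(struct demo_packed, b));
            return 0;
    }

On a typical build the packed variant drops the padding between the members (size 5, offset 1 versus size 8, offset 4), which is the whole point of marking on-the-wire structures this way.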
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index c7000a6ca379..a2ed0f7b5568 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -600,7 +600,7 @@ struct iapp_layer2_update { | |||
600 | u8 ssap; /* 0 */ | 600 | u8 ssap; /* 0 */ |
601 | u8 control; | 601 | u8 control; |
602 | u8 xid_info[3]; | 602 | u8 xid_info[3]; |
603 | } __attribute__ ((packed)); | 603 | } __packed; |
604 | 604 | ||
605 | static void ieee80211_send_layer2_update(struct sta_info *sta) | 605 | static void ieee80211_send_layer2_update(struct sta_info *sta) |
606 | { | 606 | { |
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 1a9e2da37a93..ec3e5c3e27bd 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -1084,7 +1084,7 @@ struct ieee80211_tx_status_rtap_hdr { | |||
1084 | u8 padding_for_rate; | 1084 | u8 padding_for_rate; |
1085 | __le16 tx_flags; | 1085 | __le16 tx_flags; |
1086 | u8 data_retries; | 1086 | u8 data_retries; |
1087 | } __attribute__ ((packed)); | 1087 | } __packed; |
1088 | 1088 | ||
1089 | 1089 | ||
1090 | /* HT */ | 1090 | /* HT */ |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 5e0b65406c44..dd232061e4c4 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -2148,7 +2148,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, | |||
2148 | u8 rate_or_pad; | 2148 | u8 rate_or_pad; |
2149 | __le16 chan_freq; | 2149 | __le16 chan_freq; |
2150 | __le16 chan_flags; | 2150 | __le16 chan_flags; |
2151 | } __attribute__ ((packed)) *rthdr; | 2151 | } __packed *rthdr; |
2152 | struct sk_buff *skb = rx->skb, *skb2; | 2152 | struct sk_buff *skb = rx->skb, *skb2; |
2153 | struct net_device *prev_dev = NULL; | 2153 | struct net_device *prev_dev = NULL; |
2154 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); | 2154 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); |
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index eeeb8bc73982..77288980fae0 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -619,9 +619,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone, | |||
619 | ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev = NULL; | 619 | ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev = NULL; |
620 | /* Don't set timer yet: wait for confirmation */ | 620 | /* Don't set timer yet: wait for confirmation */ |
621 | setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct); | 621 | setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct); |
622 | #ifdef CONFIG_NET_NS | 622 | write_pnet(&ct->ct_net, net); |
623 | ct->ct_net = net; | ||
624 | #endif | ||
625 | #ifdef CONFIG_NF_CONNTRACK_ZONES | 623 | #ifdef CONFIG_NF_CONNTRACK_ZONES |
626 | if (zone) { | 624 | if (zone) { |
627 | struct nf_conntrack_zone *nf_ct_zone; | 625 | struct nf_conntrack_zone *nf_ct_zone; |
@@ -1363,9 +1361,7 @@ static int nf_conntrack_init_init_net(void) | |||
1363 | goto err_extend; | 1361 | goto err_extend; |
1364 | #endif | 1362 | #endif |
1365 | /* Set up fake conntrack: to never be deleted, not in any hashes */ | 1363 | /* Set up fake conntrack: to never be deleted, not in any hashes */ |
1366 | #ifdef CONFIG_NET_NS | 1364 | write_pnet(&nf_conntrack_untracked.ct_net, &init_net); |
1367 | nf_conntrack_untracked.ct_net = &init_net; | ||
1368 | #endif | ||
1369 | atomic_set(&nf_conntrack_untracked.ct_general.use, 1); | 1365 | atomic_set(&nf_conntrack_untracked.ct_general.use, 1); |
1370 | /* - and look it like as a confirmed connection */ | 1366 | /* - and look it like as a confirmed connection */ |
1371 | set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status); | 1367 | set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status); |
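The conntrack hunks above swap an inline #ifdef CONFIG_NET_NS block for write_pnet(), which hides the same conditional behind a helper. Based on what the removed code did, the helper presumably amounts to the following (sketch only; the real definition lives in include/net/net_namespace.h):

    static inline void write_pnet(struct net **pnet, struct net *net)
    {
    #ifdef CONFIG_NET_NS
            *pnet = net;    /* only stored when network namespaces are built in */
    #endif
    }

The benefit is purely readability: callers such as nf_conntrack_alloc() no longer carry the ifdef, and a !CONFIG_NET_NS build still compiles the assignment away.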
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 2078a277e06b..9a17f28b1253 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -83,6 +83,7 @@ | |||
83 | #include <linux/if_vlan.h> | 83 | #include <linux/if_vlan.h> |
84 | #include <linux/virtio_net.h> | 84 | #include <linux/virtio_net.h> |
85 | #include <linux/errqueue.h> | 85 | #include <linux/errqueue.h> |
86 | #include <linux/net_tstamp.h> | ||
86 | 87 | ||
87 | #ifdef CONFIG_INET | 88 | #ifdef CONFIG_INET |
88 | #include <net/inet_common.h> | 89 | #include <net/inet_common.h> |
@@ -202,6 +203,7 @@ struct packet_sock { | |||
202 | unsigned int tp_hdrlen; | 203 | unsigned int tp_hdrlen; |
203 | unsigned int tp_reserve; | 204 | unsigned int tp_reserve; |
204 | unsigned int tp_loss:1; | 205 | unsigned int tp_loss:1; |
206 | unsigned int tp_tstamp; | ||
205 | struct packet_type prot_hook ____cacheline_aligned_in_smp; | 207 | struct packet_type prot_hook ____cacheline_aligned_in_smp; |
206 | }; | 208 | }; |
207 | 209 | ||
@@ -656,6 +658,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, | |||
656 | struct sk_buff *copy_skb = NULL; | 658 | struct sk_buff *copy_skb = NULL; |
657 | struct timeval tv; | 659 | struct timeval tv; |
658 | struct timespec ts; | 660 | struct timespec ts; |
661 | struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); | ||
659 | 662 | ||
660 | if (skb->pkt_type == PACKET_LOOPBACK) | 663 | if (skb->pkt_type == PACKET_LOOPBACK) |
661 | goto drop; | 664 | goto drop; |
@@ -737,7 +740,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, | |||
737 | h.h1->tp_snaplen = snaplen; | 740 | h.h1->tp_snaplen = snaplen; |
738 | h.h1->tp_mac = macoff; | 741 | h.h1->tp_mac = macoff; |
739 | h.h1->tp_net = netoff; | 742 | h.h1->tp_net = netoff; |
740 | if (skb->tstamp.tv64) | 743 | if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE) |
744 | && shhwtstamps->syststamp.tv64) | ||
745 | tv = ktime_to_timeval(shhwtstamps->syststamp); | ||
746 | else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE) | ||
747 | && shhwtstamps->hwtstamp.tv64) | ||
748 | tv = ktime_to_timeval(shhwtstamps->hwtstamp); | ||
749 | else if (skb->tstamp.tv64) | ||
741 | tv = ktime_to_timeval(skb->tstamp); | 750 | tv = ktime_to_timeval(skb->tstamp); |
742 | else | 751 | else |
743 | do_gettimeofday(&tv); | 752 | do_gettimeofday(&tv); |
@@ -750,7 +759,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, | |||
750 | h.h2->tp_snaplen = snaplen; | 759 | h.h2->tp_snaplen = snaplen; |
751 | h.h2->tp_mac = macoff; | 760 | h.h2->tp_mac = macoff; |
752 | h.h2->tp_net = netoff; | 761 | h.h2->tp_net = netoff; |
753 | if (skb->tstamp.tv64) | 762 | if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE) |
763 | && shhwtstamps->syststamp.tv64) | ||
764 | ts = ktime_to_timespec(shhwtstamps->syststamp); | ||
765 | else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE) | ||
766 | && shhwtstamps->hwtstamp.tv64) | ||
767 | ts = ktime_to_timespec(shhwtstamps->hwtstamp); | ||
768 | else if (skb->tstamp.tv64) | ||
754 | ts = ktime_to_timespec(skb->tstamp); | 769 | ts = ktime_to_timespec(skb->tstamp); |
755 | else | 770 | else |
756 | getnstimeofday(&ts); | 771 | getnstimeofday(&ts); |
@@ -2027,6 +2042,18 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv | |||
2027 | po->has_vnet_hdr = !!val; | 2042 | po->has_vnet_hdr = !!val; |
2028 | return 0; | 2043 | return 0; |
2029 | } | 2044 | } |
2045 | case PACKET_TIMESTAMP: | ||
2046 | { | ||
2047 | int val; | ||
2048 | |||
2049 | if (optlen != sizeof(val)) | ||
2050 | return -EINVAL; | ||
2051 | if (copy_from_user(&val, optval, sizeof(val))) | ||
2052 | return -EFAULT; | ||
2053 | |||
2054 | po->tp_tstamp = val; | ||
2055 | return 0; | ||
2056 | } | ||
2030 | default: | 2057 | default: |
2031 | return -ENOPROTOOPT; | 2058 | return -ENOPROTOOPT; |
2032 | } | 2059 | } |
@@ -2119,6 +2146,12 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, | |||
2119 | val = po->tp_loss; | 2146 | val = po->tp_loss; |
2120 | data = &val; | 2147 | data = &val; |
2121 | break; | 2148 | break; |
2149 | case PACKET_TIMESTAMP: | ||
2150 | if (len > sizeof(int)) | ||
2151 | len = sizeof(int); | ||
2152 | val = po->tp_tstamp; | ||
2153 | data = &val; | ||
2154 | break; | ||
2122 | default: | 2155 | default: |
2123 | return -ENOPROTOOPT; | 2156 | return -ENOPROTOOPT; |
2124 | } | 2157 | } |
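The af_packet changes above add a PACKET_TIMESTAMP socket option so a ring-buffer user can choose which clock fills tp_sec/tp_nsec: the synced or raw hardware timestamp when one is present, otherwise the usual software stamp. A hedged userspace sketch of turning it on (needs CAP_NET_RAW, and assumes userspace headers that already carry the constants this series introduces):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <arpa/inet.h>
    #include <linux/if_packet.h>
    #include <linux/if_ether.h>
    #include <linux/net_tstamp.h>

    int main(void)
    {
            int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
            int req = SOF_TIMESTAMPING_RAW_HARDWARE;

            if (fd < 0) {
                    perror("socket");
                    return 1;
            }
            /* Ask the kernel to prefer the NIC's raw hardware stamp when
             * filling the tpacket headers; software time stays the fallback. */
            if (setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP,
                           &req, sizeof(req)) < 0)
                    perror("setsockopt(PACKET_TIMESTAMP)");

            /* ... set up PACKET_RX_RING and consume frames as before ... */
            close(fd);
            return 0;
    }

packet_getsockopt() gains the matching read side, so the current selection can be queried back with the same option name.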
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index a63029ef3edd..d20fcd2a5519 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -205,7 +205,7 @@ void __qdisc_run(struct Qdisc *q) | |||
205 | } | 205 | } |
206 | } | 206 | } |
207 | 207 | ||
208 | clear_bit(__QDISC_STATE_RUNNING, &q->state); | 208 | qdisc_run_end(q); |
209 | } | 209 | } |
210 | 210 | ||
211 | unsigned long dev_trans_start(struct net_device *dev) | 211 | unsigned long dev_trans_start(struct net_device *dev) |
@@ -327,6 +327,24 @@ void netif_carrier_off(struct net_device *dev) | |||
327 | } | 327 | } |
328 | EXPORT_SYMBOL(netif_carrier_off); | 328 | EXPORT_SYMBOL(netif_carrier_off); |
329 | 329 | ||
330 | /** | ||
331 | * netif_notify_peers - notify network peers about existence of @dev | ||
332 | * @dev: network device | ||
333 | * | ||
334 | * Generate traffic such that interested network peers are aware of | ||
335 | * @dev, such as by generating a gratuitous ARP. This may be used when | ||
336 | * a device wants to inform the rest of the network about some sort of | ||
337 | * reconfiguration such as a failover event or virtual machine | ||
338 | * migration. | ||
339 | */ | ||
340 | void netif_notify_peers(struct net_device *dev) | ||
341 | { | ||
342 | rtnl_lock(); | ||
343 | call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev); | ||
344 | rtnl_unlock(); | ||
345 | } | ||
346 | EXPORT_SYMBOL(netif_notify_peers); | ||
347 | |||
330 | /* "NOOP" scheduler: the best scheduler, recommended for all interfaces | 348 | /* "NOOP" scheduler: the best scheduler, recommended for all interfaces |
331 | under all circumstances. It is difficult to invent anything faster or | 349 | under all circumstances. It is difficult to invent anything faster or |
332 | cheaper. | 350 | cheaper. |
@@ -543,6 +561,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, | |||
543 | 561 | ||
544 | INIT_LIST_HEAD(&sch->list); | 562 | INIT_LIST_HEAD(&sch->list); |
545 | skb_queue_head_init(&sch->q); | 563 | skb_queue_head_init(&sch->q); |
564 | spin_lock_init(&sch->busylock); | ||
546 | sch->ops = ops; | 565 | sch->ops = ops; |
547 | sch->enqueue = ops->enqueue; | 566 | sch->enqueue = ops->enqueue; |
548 | sch->dequeue = ops->dequeue; | 567 | sch->dequeue = ops->dequeue; |
@@ -779,7 +798,7 @@ static bool some_qdisc_is_busy(struct net_device *dev) | |||
779 | 798 | ||
780 | spin_lock_bh(root_lock); | 799 | spin_lock_bh(root_lock); |
781 | 800 | ||
782 | val = (test_bit(__QDISC_STATE_RUNNING, &q->state) || | 801 | val = (qdisc_is_running(q) || |
783 | test_bit(__QDISC_STATE_SCHED, &q->state)); | 802 | test_bit(__QDISC_STATE_SCHED, &q->state)); |
784 | 803 | ||
785 | spin_unlock_bh(root_lock); | 804 | spin_unlock_bh(root_lock); |
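Alongside the qdisc changes, the hunk above introduces netif_notify_peers(), whose kernel-doc explains its use after a failover event or VM migration. A hedged driver-side sketch of calling it (virt_handle_migration and its trigger are hypothetical, not part of this patch):

    /* After the backend signals a migration/failover, prod the network so
     * peers relearn our location (e.g. via gratuitous ARP). */
    static void virt_handle_migration(struct net_device *dev)
    {
            if (netif_running(dev))
                    netif_notify_peers(dev); /* takes RTNL, fires NETDEV_NOTIFY_PEERS */
    }

The qdisc_run_end()/qdisc_is_running() calls in the same file are presumably thin wrappers around the clear_bit()/test_bit() operations on __QDISC_STATE_RUNNING that they replace, so __qdisc_run() and some_qdisc_is_busy() behave as before while the state handling is centralised.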
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index bd2a50b482ac..246f92924658 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -1817,7 +1817,7 @@ malformed: | |||
1817 | struct __sctp_missing { | 1817 | struct __sctp_missing { |
1818 | __be32 num_missing; | 1818 | __be32 num_missing; |
1819 | __be16 type; | 1819 | __be16 type; |
1820 | } __attribute__((packed)); | 1820 | } __packed; |
1821 | 1821 | ||
1822 | /* | 1822 | /* |
1823 | * Report a missing mandatory parameter. | 1823 | * Report a missing mandatory parameter. |
diff --git a/net/socket.c b/net/socket.c index 367d5477d00f..acfa1738663d 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -124,7 +124,7 @@ static int sock_fasync(int fd, struct file *filp, int on); | |||
124 | static ssize_t sock_sendpage(struct file *file, struct page *page, | 124 | static ssize_t sock_sendpage(struct file *file, struct page *page, |
125 | int offset, size_t size, loff_t *ppos, int more); | 125 | int offset, size_t size, loff_t *ppos, int more); |
126 | static ssize_t sock_splice_read(struct file *file, loff_t *ppos, | 126 | static ssize_t sock_splice_read(struct file *file, loff_t *ppos, |
127 | struct pipe_inode_info *pipe, size_t len, | 127 | struct pipe_inode_info *pipe, size_t len, |
128 | unsigned int flags); | 128 | unsigned int flags); |
129 | 129 | ||
130 | /* | 130 | /* |
@@ -162,7 +162,7 @@ static const struct net_proto_family *net_families[NPROTO] __read_mostly; | |||
162 | * Statistics counters of the socket lists | 162 | * Statistics counters of the socket lists |
163 | */ | 163 | */ |
164 | 164 | ||
165 | static DEFINE_PER_CPU(int, sockets_in_use) = 0; | 165 | static DEFINE_PER_CPU(int, sockets_in_use); |
166 | 166 | ||
167 | /* | 167 | /* |
168 | * Support routines. | 168 | * Support routines. |
@@ -309,9 +309,9 @@ static int init_inodecache(void) | |||
309 | } | 309 | } |
310 | 310 | ||
311 | static const struct super_operations sockfs_ops = { | 311 | static const struct super_operations sockfs_ops = { |
312 | .alloc_inode = sock_alloc_inode, | 312 | .alloc_inode = sock_alloc_inode, |
313 | .destroy_inode =sock_destroy_inode, | 313 | .destroy_inode = sock_destroy_inode, |
314 | .statfs = simple_statfs, | 314 | .statfs = simple_statfs, |
315 | }; | 315 | }; |
316 | 316 | ||
317 | static int sockfs_get_sb(struct file_system_type *fs_type, | 317 | static int sockfs_get_sb(struct file_system_type *fs_type, |
@@ -411,6 +411,7 @@ int sock_map_fd(struct socket *sock, int flags) | |||
411 | 411 | ||
412 | return fd; | 412 | return fd; |
413 | } | 413 | } |
414 | EXPORT_SYMBOL(sock_map_fd); | ||
414 | 415 | ||
415 | static struct socket *sock_from_file(struct file *file, int *err) | 416 | static struct socket *sock_from_file(struct file *file, int *err) |
416 | { | 417 | { |
@@ -422,7 +423,7 @@ static struct socket *sock_from_file(struct file *file, int *err) | |||
422 | } | 423 | } |
423 | 424 | ||
424 | /** | 425 | /** |
425 | * sockfd_lookup - Go from a file number to its socket slot | 426 | * sockfd_lookup - Go from a file number to its socket slot |
426 | * @fd: file handle | 427 | * @fd: file handle |
427 | * @err: pointer to an error code return | 428 | * @err: pointer to an error code return |
428 | * | 429 | * |
@@ -450,6 +451,7 @@ struct socket *sockfd_lookup(int fd, int *err) | |||
450 | fput(file); | 451 | fput(file); |
451 | return sock; | 452 | return sock; |
452 | } | 453 | } |
454 | EXPORT_SYMBOL(sockfd_lookup); | ||
453 | 455 | ||
454 | static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed) | 456 | static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed) |
455 | { | 457 | { |
@@ -540,6 +542,7 @@ void sock_release(struct socket *sock) | |||
540 | } | 542 | } |
541 | sock->file = NULL; | 543 | sock->file = NULL; |
542 | } | 544 | } |
545 | EXPORT_SYMBOL(sock_release); | ||
543 | 546 | ||
544 | int sock_tx_timestamp(struct msghdr *msg, struct sock *sk, | 547 | int sock_tx_timestamp(struct msghdr *msg, struct sock *sk, |
545 | union skb_shared_tx *shtx) | 548 | union skb_shared_tx *shtx) |
@@ -586,6 +589,7 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) | |||
586 | ret = wait_on_sync_kiocb(&iocb); | 589 | ret = wait_on_sync_kiocb(&iocb); |
587 | return ret; | 590 | return ret; |
588 | } | 591 | } |
592 | EXPORT_SYMBOL(sock_sendmsg); | ||
589 | 593 | ||
590 | int kernel_sendmsg(struct socket *sock, struct msghdr *msg, | 594 | int kernel_sendmsg(struct socket *sock, struct msghdr *msg, |
591 | struct kvec *vec, size_t num, size_t size) | 595 | struct kvec *vec, size_t num, size_t size) |
@@ -604,6 +608,7 @@ int kernel_sendmsg(struct socket *sock, struct msghdr *msg, | |||
604 | set_fs(oldfs); | 608 | set_fs(oldfs); |
605 | return result; | 609 | return result; |
606 | } | 610 | } |
611 | EXPORT_SYMBOL(kernel_sendmsg); | ||
607 | 612 | ||
608 | static int ktime2ts(ktime_t kt, struct timespec *ts) | 613 | static int ktime2ts(ktime_t kt, struct timespec *ts) |
609 | { | 614 | { |
@@ -664,7 +669,6 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, | |||
664 | put_cmsg(msg, SOL_SOCKET, | 669 | put_cmsg(msg, SOL_SOCKET, |
665 | SCM_TIMESTAMPING, sizeof(ts), &ts); | 670 | SCM_TIMESTAMPING, sizeof(ts), &ts); |
666 | } | 671 | } |
667 | |||
668 | EXPORT_SYMBOL_GPL(__sock_recv_timestamp); | 672 | EXPORT_SYMBOL_GPL(__sock_recv_timestamp); |
669 | 673 | ||
670 | inline void sock_recv_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) | 674 | inline void sock_recv_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) |
@@ -720,6 +724,7 @@ int sock_recvmsg(struct socket *sock, struct msghdr *msg, | |||
720 | ret = wait_on_sync_kiocb(&iocb); | 724 | ret = wait_on_sync_kiocb(&iocb); |
721 | return ret; | 725 | return ret; |
722 | } | 726 | } |
727 | EXPORT_SYMBOL(sock_recvmsg); | ||
723 | 728 | ||
724 | static int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg, | 729 | static int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg, |
725 | size_t size, int flags) | 730 | size_t size, int flags) |
@@ -752,6 +757,7 @@ int kernel_recvmsg(struct socket *sock, struct msghdr *msg, | |||
752 | set_fs(oldfs); | 757 | set_fs(oldfs); |
753 | return result; | 758 | return result; |
754 | } | 759 | } |
760 | EXPORT_SYMBOL(kernel_recvmsg); | ||
755 | 761 | ||
756 | static void sock_aio_dtor(struct kiocb *iocb) | 762 | static void sock_aio_dtor(struct kiocb *iocb) |
757 | { | 763 | { |
@@ -774,7 +780,7 @@ static ssize_t sock_sendpage(struct file *file, struct page *page, | |||
774 | } | 780 | } |
775 | 781 | ||
776 | static ssize_t sock_splice_read(struct file *file, loff_t *ppos, | 782 | static ssize_t sock_splice_read(struct file *file, loff_t *ppos, |
777 | struct pipe_inode_info *pipe, size_t len, | 783 | struct pipe_inode_info *pipe, size_t len, |
778 | unsigned int flags) | 784 | unsigned int flags) |
779 | { | 785 | { |
780 | struct socket *sock = file->private_data; | 786 | struct socket *sock = file->private_data; |
@@ -887,7 +893,7 @@ static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov, | |||
887 | */ | 893 | */ |
888 | 894 | ||
889 | static DEFINE_MUTEX(br_ioctl_mutex); | 895 | static DEFINE_MUTEX(br_ioctl_mutex); |
890 | static int (*br_ioctl_hook) (struct net *, unsigned int cmd, void __user *arg) = NULL; | 896 | static int (*br_ioctl_hook) (struct net *, unsigned int cmd, void __user *arg); |
891 | 897 | ||
892 | void brioctl_set(int (*hook) (struct net *, unsigned int, void __user *)) | 898 | void brioctl_set(int (*hook) (struct net *, unsigned int, void __user *)) |
893 | { | 899 | { |
@@ -895,7 +901,6 @@ void brioctl_set(int (*hook) (struct net *, unsigned int, void __user *)) | |||
895 | br_ioctl_hook = hook; | 901 | br_ioctl_hook = hook; |
896 | mutex_unlock(&br_ioctl_mutex); | 902 | mutex_unlock(&br_ioctl_mutex); |
897 | } | 903 | } |
898 | |||
899 | EXPORT_SYMBOL(brioctl_set); | 904 | EXPORT_SYMBOL(brioctl_set); |
900 | 905 | ||
901 | static DEFINE_MUTEX(vlan_ioctl_mutex); | 906 | static DEFINE_MUTEX(vlan_ioctl_mutex); |
@@ -907,7 +912,6 @@ void vlan_ioctl_set(int (*hook) (struct net *, void __user *)) | |||
907 | vlan_ioctl_hook = hook; | 912 | vlan_ioctl_hook = hook; |
908 | mutex_unlock(&vlan_ioctl_mutex); | 913 | mutex_unlock(&vlan_ioctl_mutex); |
909 | } | 914 | } |
910 | |||
911 | EXPORT_SYMBOL(vlan_ioctl_set); | 915 | EXPORT_SYMBOL(vlan_ioctl_set); |
912 | 916 | ||
913 | static DEFINE_MUTEX(dlci_ioctl_mutex); | 917 | static DEFINE_MUTEX(dlci_ioctl_mutex); |
@@ -919,7 +923,6 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *)) | |||
919 | dlci_ioctl_hook = hook; | 923 | dlci_ioctl_hook = hook; |
920 | mutex_unlock(&dlci_ioctl_mutex); | 924 | mutex_unlock(&dlci_ioctl_mutex); |
921 | } | 925 | } |
922 | |||
923 | EXPORT_SYMBOL(dlci_ioctl_set); | 926 | EXPORT_SYMBOL(dlci_ioctl_set); |
924 | 927 | ||
925 | static long sock_do_ioctl(struct net *net, struct socket *sock, | 928 | static long sock_do_ioctl(struct net *net, struct socket *sock, |
@@ -1047,6 +1050,7 @@ out_release: | |||
1047 | sock = NULL; | 1050 | sock = NULL; |
1048 | goto out; | 1051 | goto out; |
1049 | } | 1052 | } |
1053 | EXPORT_SYMBOL(sock_create_lite); | ||
1050 | 1054 | ||
1051 | /* No kernel lock held - perfect */ | 1055 | /* No kernel lock held - perfect */ |
1052 | static unsigned int sock_poll(struct file *file, poll_table *wait) | 1056 | static unsigned int sock_poll(struct file *file, poll_table *wait) |
@@ -1147,6 +1151,7 @@ call_kill: | |||
1147 | rcu_read_unlock(); | 1151 | rcu_read_unlock(); |
1148 | return 0; | 1152 | return 0; |
1149 | } | 1153 | } |
1154 | EXPORT_SYMBOL(sock_wake_async); | ||
1150 | 1155 | ||
1151 | static int __sock_create(struct net *net, int family, int type, int protocol, | 1156 | static int __sock_create(struct net *net, int family, int type, int protocol, |
1152 | struct socket **res, int kern) | 1157 | struct socket **res, int kern) |
@@ -1265,11 +1270,13 @@ int sock_create(int family, int type, int protocol, struct socket **res) | |||
1265 | { | 1270 | { |
1266 | return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0); | 1271 | return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0); |
1267 | } | 1272 | } |
1273 | EXPORT_SYMBOL(sock_create); | ||
1268 | 1274 | ||
1269 | int sock_create_kern(int family, int type, int protocol, struct socket **res) | 1275 | int sock_create_kern(int family, int type, int protocol, struct socket **res) |
1270 | { | 1276 | { |
1271 | return __sock_create(&init_net, family, type, protocol, res, 1); | 1277 | return __sock_create(&init_net, family, type, protocol, res, 1); |
1272 | } | 1278 | } |
1279 | EXPORT_SYMBOL(sock_create_kern); | ||
1273 | 1280 | ||
1274 | SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol) | 1281 | SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol) |
1275 | { | 1282 | { |
@@ -1474,7 +1481,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr, | |||
1474 | goto out; | 1481 | goto out; |
1475 | 1482 | ||
1476 | err = -ENFILE; | 1483 | err = -ENFILE; |
1477 | if (!(newsock = sock_alloc())) | 1484 | newsock = sock_alloc(); |
1485 | if (!newsock) | ||
1478 | goto out_put; | 1486 | goto out_put; |
1479 | 1487 | ||
1480 | newsock->type = sock->type; | 1488 | newsock->type = sock->type; |
@@ -1861,8 +1869,7 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags) | |||
1861 | if (MSG_CMSG_COMPAT & flags) { | 1869 | if (MSG_CMSG_COMPAT & flags) { |
1862 | if (get_compat_msghdr(&msg_sys, msg_compat)) | 1870 | if (get_compat_msghdr(&msg_sys, msg_compat)) |
1863 | return -EFAULT; | 1871 | return -EFAULT; |
1864 | } | 1872 | } else if (copy_from_user(&msg_sys, msg, sizeof(struct msghdr))) |
1865 | else if (copy_from_user(&msg_sys, msg, sizeof(struct msghdr))) | ||
1866 | return -EFAULT; | 1873 | return -EFAULT; |
1867 | 1874 | ||
1868 | sock = sockfd_lookup_light(fd, &err, &fput_needed); | 1875 | sock = sockfd_lookup_light(fd, &err, &fput_needed); |
@@ -1964,8 +1971,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg, | |||
1964 | if (MSG_CMSG_COMPAT & flags) { | 1971 | if (MSG_CMSG_COMPAT & flags) { |
1965 | if (get_compat_msghdr(msg_sys, msg_compat)) | 1972 | if (get_compat_msghdr(msg_sys, msg_compat)) |
1966 | return -EFAULT; | 1973 | return -EFAULT; |
1967 | } | 1974 | } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) |
1968 | else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) | ||
1969 | return -EFAULT; | 1975 | return -EFAULT; |
1970 | 1976 | ||
1971 | err = -EMSGSIZE; | 1977 | err = -EMSGSIZE; |
@@ -2191,10 +2197,10 @@ SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg, | |||
2191 | /* Argument list sizes for sys_socketcall */ | 2197 | /* Argument list sizes for sys_socketcall */ |
2192 | #define AL(x) ((x) * sizeof(unsigned long)) | 2198 | #define AL(x) ((x) * sizeof(unsigned long)) |
2193 | static const unsigned char nargs[20] = { | 2199 | static const unsigned char nargs[20] = { |
2194 | AL(0),AL(3),AL(3),AL(3),AL(2),AL(3), | 2200 | AL(0), AL(3), AL(3), AL(3), AL(2), AL(3), |
2195 | AL(3),AL(3),AL(4),AL(4),AL(4),AL(6), | 2201 | AL(3), AL(3), AL(4), AL(4), AL(4), AL(6), |
2196 | AL(6),AL(2),AL(5),AL(5),AL(3),AL(3), | 2202 | AL(6), AL(2), AL(5), AL(5), AL(3), AL(3), |
2197 | AL(4),AL(5) | 2203 | AL(4), AL(5) |
2198 | }; | 2204 | }; |
2199 | 2205 | ||
2200 | #undef AL | 2206 | #undef AL |
@@ -2340,6 +2346,7 @@ int sock_register(const struct net_proto_family *ops) | |||
2340 | printk(KERN_INFO "NET: Registered protocol family %d\n", ops->family); | 2346 | printk(KERN_INFO "NET: Registered protocol family %d\n", ops->family); |
2341 | return err; | 2347 | return err; |
2342 | } | 2348 | } |
2349 | EXPORT_SYMBOL(sock_register); | ||
2343 | 2350 | ||
2344 | /** | 2351 | /** |
2345 | * sock_unregister - remove a protocol handler | 2352 | * sock_unregister - remove a protocol handler |
@@ -2366,6 +2373,7 @@ void sock_unregister(int family) | |||
2366 | 2373 | ||
2367 | printk(KERN_INFO "NET: Unregistered protocol family %d\n", family); | 2374 | printk(KERN_INFO "NET: Unregistered protocol family %d\n", family); |
2368 | } | 2375 | } |
2376 | EXPORT_SYMBOL(sock_unregister); | ||
2369 | 2377 | ||
2370 | static int __init sock_init(void) | 2378 | static int __init sock_init(void) |
2371 | { | 2379 | { |
@@ -2490,13 +2498,13 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32) | |||
2490 | ifc.ifc_req = NULL; | 2498 | ifc.ifc_req = NULL; |
2491 | uifc = compat_alloc_user_space(sizeof(struct ifconf)); | 2499 | uifc = compat_alloc_user_space(sizeof(struct ifconf)); |
2492 | } else { | 2500 | } else { |
2493 | size_t len =((ifc32.ifc_len / sizeof (struct compat_ifreq)) + 1) * | 2501 | size_t len = ((ifc32.ifc_len / sizeof(struct compat_ifreq)) + 1) * |
2494 | sizeof (struct ifreq); | 2502 | sizeof(struct ifreq); |
2495 | uifc = compat_alloc_user_space(sizeof(struct ifconf) + len); | 2503 | uifc = compat_alloc_user_space(sizeof(struct ifconf) + len); |
2496 | ifc.ifc_len = len; | 2504 | ifc.ifc_len = len; |
2497 | ifr = ifc.ifc_req = (void __user *)(uifc + 1); | 2505 | ifr = ifc.ifc_req = (void __user *)(uifc + 1); |
2498 | ifr32 = compat_ptr(ifc32.ifcbuf); | 2506 | ifr32 = compat_ptr(ifc32.ifcbuf); |
2499 | for (i = 0; i < ifc32.ifc_len; i += sizeof (struct compat_ifreq)) { | 2507 | for (i = 0; i < ifc32.ifc_len; i += sizeof(struct compat_ifreq)) { |
2500 | if (copy_in_user(ifr, ifr32, sizeof(struct compat_ifreq))) | 2508 | if (copy_in_user(ifr, ifr32, sizeof(struct compat_ifreq))) |
2501 | return -EFAULT; | 2509 | return -EFAULT; |
2502 | ifr++; | 2510 | ifr++; |
@@ -2516,9 +2524,9 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32) | |||
2516 | ifr = ifc.ifc_req; | 2524 | ifr = ifc.ifc_req; |
2517 | ifr32 = compat_ptr(ifc32.ifcbuf); | 2525 | ifr32 = compat_ptr(ifc32.ifcbuf); |
2518 | for (i = 0, j = 0; | 2526 | for (i = 0, j = 0; |
2519 | i + sizeof (struct compat_ifreq) <= ifc32.ifc_len && j < ifc.ifc_len; | 2527 | i + sizeof(struct compat_ifreq) <= ifc32.ifc_len && j < ifc.ifc_len; |
2520 | i += sizeof (struct compat_ifreq), j += sizeof (struct ifreq)) { | 2528 | i += sizeof(struct compat_ifreq), j += sizeof(struct ifreq)) { |
2521 | if (copy_in_user(ifr32, ifr, sizeof (struct compat_ifreq))) | 2529 | if (copy_in_user(ifr32, ifr, sizeof(struct compat_ifreq))) |
2522 | return -EFAULT; | 2530 | return -EFAULT; |
2523 | ifr32++; | 2531 | ifr32++; |
2524 | ifr++; | 2532 | ifr++; |
@@ -2567,7 +2575,7 @@ static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32 | |||
2567 | compat_uptr_t uptr32; | 2575 | compat_uptr_t uptr32; |
2568 | struct ifreq __user *uifr; | 2576 | struct ifreq __user *uifr; |
2569 | 2577 | ||
2570 | uifr = compat_alloc_user_space(sizeof (*uifr)); | 2578 | uifr = compat_alloc_user_space(sizeof(*uifr)); |
2571 | if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) | 2579 | if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) |
2572 | return -EFAULT; | 2580 | return -EFAULT; |
2573 | 2581 | ||
@@ -2601,9 +2609,9 @@ static int bond_ioctl(struct net *net, unsigned int cmd, | |||
2601 | return -EFAULT; | 2609 | return -EFAULT; |
2602 | 2610 | ||
2603 | old_fs = get_fs(); | 2611 | old_fs = get_fs(); |
2604 | set_fs (KERNEL_DS); | 2612 | set_fs(KERNEL_DS); |
2605 | err = dev_ioctl(net, cmd, &kifr); | 2613 | err = dev_ioctl(net, cmd, &kifr); |
2606 | set_fs (old_fs); | 2614 | set_fs(old_fs); |
2607 | 2615 | ||
2608 | return err; | 2616 | return err; |
2609 | case SIOCBONDSLAVEINFOQUERY: | 2617 | case SIOCBONDSLAVEINFOQUERY: |
@@ -2710,9 +2718,9 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd, | |||
2710 | return -EFAULT; | 2718 | return -EFAULT; |
2711 | 2719 | ||
2712 | old_fs = get_fs(); | 2720 | old_fs = get_fs(); |
2713 | set_fs (KERNEL_DS); | 2721 | set_fs(KERNEL_DS); |
2714 | err = dev_ioctl(net, cmd, (void __user *)&ifr); | 2722 | err = dev_ioctl(net, cmd, (void __user *)&ifr); |
2715 | set_fs (old_fs); | 2723 | set_fs(old_fs); |
2716 | 2724 | ||
2717 | if (cmd == SIOCGIFMAP && !err) { | 2725 | if (cmd == SIOCGIFMAP && !err) { |
2718 | err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name)); | 2726 | err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name)); |
@@ -2734,7 +2742,7 @@ static int compat_siocshwtstamp(struct net *net, struct compat_ifreq __user *uif | |||
2734 | compat_uptr_t uptr32; | 2742 | compat_uptr_t uptr32; |
2735 | struct ifreq __user *uifr; | 2743 | struct ifreq __user *uifr; |
2736 | 2744 | ||
2737 | uifr = compat_alloc_user_space(sizeof (*uifr)); | 2745 | uifr = compat_alloc_user_space(sizeof(*uifr)); |
2738 | if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) | 2746 | if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) |
2739 | return -EFAULT; | 2747 | return -EFAULT; |
2740 | 2748 | ||
@@ -2750,20 +2758,20 @@ static int compat_siocshwtstamp(struct net *net, struct compat_ifreq __user *uif | |||
2750 | } | 2758 | } |
2751 | 2759 | ||
2752 | struct rtentry32 { | 2760 | struct rtentry32 { |
2753 | u32 rt_pad1; | 2761 | u32 rt_pad1; |
2754 | struct sockaddr rt_dst; /* target address */ | 2762 | struct sockaddr rt_dst; /* target address */ |
2755 | struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */ | 2763 | struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */ |
2756 | struct sockaddr rt_genmask; /* target network mask (IP) */ | 2764 | struct sockaddr rt_genmask; /* target network mask (IP) */ |
2757 | unsigned short rt_flags; | 2765 | unsigned short rt_flags; |
2758 | short rt_pad2; | 2766 | short rt_pad2; |
2759 | u32 rt_pad3; | 2767 | u32 rt_pad3; |
2760 | unsigned char rt_tos; | 2768 | unsigned char rt_tos; |
2761 | unsigned char rt_class; | 2769 | unsigned char rt_class; |
2762 | short rt_pad4; | 2770 | short rt_pad4; |
2763 | short rt_metric; /* +1 for binary compatibility! */ | 2771 | short rt_metric; /* +1 for binary compatibility! */ |
2764 | /* char * */ u32 rt_dev; /* forcing the device at add */ | 2772 | /* char * */ u32 rt_dev; /* forcing the device at add */ |
2765 | u32 rt_mtu; /* per route MTU/Window */ | 2773 | u32 rt_mtu; /* per route MTU/Window */ |
2766 | u32 rt_window; /* Window clamping */ | 2774 | u32 rt_window; /* Window clamping */ |
2767 | unsigned short rt_irtt; /* Initial RTT */ | 2775 | unsigned short rt_irtt; /* Initial RTT */ |
2768 | }; | 2776 | }; |
2769 | 2777 | ||
@@ -2793,29 +2801,29 @@ static int routing_ioctl(struct net *net, struct socket *sock, | |||
2793 | 2801 | ||
2794 | if (sock && sock->sk && sock->sk->sk_family == AF_INET6) { /* ipv6 */ | 2802 | if (sock && sock->sk && sock->sk->sk_family == AF_INET6) { /* ipv6 */ |
2795 | struct in6_rtmsg32 __user *ur6 = argp; | 2803 | struct in6_rtmsg32 __user *ur6 = argp; |
2796 | ret = copy_from_user (&r6.rtmsg_dst, &(ur6->rtmsg_dst), | 2804 | ret = copy_from_user(&r6.rtmsg_dst, &(ur6->rtmsg_dst), |
2797 | 3 * sizeof(struct in6_addr)); | 2805 | 3 * sizeof(struct in6_addr)); |
2798 | ret |= __get_user (r6.rtmsg_type, &(ur6->rtmsg_type)); | 2806 | ret |= __get_user(r6.rtmsg_type, &(ur6->rtmsg_type)); |
2799 | ret |= __get_user (r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len)); | 2807 | ret |= __get_user(r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len)); |
2800 | ret |= __get_user (r6.rtmsg_src_len, &(ur6->rtmsg_src_len)); | 2808 | ret |= __get_user(r6.rtmsg_src_len, &(ur6->rtmsg_src_len)); |
2801 | ret |= __get_user (r6.rtmsg_metric, &(ur6->rtmsg_metric)); | 2809 | ret |= __get_user(r6.rtmsg_metric, &(ur6->rtmsg_metric)); |
2802 | ret |= __get_user (r6.rtmsg_info, &(ur6->rtmsg_info)); | 2810 | ret |= __get_user(r6.rtmsg_info, &(ur6->rtmsg_info)); |
2803 | ret |= __get_user (r6.rtmsg_flags, &(ur6->rtmsg_flags)); | 2811 | ret |= __get_user(r6.rtmsg_flags, &(ur6->rtmsg_flags)); |
2804 | ret |= __get_user (r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex)); | 2812 | ret |= __get_user(r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex)); |
2805 | 2813 | ||
2806 | r = (void *) &r6; | 2814 | r = (void *) &r6; |
2807 | } else { /* ipv4 */ | 2815 | } else { /* ipv4 */ |
2808 | struct rtentry32 __user *ur4 = argp; | 2816 | struct rtentry32 __user *ur4 = argp; |
2809 | ret = copy_from_user (&r4.rt_dst, &(ur4->rt_dst), | 2817 | ret = copy_from_user(&r4.rt_dst, &(ur4->rt_dst), |
2810 | 3 * sizeof(struct sockaddr)); | 2818 | 3 * sizeof(struct sockaddr)); |
2811 | ret |= __get_user (r4.rt_flags, &(ur4->rt_flags)); | 2819 | ret |= __get_user(r4.rt_flags, &(ur4->rt_flags)); |
2812 | ret |= __get_user (r4.rt_metric, &(ur4->rt_metric)); | 2820 | ret |= __get_user(r4.rt_metric, &(ur4->rt_metric)); |
2813 | ret |= __get_user (r4.rt_mtu, &(ur4->rt_mtu)); | 2821 | ret |= __get_user(r4.rt_mtu, &(ur4->rt_mtu)); |
2814 | ret |= __get_user (r4.rt_window, &(ur4->rt_window)); | 2822 | ret |= __get_user(r4.rt_window, &(ur4->rt_window)); |
2815 | ret |= __get_user (r4.rt_irtt, &(ur4->rt_irtt)); | 2823 | ret |= __get_user(r4.rt_irtt, &(ur4->rt_irtt)); |
2816 | ret |= __get_user (rtdev, &(ur4->rt_dev)); | 2824 | ret |= __get_user(rtdev, &(ur4->rt_dev)); |
2817 | if (rtdev) { | 2825 | if (rtdev) { |
2818 | ret |= copy_from_user (devname, compat_ptr(rtdev), 15); | 2826 | ret |= copy_from_user(devname, compat_ptr(rtdev), 15); |
2819 | r4.rt_dev = devname; devname[15] = 0; | 2827 | r4.rt_dev = devname; devname[15] = 0; |
2820 | } else | 2828 | } else |
2821 | r4.rt_dev = NULL; | 2829 | r4.rt_dev = NULL; |
@@ -2828,9 +2836,9 @@ static int routing_ioctl(struct net *net, struct socket *sock, | |||
2828 | goto out; | 2836 | goto out; |
2829 | } | 2837 | } |
2830 | 2838 | ||
2831 | set_fs (KERNEL_DS); | 2839 | set_fs(KERNEL_DS); |
2832 | ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r); | 2840 | ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r); |
2833 | set_fs (old_fs); | 2841 | set_fs(old_fs); |
2834 | 2842 | ||
2835 | out: | 2843 | out: |
2836 | return ret; | 2844 | return ret; |
@@ -2993,11 +3001,13 @@ int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen) | |||
2993 | { | 3001 | { |
2994 | return sock->ops->bind(sock, addr, addrlen); | 3002 | return sock->ops->bind(sock, addr, addrlen); |
2995 | } | 3003 | } |
3004 | EXPORT_SYMBOL(kernel_bind); | ||
2996 | 3005 | ||
2997 | int kernel_listen(struct socket *sock, int backlog) | 3006 | int kernel_listen(struct socket *sock, int backlog) |
2998 | { | 3007 | { |
2999 | return sock->ops->listen(sock, backlog); | 3008 | return sock->ops->listen(sock, backlog); |
3000 | } | 3009 | } |
3010 | EXPORT_SYMBOL(kernel_listen); | ||
3001 | 3011 | ||
3002 | int kernel_accept(struct socket *sock, struct socket **newsock, int flags) | 3012 | int kernel_accept(struct socket *sock, struct socket **newsock, int flags) |
3003 | { | 3013 | { |
@@ -3022,24 +3032,28 @@ int kernel_accept(struct socket *sock, struct socket **newsock, int flags) | |||
3022 | done: | 3032 | done: |
3023 | return err; | 3033 | return err; |
3024 | } | 3034 | } |
3035 | EXPORT_SYMBOL(kernel_accept); | ||
3025 | 3036 | ||
3026 | int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen, | 3037 | int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen, |
3027 | int flags) | 3038 | int flags) |
3028 | { | 3039 | { |
3029 | return sock->ops->connect(sock, addr, addrlen, flags); | 3040 | return sock->ops->connect(sock, addr, addrlen, flags); |
3030 | } | 3041 | } |
3042 | EXPORT_SYMBOL(kernel_connect); | ||
3031 | 3043 | ||
3032 | int kernel_getsockname(struct socket *sock, struct sockaddr *addr, | 3044 | int kernel_getsockname(struct socket *sock, struct sockaddr *addr, |
3033 | int *addrlen) | 3045 | int *addrlen) |
3034 | { | 3046 | { |
3035 | return sock->ops->getname(sock, addr, addrlen, 0); | 3047 | return sock->ops->getname(sock, addr, addrlen, 0); |
3036 | } | 3048 | } |
3049 | EXPORT_SYMBOL(kernel_getsockname); | ||
3037 | 3050 | ||
3038 | int kernel_getpeername(struct socket *sock, struct sockaddr *addr, | 3051 | int kernel_getpeername(struct socket *sock, struct sockaddr *addr, |
3039 | int *addrlen) | 3052 | int *addrlen) |
3040 | { | 3053 | { |
3041 | return sock->ops->getname(sock, addr, addrlen, 1); | 3054 | return sock->ops->getname(sock, addr, addrlen, 1); |
3042 | } | 3055 | } |
3056 | EXPORT_SYMBOL(kernel_getpeername); | ||
3043 | 3057 | ||
3044 | int kernel_getsockopt(struct socket *sock, int level, int optname, | 3058 | int kernel_getsockopt(struct socket *sock, int level, int optname, |
3045 | char *optval, int *optlen) | 3059 | char *optval, int *optlen) |
@@ -3056,6 +3070,7 @@ int kernel_getsockopt(struct socket *sock, int level, int optname, | |||
3056 | set_fs(oldfs); | 3070 | set_fs(oldfs); |
3057 | return err; | 3071 | return err; |
3058 | } | 3072 | } |
3073 | EXPORT_SYMBOL(kernel_getsockopt); | ||
3059 | 3074 | ||
3060 | int kernel_setsockopt(struct socket *sock, int level, int optname, | 3075 | int kernel_setsockopt(struct socket *sock, int level, int optname, |
3061 | char *optval, unsigned int optlen) | 3076 | char *optval, unsigned int optlen) |
@@ -3072,6 +3087,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname, | |||
3072 | set_fs(oldfs); | 3087 | set_fs(oldfs); |
3073 | return err; | 3088 | return err; |
3074 | } | 3089 | } |
3090 | EXPORT_SYMBOL(kernel_setsockopt); | ||
3075 | 3091 | ||
3076 | int kernel_sendpage(struct socket *sock, struct page *page, int offset, | 3092 | int kernel_sendpage(struct socket *sock, struct page *page, int offset, |
3077 | size_t size, int flags) | 3093 | size_t size, int flags) |
@@ -3083,6 +3099,7 @@ int kernel_sendpage(struct socket *sock, struct page *page, int offset, | |||
3083 | 3099 | ||
3084 | return sock_no_sendpage(sock, page, offset, size, flags); | 3100 | return sock_no_sendpage(sock, page, offset, size, flags); |
3085 | } | 3101 | } |
3102 | EXPORT_SYMBOL(kernel_sendpage); | ||
3086 | 3103 | ||
3087 | int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg) | 3104 | int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg) |
3088 | { | 3105 | { |
@@ -3095,33 +3112,10 @@ int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg) | |||
3095 | 3112 | ||
3096 | return err; | 3113 | return err; |
3097 | } | 3114 | } |
3115 | EXPORT_SYMBOL(kernel_sock_ioctl); | ||
3098 | 3116 | ||
3099 | int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how) | 3117 | int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how) |
3100 | { | 3118 | { |
3101 | return sock->ops->shutdown(sock, how); | 3119 | return sock->ops->shutdown(sock, how); |
3102 | } | 3120 | } |
3103 | |||
3104 | EXPORT_SYMBOL(sock_create); | ||
3105 | EXPORT_SYMBOL(sock_create_kern); | ||
3106 | EXPORT_SYMBOL(sock_create_lite); | ||
3107 | EXPORT_SYMBOL(sock_map_fd); | ||
3108 | EXPORT_SYMBOL(sock_recvmsg); | ||
3109 | EXPORT_SYMBOL(sock_register); | ||
3110 | EXPORT_SYMBOL(sock_release); | ||
3111 | EXPORT_SYMBOL(sock_sendmsg); | ||
3112 | EXPORT_SYMBOL(sock_unregister); | ||
3113 | EXPORT_SYMBOL(sock_wake_async); | ||
3114 | EXPORT_SYMBOL(sockfd_lookup); | ||
3115 | EXPORT_SYMBOL(kernel_sendmsg); | ||
3116 | EXPORT_SYMBOL(kernel_recvmsg); | ||
3117 | EXPORT_SYMBOL(kernel_bind); | ||
3118 | EXPORT_SYMBOL(kernel_listen); | ||
3119 | EXPORT_SYMBOL(kernel_accept); | ||
3120 | EXPORT_SYMBOL(kernel_connect); | ||
3121 | EXPORT_SYMBOL(kernel_getsockname); | ||
3122 | EXPORT_SYMBOL(kernel_getpeername); | ||
3123 | EXPORT_SYMBOL(kernel_getsockopt); | ||
3124 | EXPORT_SYMBOL(kernel_setsockopt); | ||
3125 | EXPORT_SYMBOL(kernel_sendpage); | ||
3126 | EXPORT_SYMBOL(kernel_sock_ioctl); | ||
3127 | EXPORT_SYMBOL(kernel_sock_shutdown); | 3121 | EXPORT_SYMBOL(kernel_sock_shutdown); |