34 files changed, 706 insertions, 1374 deletions
diff --git a/Documentation/networking/phonet.txt b/Documentation/networking/phonet.txt index 2d9bc2b711fc..24ad2adba6e5 100644 --- a/Documentation/networking/phonet.txt +++ b/Documentation/networking/phonet.txt | |||
@@ -199,33 +199,29 @@ between itself and a remote pipe-end point (e.g. modem). | |||
199 | 199 | ||
200 | The implementation adds socket options at SOL_PNPIPE level: | 200 | The implementation adds socket options at SOL_PNPIPE level: |
201 | 201 | ||
202 | PNPIPE_CREATE | 202 | PNPIPE_PIPE_HANDLE |
203 | It accepts an integer argument where-in | 203 | It accepts an integer argument for setting the value of the pipe handle. |
204 | lower order 16 bits: pn_dev and pn_port pair for remote pep. | ||
205 | higher order 16 bits: 8 bit pipe-handle | ||
206 | |||
207 | It sends a PNS_PEP_CONNECT_REQ on sequenced socket itself. On getting | ||
208 | PNS_PEP_CONNECT_RESP, it sends PNS_PEP_CONNECT_REQ to remote pep. On | ||
209 | getting response from remote pep, it selects the best possible Flow | ||
210 | control mechanism supported by remote-pep (modem) and then it sends | ||
211 | PNS_PEP_CREATED_IND to the sequenced socket and to the remote pep. | ||
212 | |||
213 | It then updates the pipe state associated with the sequenced socket to | ||
214 | be PIPE_DISABLED. | ||
215 | 204 | ||
216 | PNPIPE_ENABLE accepts one integer value (int). If set to zero, the pipe | 205 | PNPIPE_ENABLE accepts one integer value (int). If set to zero, the pipe |
217 | is disabled. If the value is non-zero, the pipe is enabled. If the pipe | 206 | is disabled. If the value is non-zero, the pipe is enabled. If the pipe |
218 | is not (yet) connected, the ENOTCONN error is returned. | 207 | is not (yet) connected, the ENOTCONN error is returned. |
219 | 208 | ||
220 | PNPIPE_DESTROY | 209 | The implementation also adds socket 'connect'. On calling the 'connect', pipe |
221 | This will send out PNS_PEP_DISCONNECT_REQ on the sequenced socket and | 210 | will be created between the source socket and the destination, and the pipe |
222 | the remote pep. | 211 | state will be set to PIPE_DISABLED. |
223 | It will also update the pipe state associated with the sequenced socket | ||
224 | to PIPE_IDLE | ||
225 | 212 | ||
226 | After a pipe has been created and enabled successfully, the Pipe data can be | 213 | After a pipe has been created and enabled successfully, the Pipe data can be |
227 | exchanged between the host-pep and remote-pep (modem). | 214 | exchanged between the host-pep and remote-pep (modem). |
228 | 215 | ||
216 | User-space would typically follow the sequence below with the Pipe controller: | ||
217 | -socket | ||
218 | -bind | ||
219 | -setsockopt for PNPIPE_PIPE_HANDLE | ||
220 | -connect | ||
221 | -setsockopt for PNPIPE_ENCAP_IP | ||
222 | -setsockopt for PNPIPE_ENABLE | ||
223 | |||
224 | |||
229 | Authors | 225 | Authors |
230 | ------- | 226 | ------- |
231 | 227 | ||
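
The pipe-controller sequence documented above can be illustrated with a minimal user-space sketch. The SOL_PNPIPE option names are taken verbatim from the text (PNPIPE_PIPE_HANDLE, PNPIPE_ENCAP_IP, PNPIPE_ENABLE); the pipe-handle value, the remote pep address, and the omitted error handling are illustrative assumptions, not part of the patch:

/*
 * Minimal sketch of the pipe-controller sequence listed above.
 * Option names are as given in the documentation text; the handle
 * value and the remote pep address are placeholders, and most error
 * handling is elided.
 */
#include <sys/socket.h>
#include <linux/phonet.h>
#include <unistd.h>

int main(void)
{
        struct sockaddr_pn spn = { .spn_family = AF_PHONET };
        int handle = 1;         /* assumed 8-bit pipe handle */
        int on = 1;
        int fd;

        fd = socket(AF_PHONET, SOCK_SEQPACKET, PN_PROTO_PIPE);
        if (fd < 0)
                return 1;

        bind(fd, (struct sockaddr *)&spn, sizeof(spn));
        setsockopt(fd, SOL_PNPIPE, PNPIPE_PIPE_HANDLE, &handle, sizeof(handle));

        spn.spn_dev = 0x60;     /* placeholder address of the remote pep */
        connect(fd, (struct sockaddr *)&spn, sizeof(spn));
        /* the pipe now exists in PIPE_DISABLED state */

        setsockopt(fd, SOL_PNPIPE, PNPIPE_ENCAP_IP, &on, sizeof(on));
        setsockopt(fd, SOL_PNPIPE, PNPIPE_ENABLE, &on, sizeof(on));

        /* ... exchange pipe data with the remote pep (modem) ... */

        close(fd);
        return 0;
}
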
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index 27449bf775e3..92bac19ad60a 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c | |||
@@ -60,6 +60,7 @@ MODULE_LICENSE("GPL"); | |||
60 | MODULE_VERSION(CNIC_MODULE_VERSION); | 60 | MODULE_VERSION(CNIC_MODULE_VERSION); |
61 | 61 | ||
62 | static LIST_HEAD(cnic_dev_list); | 62 | static LIST_HEAD(cnic_dev_list); |
63 | static LIST_HEAD(cnic_udev_list); | ||
63 | static DEFINE_RWLOCK(cnic_dev_lock); | 64 | static DEFINE_RWLOCK(cnic_dev_lock); |
64 | static DEFINE_MUTEX(cnic_lock); | 65 | static DEFINE_MUTEX(cnic_lock); |
65 | 66 | ||
@@ -81,29 +82,34 @@ static struct cnic_ops cnic_bnx2x_ops = { | |||
81 | .cnic_ctl = cnic_ctl, | 82 | .cnic_ctl = cnic_ctl, |
82 | }; | 83 | }; |
83 | 84 | ||
85 | static struct workqueue_struct *cnic_wq; | ||
86 | |||
84 | static void cnic_shutdown_rings(struct cnic_dev *); | 87 | static void cnic_shutdown_rings(struct cnic_dev *); |
85 | static void cnic_init_rings(struct cnic_dev *); | 88 | static void cnic_init_rings(struct cnic_dev *); |
86 | static int cnic_cm_set_pg(struct cnic_sock *); | 89 | static int cnic_cm_set_pg(struct cnic_sock *); |
87 | 90 | ||
88 | static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode) | 91 | static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode) |
89 | { | 92 | { |
90 | struct cnic_dev *dev = uinfo->priv; | 93 | struct cnic_uio_dev *udev = uinfo->priv; |
91 | struct cnic_local *cp = dev->cnic_priv; | 94 | struct cnic_dev *dev; |
92 | 95 | ||
93 | if (!capable(CAP_NET_ADMIN)) | 96 | if (!capable(CAP_NET_ADMIN)) |
94 | return -EPERM; | 97 | return -EPERM; |
95 | 98 | ||
96 | if (cp->uio_dev != -1) | 99 | if (udev->uio_dev != -1) |
97 | return -EBUSY; | 100 | return -EBUSY; |
98 | 101 | ||
99 | rtnl_lock(); | 102 | rtnl_lock(); |
100 | if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) { | 103 | dev = udev->dev; |
104 | |||
105 | if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) { | ||
101 | rtnl_unlock(); | 106 | rtnl_unlock(); |
102 | return -ENODEV; | 107 | return -ENODEV; |
103 | } | 108 | } |
104 | 109 | ||
105 | cp->uio_dev = iminor(inode); | 110 | udev->uio_dev = iminor(inode); |
106 | 111 | ||
112 | cnic_shutdown_rings(dev); | ||
107 | cnic_init_rings(dev); | 113 | cnic_init_rings(dev); |
108 | rtnl_unlock(); | 114 | rtnl_unlock(); |
109 | 115 | ||
@@ -112,12 +118,9 @@ static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode) | |||
112 | 118 | ||
113 | static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode) | 119 | static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode) |
114 | { | 120 | { |
115 | struct cnic_dev *dev = uinfo->priv; | 121 | struct cnic_uio_dev *udev = uinfo->priv; |
116 | struct cnic_local *cp = dev->cnic_priv; | ||
117 | 122 | ||
118 | cnic_shutdown_rings(dev); | 123 | udev->uio_dev = -1; |
119 | |||
120 | cp->uio_dev = -1; | ||
121 | return 0; | 124 | return 0; |
122 | } | 125 | } |
123 | 126 | ||
@@ -274,8 +277,9 @@ static int cnic_send_nlmsg(struct cnic_local *cp, u32 type, | |||
274 | u16 len = 0; | 277 | u16 len = 0; |
275 | u32 msg_type = ISCSI_KEVENT_IF_DOWN; | 278 | u32 msg_type = ISCSI_KEVENT_IF_DOWN; |
276 | struct cnic_ulp_ops *ulp_ops; | 279 | struct cnic_ulp_ops *ulp_ops; |
280 | struct cnic_uio_dev *udev = cp->udev; | ||
277 | 281 | ||
278 | if (cp->uio_dev == -1) | 282 | if (!udev || udev->uio_dev == -1) |
279 | return -ENODEV; | 283 | return -ENODEV; |
280 | 284 | ||
281 | if (csk) { | 285 | if (csk) { |
@@ -406,8 +410,7 @@ static void cnic_uio_stop(void) | |||
406 | list_for_each_entry(dev, &cnic_dev_list, list) { | 410 | list_for_each_entry(dev, &cnic_dev_list, list) { |
407 | struct cnic_local *cp = dev->cnic_priv; | 411 | struct cnic_local *cp = dev->cnic_priv; |
408 | 412 | ||
409 | if (cp->cnic_uinfo) | 413 | cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); |
410 | cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); | ||
411 | } | 414 | } |
412 | read_unlock(&cnic_dev_lock); | 415 | read_unlock(&cnic_dev_lock); |
413 | } | 416 | } |
@@ -768,31 +771,45 @@ static void cnic_free_context(struct cnic_dev *dev) | |||
768 | } | 771 | } |
769 | } | 772 | } |
770 | 773 | ||
771 | static void cnic_free_resc(struct cnic_dev *dev) | 774 | static void __cnic_free_uio(struct cnic_uio_dev *udev) |
772 | { | 775 | { |
773 | struct cnic_local *cp = dev->cnic_priv; | 776 | uio_unregister_device(&udev->cnic_uinfo); |
774 | int i = 0; | ||
775 | 777 | ||
776 | if (cp->cnic_uinfo) { | 778 | if (udev->l2_buf) { |
777 | while (cp->uio_dev != -1 && i < 15) { | 779 | dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size, |
778 | msleep(100); | 780 | udev->l2_buf, udev->l2_buf_map); |
779 | i++; | 781 | udev->l2_buf = NULL; |
780 | } | ||
781 | uio_unregister_device(cp->cnic_uinfo); | ||
782 | kfree(cp->cnic_uinfo); | ||
783 | cp->cnic_uinfo = NULL; | ||
784 | } | 782 | } |
785 | 783 | ||
786 | if (cp->l2_buf) { | 784 | if (udev->l2_ring) { |
787 | dma_free_coherent(&dev->pcidev->dev, cp->l2_buf_size, | 785 | dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size, |
788 | cp->l2_buf, cp->l2_buf_map); | 786 | udev->l2_ring, udev->l2_ring_map); |
789 | cp->l2_buf = NULL; | 787 | udev->l2_ring = NULL; |
790 | } | 788 | } |
791 | 789 | ||
792 | if (cp->l2_ring) { | 790 | pci_dev_put(udev->pdev); |
793 | dma_free_coherent(&dev->pcidev->dev, cp->l2_ring_size, | 791 | kfree(udev); |
794 | cp->l2_ring, cp->l2_ring_map); | 792 | } |
795 | cp->l2_ring = NULL; | 793 | |
794 | static void cnic_free_uio(struct cnic_uio_dev *udev) | ||
795 | { | ||
796 | if (!udev) | ||
797 | return; | ||
798 | |||
799 | write_lock(&cnic_dev_lock); | ||
800 | list_del_init(&udev->list); | ||
801 | write_unlock(&cnic_dev_lock); | ||
802 | __cnic_free_uio(udev); | ||
803 | } | ||
804 | |||
805 | static void cnic_free_resc(struct cnic_dev *dev) | ||
806 | { | ||
807 | struct cnic_local *cp = dev->cnic_priv; | ||
808 | struct cnic_uio_dev *udev = cp->udev; | ||
809 | |||
810 | if (udev) { | ||
811 | udev->dev = NULL; | ||
812 | cp->udev = NULL; | ||
796 | } | 813 | } |
797 | 814 | ||
798 | cnic_free_context(dev); | 815 | cnic_free_context(dev); |
@@ -894,37 +911,68 @@ static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info) | |||
894 | return 0; | 911 | return 0; |
895 | } | 912 | } |
896 | 913 | ||
897 | static int cnic_alloc_l2_rings(struct cnic_dev *dev, int pages) | 914 | static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages) |
898 | { | 915 | { |
899 | struct cnic_local *cp = dev->cnic_priv; | 916 | struct cnic_local *cp = dev->cnic_priv; |
917 | struct cnic_uio_dev *udev; | ||
918 | |||
919 | read_lock(&cnic_dev_lock); | ||
920 | list_for_each_entry(udev, &cnic_udev_list, list) { | ||
921 | if (udev->pdev == dev->pcidev) { | ||
922 | udev->dev = dev; | ||
923 | cp->udev = udev; | ||
924 | read_unlock(&cnic_dev_lock); | ||
925 | return 0; | ||
926 | } | ||
927 | } | ||
928 | read_unlock(&cnic_dev_lock); | ||
929 | |||
930 | udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC); | ||
931 | if (!udev) | ||
932 | return -ENOMEM; | ||
933 | |||
934 | udev->uio_dev = -1; | ||
900 | 935 | ||
901 | cp->l2_ring_size = pages * BCM_PAGE_SIZE; | 936 | udev->dev = dev; |
902 | cp->l2_ring = dma_alloc_coherent(&dev->pcidev->dev, cp->l2_ring_size, | 937 | udev->pdev = dev->pcidev; |
903 | &cp->l2_ring_map, | 938 | udev->l2_ring_size = pages * BCM_PAGE_SIZE; |
904 | GFP_KERNEL | __GFP_COMP); | 939 | udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size, |
905 | if (!cp->l2_ring) | 940 | &udev->l2_ring_map, |
941 | GFP_KERNEL | __GFP_COMP); | ||
942 | if (!udev->l2_ring) | ||
906 | return -ENOMEM; | 943 | return -ENOMEM; |
907 | 944 | ||
908 | cp->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; | 945 | udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; |
909 | cp->l2_buf_size = PAGE_ALIGN(cp->l2_buf_size); | 946 | udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size); |
910 | cp->l2_buf = dma_alloc_coherent(&dev->pcidev->dev, cp->l2_buf_size, | 947 | udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size, |
911 | &cp->l2_buf_map, | 948 | &udev->l2_buf_map, |
912 | GFP_KERNEL | __GFP_COMP); | 949 | GFP_KERNEL | __GFP_COMP); |
913 | if (!cp->l2_buf) | 950 | if (!udev->l2_buf) |
914 | return -ENOMEM; | 951 | return -ENOMEM; |
915 | 952 | ||
953 | write_lock(&cnic_dev_lock); | ||
954 | list_add(&udev->list, &cnic_udev_list); | ||
955 | write_unlock(&cnic_dev_lock); | ||
956 | |||
957 | pci_dev_get(udev->pdev); | ||
958 | |||
959 | cp->udev = udev; | ||
960 | |||
916 | return 0; | 961 | return 0; |
917 | } | 962 | } |
918 | 963 | ||
919 | static int cnic_alloc_uio(struct cnic_dev *dev) { | 964 | static int cnic_init_uio(struct cnic_dev *dev) |
965 | { | ||
920 | struct cnic_local *cp = dev->cnic_priv; | 966 | struct cnic_local *cp = dev->cnic_priv; |
967 | struct cnic_uio_dev *udev = cp->udev; | ||
921 | struct uio_info *uinfo; | 968 | struct uio_info *uinfo; |
922 | int ret; | 969 | int ret = 0; |
923 | 970 | ||
924 | uinfo = kzalloc(sizeof(*uinfo), GFP_ATOMIC); | 971 | if (!udev) |
925 | if (!uinfo) | ||
926 | return -ENOMEM; | 972 | return -ENOMEM; |
927 | 973 | ||
974 | uinfo = &udev->cnic_uinfo; | ||
975 | |||
928 | uinfo->mem[0].addr = dev->netdev->base_addr; | 976 | uinfo->mem[0].addr = dev->netdev->base_addr; |
929 | uinfo->mem[0].internal_addr = dev->regview; | 977 | uinfo->mem[0].internal_addr = dev->regview; |
930 | uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start; | 978 | uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start; |
@@ -932,7 +980,7 @@ static int cnic_alloc_uio(struct cnic_dev *dev) { | |||
932 | 980 | ||
933 | if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { | 981 | if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { |
934 | uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen & | 982 | uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen & |
935 | PAGE_MASK; | 983 | PAGE_MASK; |
936 | if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) | 984 | if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) |
937 | uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9; | 985 | uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9; |
938 | else | 986 | else |
@@ -949,12 +997,12 @@ static int cnic_alloc_uio(struct cnic_dev *dev) { | |||
949 | 997 | ||
950 | uinfo->mem[1].memtype = UIO_MEM_LOGICAL; | 998 | uinfo->mem[1].memtype = UIO_MEM_LOGICAL; |
951 | 999 | ||
952 | uinfo->mem[2].addr = (unsigned long) cp->l2_ring; | 1000 | uinfo->mem[2].addr = (unsigned long) udev->l2_ring; |
953 | uinfo->mem[2].size = cp->l2_ring_size; | 1001 | uinfo->mem[2].size = udev->l2_ring_size; |
954 | uinfo->mem[2].memtype = UIO_MEM_LOGICAL; | 1002 | uinfo->mem[2].memtype = UIO_MEM_LOGICAL; |
955 | 1003 | ||
956 | uinfo->mem[3].addr = (unsigned long) cp->l2_buf; | 1004 | uinfo->mem[3].addr = (unsigned long) udev->l2_buf; |
957 | uinfo->mem[3].size = cp->l2_buf_size; | 1005 | uinfo->mem[3].size = udev->l2_buf_size; |
958 | uinfo->mem[3].memtype = UIO_MEM_LOGICAL; | 1006 | uinfo->mem[3].memtype = UIO_MEM_LOGICAL; |
959 | 1007 | ||
960 | uinfo->version = CNIC_MODULE_VERSION; | 1008 | uinfo->version = CNIC_MODULE_VERSION; |
@@ -963,16 +1011,17 @@ static int cnic_alloc_uio(struct cnic_dev *dev) { | |||
963 | uinfo->open = cnic_uio_open; | 1011 | uinfo->open = cnic_uio_open; |
964 | uinfo->release = cnic_uio_close; | 1012 | uinfo->release = cnic_uio_close; |
965 | 1013 | ||
966 | uinfo->priv = dev; | 1014 | if (udev->uio_dev == -1) { |
1015 | if (!uinfo->priv) { | ||
1016 | uinfo->priv = udev; | ||
967 | 1017 | ||
968 | ret = uio_register_device(&dev->pcidev->dev, uinfo); | 1018 | ret = uio_register_device(&udev->pdev->dev, uinfo); |
969 | if (ret) { | 1019 | } |
970 | kfree(uinfo); | 1020 | } else { |
971 | return ret; | 1021 | cnic_init_rings(dev); |
972 | } | 1022 | } |
973 | 1023 | ||
974 | cp->cnic_uinfo = uinfo; | 1024 | return ret; |
975 | return 0; | ||
976 | } | 1025 | } |
977 | 1026 | ||
978 | static int cnic_alloc_bnx2_resc(struct cnic_dev *dev) | 1027 | static int cnic_alloc_bnx2_resc(struct cnic_dev *dev) |
@@ -993,11 +1042,11 @@ static int cnic_alloc_bnx2_resc(struct cnic_dev *dev) | |||
993 | if (ret) | 1042 | if (ret) |
994 | goto error; | 1043 | goto error; |
995 | 1044 | ||
996 | ret = cnic_alloc_l2_rings(dev, 2); | 1045 | ret = cnic_alloc_uio_rings(dev, 2); |
997 | if (ret) | 1046 | if (ret) |
998 | goto error; | 1047 | goto error; |
999 | 1048 | ||
1000 | ret = cnic_alloc_uio(dev); | 1049 | ret = cnic_init_uio(dev); |
1001 | if (ret) | 1050 | if (ret) |
1002 | goto error; | 1051 | goto error; |
1003 | 1052 | ||
@@ -1028,7 +1077,7 @@ static int cnic_alloc_bnx2x_context(struct cnic_dev *dev) | |||
1028 | 1077 | ||
1029 | cp->ctx_blks = blks; | 1078 | cp->ctx_blks = blks; |
1030 | cp->ctx_blk_size = ctx_blk_size; | 1079 | cp->ctx_blk_size = ctx_blk_size; |
1031 | if (BNX2X_CHIP_IS_E1H(cp->chip_id)) | 1080 | if (!BNX2X_CHIP_IS_57710(cp->chip_id)) |
1032 | cp->ctx_align = 0; | 1081 | cp->ctx_align = 0; |
1033 | else | 1082 | else |
1034 | cp->ctx_align = ctx_blk_size; | 1083 | cp->ctx_align = ctx_blk_size; |
@@ -1131,11 +1180,11 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) | |||
1131 | 1180 | ||
1132 | cp->l2_rx_ring_size = 15; | 1181 | cp->l2_rx_ring_size = 15; |
1133 | 1182 | ||
1134 | ret = cnic_alloc_l2_rings(dev, 4); | 1183 | ret = cnic_alloc_uio_rings(dev, 4); |
1135 | if (ret) | 1184 | if (ret) |
1136 | goto error; | 1185 | goto error; |
1137 | 1186 | ||
1138 | ret = cnic_alloc_uio(dev); | 1187 | ret = cnic_init_uio(dev); |
1139 | if (ret) | 1188 | if (ret) |
1140 | goto error; | 1189 | goto error; |
1141 | 1190 | ||
@@ -1629,10 +1678,11 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[], | |||
1629 | struct iscsi_kwqe_conn_offload1 *req1; | 1678 | struct iscsi_kwqe_conn_offload1 *req1; |
1630 | struct iscsi_kwqe_conn_offload2 *req2; | 1679 | struct iscsi_kwqe_conn_offload2 *req2; |
1631 | struct cnic_local *cp = dev->cnic_priv; | 1680 | struct cnic_local *cp = dev->cnic_priv; |
1681 | struct cnic_context *ctx; | ||
1632 | struct iscsi_kcqe kcqe; | 1682 | struct iscsi_kcqe kcqe; |
1633 | struct kcqe *cqes[1]; | 1683 | struct kcqe *cqes[1]; |
1634 | u32 l5_cid; | 1684 | u32 l5_cid; |
1635 | int ret; | 1685 | int ret = 0; |
1636 | 1686 | ||
1637 | if (num < 2) { | 1687 | if (num < 2) { |
1638 | *work = num; | 1688 | *work = num; |
@@ -1656,9 +1706,15 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[], | |||
1656 | kcqe.iscsi_conn_id = l5_cid; | 1706 | kcqe.iscsi_conn_id = l5_cid; |
1657 | kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE; | 1707 | kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE; |
1658 | 1708 | ||
1709 | ctx = &cp->ctx_tbl[l5_cid]; | ||
1710 | if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) { | ||
1711 | kcqe.completion_status = | ||
1712 | ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY; | ||
1713 | goto done; | ||
1714 | } | ||
1715 | |||
1659 | if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) { | 1716 | if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) { |
1660 | atomic_dec(&cp->iscsi_conn); | 1717 | atomic_dec(&cp->iscsi_conn); |
1661 | ret = 0; | ||
1662 | goto done; | 1718 | goto done; |
1663 | } | 1719 | } |
1664 | ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid); | 1720 | ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid); |
@@ -1708,25 +1764,14 @@ static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe) | |||
1708 | return ret; | 1764 | return ret; |
1709 | } | 1765 | } |
1710 | 1766 | ||
1711 | static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe) | 1767 | static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid) |
1712 | { | 1768 | { |
1713 | struct cnic_local *cp = dev->cnic_priv; | 1769 | struct cnic_local *cp = dev->cnic_priv; |
1714 | struct iscsi_kwqe_conn_destroy *req = | ||
1715 | (struct iscsi_kwqe_conn_destroy *) kwqe; | ||
1716 | union l5cm_specific_data l5_data; | ||
1717 | u32 l5_cid = req->reserved0; | ||
1718 | struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; | 1770 | struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; |
1719 | int ret = 0; | 1771 | union l5cm_specific_data l5_data; |
1720 | struct iscsi_kcqe kcqe; | 1772 | int ret; |
1721 | struct kcqe *cqes[1]; | ||
1722 | u32 hw_cid, type; | 1773 | u32 hw_cid, type; |
1723 | 1774 | ||
1724 | if (!(ctx->ctx_flags & CTX_FL_OFFLD_START)) | ||
1725 | goto skip_cfc_delete; | ||
1726 | |||
1727 | while (!time_after(jiffies, ctx->timestamp + (2 * HZ))) | ||
1728 | msleep(250); | ||
1729 | |||
1730 | init_waitqueue_head(&ctx->waitq); | 1775 | init_waitqueue_head(&ctx->waitq); |
1731 | ctx->wait_cond = 0; | 1776 | ctx->wait_cond = 0; |
1732 | memset(&l5_data, 0, sizeof(l5_data)); | 1777 | memset(&l5_data, 0, sizeof(l5_data)); |
@@ -1742,11 +1787,43 @@ static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe) | |||
1742 | if (ret == 0) | 1787 | if (ret == 0) |
1743 | wait_event(ctx->waitq, ctx->wait_cond); | 1788 | wait_event(ctx->waitq, ctx->wait_cond); |
1744 | 1789 | ||
1790 | return ret; | ||
1791 | } | ||
1792 | |||
1793 | static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe) | ||
1794 | { | ||
1795 | struct cnic_local *cp = dev->cnic_priv; | ||
1796 | struct iscsi_kwqe_conn_destroy *req = | ||
1797 | (struct iscsi_kwqe_conn_destroy *) kwqe; | ||
1798 | u32 l5_cid = req->reserved0; | ||
1799 | struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; | ||
1800 | int ret = 0; | ||
1801 | struct iscsi_kcqe kcqe; | ||
1802 | struct kcqe *cqes[1]; | ||
1803 | |||
1804 | if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) | ||
1805 | goto skip_cfc_delete; | ||
1806 | |||
1807 | if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) { | ||
1808 | unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies; | ||
1809 | |||
1810 | if (delta > (2 * HZ)) | ||
1811 | delta = 0; | ||
1812 | |||
1813 | set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags); | ||
1814 | queue_delayed_work(cnic_wq, &cp->delete_task, delta); | ||
1815 | goto destroy_reply; | ||
1816 | } | ||
1817 | |||
1818 | ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid); | ||
1819 | |||
1745 | skip_cfc_delete: | 1820 | skip_cfc_delete: |
1746 | cnic_free_bnx2x_conn_resc(dev, l5_cid); | 1821 | cnic_free_bnx2x_conn_resc(dev, l5_cid); |
1747 | 1822 | ||
1748 | atomic_dec(&cp->iscsi_conn); | 1823 | atomic_dec(&cp->iscsi_conn); |
1824 | clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); | ||
1749 | 1825 | ||
1826 | destroy_reply: | ||
1750 | memset(&kcqe, 0, sizeof(kcqe)); | 1827 | memset(&kcqe, 0, sizeof(kcqe)); |
1751 | kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN; | 1828 | kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN; |
1752 | kcqe.iscsi_conn_id = l5_cid; | 1829 | kcqe.iscsi_conn_id = l5_cid; |
@@ -1943,7 +2020,7 @@ static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[], | |||
1943 | ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT, | 2020 | ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT, |
1944 | kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data); | 2021 | kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data); |
1945 | if (!ret) | 2022 | if (!ret) |
1946 | ctx->ctx_flags |= CTX_FL_OFFLD_START; | 2023 | set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); |
1947 | 2024 | ||
1948 | return ret; | 2025 | return ret; |
1949 | } | 2026 | } |
@@ -2179,8 +2256,9 @@ static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info) | |||
2179 | static int cnic_l2_completion(struct cnic_local *cp) | 2256 | static int cnic_l2_completion(struct cnic_local *cp) |
2180 | { | 2257 | { |
2181 | u16 hw_cons, sw_cons; | 2258 | u16 hw_cons, sw_cons; |
2259 | struct cnic_uio_dev *udev = cp->udev; | ||
2182 | union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *) | 2260 | union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *) |
2183 | (cp->l2_ring + (2 * BCM_PAGE_SIZE)); | 2261 | (udev->l2_ring + (2 * BCM_PAGE_SIZE)); |
2184 | u32 cmd; | 2262 | u32 cmd; |
2185 | int comp = 0; | 2263 | int comp = 0; |
2186 | 2264 | ||
@@ -2226,7 +2304,8 @@ static void cnic_chk_pkt_rings(struct cnic_local *cp) | |||
2226 | cp->tx_cons = tx_cons; | 2304 | cp->tx_cons = tx_cons; |
2227 | cp->rx_cons = rx_cons; | 2305 | cp->rx_cons = rx_cons; |
2228 | 2306 | ||
2229 | uio_event_notify(cp->cnic_uinfo); | 2307 | if (cp->udev) |
2308 | uio_event_notify(&cp->udev->cnic_uinfo); | ||
2230 | } | 2309 | } |
2231 | if (comp) | 2310 | if (comp) |
2232 | clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); | 2311 | clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); |
@@ -2327,6 +2406,22 @@ static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm, | |||
2327 | CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack)); | 2406 | CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack)); |
2328 | } | 2407 | } |
2329 | 2408 | ||
2409 | static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment, | ||
2410 | u16 index, u8 op, u8 update) | ||
2411 | { | ||
2412 | struct igu_regular cmd_data; | ||
2413 | u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8; | ||
2414 | |||
2415 | cmd_data.sb_id_and_flags = | ||
2416 | (index << IGU_REGULAR_SB_INDEX_SHIFT) | | ||
2417 | (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) | | ||
2418 | (update << IGU_REGULAR_BUPDATE_SHIFT) | | ||
2419 | (op << IGU_REGULAR_ENABLE_INT_SHIFT); | ||
2420 | |||
2421 | |||
2422 | CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags); | ||
2423 | } | ||
2424 | |||
2330 | static void cnic_ack_bnx2x_msix(struct cnic_dev *dev) | 2425 | static void cnic_ack_bnx2x_msix(struct cnic_dev *dev) |
2331 | { | 2426 | { |
2332 | struct cnic_local *cp = dev->cnic_priv; | 2427 | struct cnic_local *cp = dev->cnic_priv; |
@@ -2335,6 +2430,14 @@ static void cnic_ack_bnx2x_msix(struct cnic_dev *dev) | |||
2335 | IGU_INT_DISABLE, 0); | 2430 | IGU_INT_DISABLE, 0); |
2336 | } | 2431 | } |
2337 | 2432 | ||
2433 | static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev) | ||
2434 | { | ||
2435 | struct cnic_local *cp = dev->cnic_priv; | ||
2436 | |||
2437 | cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0, | ||
2438 | IGU_INT_DISABLE, 0); | ||
2439 | } | ||
2440 | |||
2338 | static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) | 2441 | static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) |
2339 | { | 2442 | { |
2340 | u32 last_status = *info->status_idx_ptr; | 2443 | u32 last_status = *info->status_idx_ptr; |
@@ -2366,8 +2469,12 @@ static void cnic_service_bnx2x_bh(unsigned long data) | |||
2366 | status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); | 2469 | status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); |
2367 | 2470 | ||
2368 | CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); | 2471 | CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); |
2369 | cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, | 2472 | if (BNX2X_CHIP_IS_E2(cp->chip_id)) |
2370 | status_idx, IGU_INT_ENABLE, 1); | 2473 | cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, |
2474 | status_idx, IGU_INT_ENABLE, 1); | ||
2475 | else | ||
2476 | cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, | ||
2477 | status_idx, IGU_INT_ENABLE, 1); | ||
2371 | } | 2478 | } |
2372 | 2479 | ||
2373 | static int cnic_service_bnx2x(void *data, void *status_blk) | 2480 | static int cnic_service_bnx2x(void *data, void *status_blk) |
@@ -2388,8 +2495,7 @@ static void cnic_ulp_stop(struct cnic_dev *dev) | |||
2388 | struct cnic_local *cp = dev->cnic_priv; | 2495 | struct cnic_local *cp = dev->cnic_priv; |
2389 | int if_type; | 2496 | int if_type; |
2390 | 2497 | ||
2391 | if (cp->cnic_uinfo) | 2498 | cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); |
2392 | cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); | ||
2393 | 2499 | ||
2394 | for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { | 2500 | for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { |
2395 | struct cnic_ulp_ops *ulp_ops; | 2501 | struct cnic_ulp_ops *ulp_ops; |
@@ -2737,6 +2843,13 @@ static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid, | |||
2737 | if (l5_cid >= MAX_CM_SK_TBL_SZ) | 2843 | if (l5_cid >= MAX_CM_SK_TBL_SZ) |
2738 | return -EINVAL; | 2844 | return -EINVAL; |
2739 | 2845 | ||
2846 | if (cp->ctx_tbl) { | ||
2847 | struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; | ||
2848 | |||
2849 | if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) | ||
2850 | return -EAGAIN; | ||
2851 | } | ||
2852 | |||
2740 | csk1 = &cp->csk_tbl[l5_cid]; | 2853 | csk1 = &cp->csk_tbl[l5_cid]; |
2741 | if (atomic_read(&csk1->ref_count)) | 2854 | if (atomic_read(&csk1->ref_count)) |
2742 | return -EAGAIN; | 2855 | return -EAGAIN; |
@@ -3288,6 +3401,32 @@ static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode) | |||
3288 | 3401 | ||
3289 | static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev) | 3402 | static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev) |
3290 | { | 3403 | { |
3404 | struct cnic_local *cp = dev->cnic_priv; | ||
3405 | int i; | ||
3406 | |||
3407 | if (!cp->ctx_tbl) | ||
3408 | return; | ||
3409 | |||
3410 | if (!netif_running(dev->netdev)) | ||
3411 | return; | ||
3412 | |||
3413 | for (i = 0; i < cp->max_cid_space; i++) { | ||
3414 | struct cnic_context *ctx = &cp->ctx_tbl[i]; | ||
3415 | |||
3416 | while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) | ||
3417 | msleep(10); | ||
3418 | |||
3419 | if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) | ||
3420 | netdev_warn(dev->netdev, "CID %x not deleted\n", | ||
3421 | ctx->cid); | ||
3422 | } | ||
3423 | |||
3424 | cancel_delayed_work(&cp->delete_task); | ||
3425 | flush_workqueue(cnic_wq); | ||
3426 | |||
3427 | if (atomic_read(&cp->iscsi_conn) != 0) | ||
3428 | netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n", | ||
3429 | atomic_read(&cp->iscsi_conn)); | ||
3291 | } | 3430 | } |
3292 | 3431 | ||
3293 | static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev) | 3432 | static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev) |
@@ -3322,6 +3461,46 @@ static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev) | |||
3322 | return 0; | 3461 | return 0; |
3323 | } | 3462 | } |
3324 | 3463 | ||
3464 | static void cnic_delete_task(struct work_struct *work) | ||
3465 | { | ||
3466 | struct cnic_local *cp; | ||
3467 | struct cnic_dev *dev; | ||
3468 | u32 i; | ||
3469 | int need_resched = 0; | ||
3470 | |||
3471 | cp = container_of(work, struct cnic_local, delete_task.work); | ||
3472 | dev = cp->dev; | ||
3473 | |||
3474 | for (i = 0; i < cp->max_cid_space; i++) { | ||
3475 | struct cnic_context *ctx = &cp->ctx_tbl[i]; | ||
3476 | |||
3477 | if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) || | ||
3478 | !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) | ||
3479 | continue; | ||
3480 | |||
3481 | if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) { | ||
3482 | need_resched = 1; | ||
3483 | continue; | ||
3484 | } | ||
3485 | |||
3486 | if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) | ||
3487 | continue; | ||
3488 | |||
3489 | cnic_bnx2x_destroy_ramrod(dev, i); | ||
3490 | |||
3491 | cnic_free_bnx2x_conn_resc(dev, i); | ||
3492 | if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) | ||
3493 | atomic_dec(&cp->iscsi_conn); | ||
3494 | |||
3495 | clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); | ||
3496 | } | ||
3497 | |||
3498 | if (need_resched) | ||
3499 | queue_delayed_work(cnic_wq, &cp->delete_task, | ||
3500 | msecs_to_jiffies(10)); | ||
3501 | |||
3502 | } | ||
3503 | |||
3325 | static int cnic_cm_open(struct cnic_dev *dev) | 3504 | static int cnic_cm_open(struct cnic_dev *dev) |
3326 | { | 3505 | { |
3327 | struct cnic_local *cp = dev->cnic_priv; | 3506 | struct cnic_local *cp = dev->cnic_priv; |
@@ -3336,6 +3515,8 @@ static int cnic_cm_open(struct cnic_dev *dev) | |||
3336 | if (err) | 3515 | if (err) |
3337 | goto err_out; | 3516 | goto err_out; |
3338 | 3517 | ||
3518 | INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task); | ||
3519 | |||
3339 | dev->cm_create = cnic_cm_create; | 3520 | dev->cm_create = cnic_cm_create; |
3340 | dev->cm_destroy = cnic_cm_destroy; | 3521 | dev->cm_destroy = cnic_cm_destroy; |
3341 | dev->cm_connect = cnic_cm_connect; | 3522 | dev->cm_connect = cnic_cm_connect; |
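
The deferred-destroy logic above uses the standard kernel delayed-work pattern: a driver-private workqueue (cnic_wq, created in cnic_init()), a struct delayed_work embedded in cnic_local, INIT_DELAYED_WORK to bind cnic_delete_task as the handler, and queue_delayed_work to arm or re-arm it. A stripped-down sketch of that pattern follows, with hypothetical demo_* names rather than the driver's own code:

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/errno.h>

/* Hypothetical context; in the driver this role is played by cnic_local. */
struct demo_ctx {
        struct delayed_work delete_task;
        int pending;
};

static struct workqueue_struct *demo_wq;

static void demo_delete_task(struct work_struct *work)
{
        struct demo_ctx *ctx =
                container_of(work, struct demo_ctx, delete_task.work);

        if (ctx->pending)       /* not ready yet: re-arm, like need_resched */
                queue_delayed_work(demo_wq, &ctx->delete_task,
                                   msecs_to_jiffies(10));
}

static int demo_init(struct demo_ctx *ctx)
{
        demo_wq = create_singlethread_workqueue("demo_wq");
        if (!demo_wq)
                return -ENOMEM;

        INIT_DELAYED_WORK(&ctx->delete_task, demo_delete_task);
        queue_delayed_work(demo_wq, &ctx->delete_task, 2 * HZ);
        return 0;
}

static void demo_exit(struct demo_ctx *ctx)
{
        cancel_delayed_work(&ctx->delete_task);
        flush_workqueue(demo_wq);
        destroy_workqueue(demo_wq);
}
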
@@ -3428,11 +3609,24 @@ static void cnic_free_irq(struct cnic_dev *dev) | |||
3428 | 3609 | ||
3429 | if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { | 3610 | if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { |
3430 | cp->disable_int_sync(dev); | 3611 | cp->disable_int_sync(dev); |
3431 | tasklet_disable(&cp->cnic_irq_task); | 3612 | tasklet_kill(&cp->cnic_irq_task); |
3432 | free_irq(ethdev->irq_arr[0].vector, dev); | 3613 | free_irq(ethdev->irq_arr[0].vector, dev); |
3433 | } | 3614 | } |
3434 | } | 3615 | } |
3435 | 3616 | ||
3617 | static int cnic_request_irq(struct cnic_dev *dev) | ||
3618 | { | ||
3619 | struct cnic_local *cp = dev->cnic_priv; | ||
3620 | struct cnic_eth_dev *ethdev = cp->ethdev; | ||
3621 | int err; | ||
3622 | |||
3623 | err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev); | ||
3624 | if (err) | ||
3625 | tasklet_disable(&cp->cnic_irq_task); | ||
3626 | |||
3627 | return err; | ||
3628 | } | ||
3629 | |||
3436 | static int cnic_init_bnx2_irq(struct cnic_dev *dev) | 3630 | static int cnic_init_bnx2_irq(struct cnic_dev *dev) |
3437 | { | 3631 | { |
3438 | struct cnic_local *cp = dev->cnic_priv; | 3632 | struct cnic_local *cp = dev->cnic_priv; |
@@ -3453,12 +3647,10 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev) | |||
3453 | cp->last_status_idx = cp->status_blk.bnx2->status_idx; | 3647 | cp->last_status_idx = cp->status_blk.bnx2->status_idx; |
3454 | tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix, | 3648 | tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix, |
3455 | (unsigned long) dev); | 3649 | (unsigned long) dev); |
3456 | err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, | 3650 | err = cnic_request_irq(dev); |
3457 | "cnic", dev); | 3651 | if (err) |
3458 | if (err) { | ||
3459 | tasklet_disable(&cp->cnic_irq_task); | ||
3460 | return err; | 3652 | return err; |
3461 | } | 3653 | |
3462 | while (cp->status_blk.bnx2->status_completion_producer_index && | 3654 | while (cp->status_blk.bnx2->status_completion_producer_index && |
3463 | i < 10) { | 3655 | i < 10) { |
3464 | CNIC_WR(dev, BNX2_HC_COALESCE_NOW, | 3656 | CNIC_WR(dev, BNX2_HC_COALESCE_NOW, |
@@ -3525,11 +3717,12 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev) | |||
3525 | { | 3717 | { |
3526 | struct cnic_local *cp = dev->cnic_priv; | 3718 | struct cnic_local *cp = dev->cnic_priv; |
3527 | struct cnic_eth_dev *ethdev = cp->ethdev; | 3719 | struct cnic_eth_dev *ethdev = cp->ethdev; |
3720 | struct cnic_uio_dev *udev = cp->udev; | ||
3528 | u32 cid_addr, tx_cid, sb_id; | 3721 | u32 cid_addr, tx_cid, sb_id; |
3529 | u32 val, offset0, offset1, offset2, offset3; | 3722 | u32 val, offset0, offset1, offset2, offset3; |
3530 | int i; | 3723 | int i; |
3531 | struct tx_bd *txbd; | 3724 | struct tx_bd *txbd; |
3532 | dma_addr_t buf_map; | 3725 | dma_addr_t buf_map, ring_map = udev->l2_ring_map; |
3533 | struct status_block *s_blk = cp->status_blk.gen; | 3726 | struct status_block *s_blk = cp->status_blk.gen; |
3534 | 3727 | ||
3535 | sb_id = cp->status_blk_num; | 3728 | sb_id = cp->status_blk_num; |
@@ -3571,18 +3764,18 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev) | |||
3571 | val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); | 3764 | val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); |
3572 | cnic_ctx_wr(dev, cid_addr, offset1, val); | 3765 | cnic_ctx_wr(dev, cid_addr, offset1, val); |
3573 | 3766 | ||
3574 | txbd = (struct tx_bd *) cp->l2_ring; | 3767 | txbd = (struct tx_bd *) udev->l2_ring; |
3575 | 3768 | ||
3576 | buf_map = cp->l2_buf_map; | 3769 | buf_map = udev->l2_buf_map; |
3577 | for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) { | 3770 | for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) { |
3578 | txbd->tx_bd_haddr_hi = (u64) buf_map >> 32; | 3771 | txbd->tx_bd_haddr_hi = (u64) buf_map >> 32; |
3579 | txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff; | 3772 | txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff; |
3580 | } | 3773 | } |
3581 | val = (u64) cp->l2_ring_map >> 32; | 3774 | val = (u64) ring_map >> 32; |
3582 | cnic_ctx_wr(dev, cid_addr, offset2, val); | 3775 | cnic_ctx_wr(dev, cid_addr, offset2, val); |
3583 | txbd->tx_bd_haddr_hi = val; | 3776 | txbd->tx_bd_haddr_hi = val; |
3584 | 3777 | ||
3585 | val = (u64) cp->l2_ring_map & 0xffffffff; | 3778 | val = (u64) ring_map & 0xffffffff; |
3586 | cnic_ctx_wr(dev, cid_addr, offset3, val); | 3779 | cnic_ctx_wr(dev, cid_addr, offset3, val); |
3587 | txbd->tx_bd_haddr_lo = val; | 3780 | txbd->tx_bd_haddr_lo = val; |
3588 | } | 3781 | } |
@@ -3591,10 +3784,12 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev) | |||
3591 | { | 3784 | { |
3592 | struct cnic_local *cp = dev->cnic_priv; | 3785 | struct cnic_local *cp = dev->cnic_priv; |
3593 | struct cnic_eth_dev *ethdev = cp->ethdev; | 3786 | struct cnic_eth_dev *ethdev = cp->ethdev; |
3787 | struct cnic_uio_dev *udev = cp->udev; | ||
3594 | u32 cid_addr, sb_id, val, coal_reg, coal_val; | 3788 | u32 cid_addr, sb_id, val, coal_reg, coal_val; |
3595 | int i; | 3789 | int i; |
3596 | struct rx_bd *rxbd; | 3790 | struct rx_bd *rxbd; |
3597 | struct status_block *s_blk = cp->status_blk.gen; | 3791 | struct status_block *s_blk = cp->status_blk.gen; |
3792 | dma_addr_t ring_map = udev->l2_ring_map; | ||
3598 | 3793 | ||
3599 | sb_id = cp->status_blk_num; | 3794 | sb_id = cp->status_blk_num; |
3600 | cnic_init_context(dev, 2); | 3795 | cnic_init_context(dev, 2); |
@@ -3628,22 +3823,22 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev) | |||
3628 | val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id); | 3823 | val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id); |
3629 | cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val); | 3824 | cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val); |
3630 | 3825 | ||
3631 | rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE); | 3826 | rxbd = (struct rx_bd *) (udev->l2_ring + BCM_PAGE_SIZE); |
3632 | for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) { | 3827 | for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) { |
3633 | dma_addr_t buf_map; | 3828 | dma_addr_t buf_map; |
3634 | int n = (i % cp->l2_rx_ring_size) + 1; | 3829 | int n = (i % cp->l2_rx_ring_size) + 1; |
3635 | 3830 | ||
3636 | buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size); | 3831 | buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size); |
3637 | rxbd->rx_bd_len = cp->l2_single_buf_size; | 3832 | rxbd->rx_bd_len = cp->l2_single_buf_size; |
3638 | rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END; | 3833 | rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END; |
3639 | rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32; | 3834 | rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32; |
3640 | rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff; | 3835 | rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff; |
3641 | } | 3836 | } |
3642 | val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32; | 3837 | val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32; |
3643 | cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val); | 3838 | cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val); |
3644 | rxbd->rx_bd_haddr_hi = val; | 3839 | rxbd->rx_bd_haddr_hi = val; |
3645 | 3840 | ||
3646 | val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff; | 3841 | val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff; |
3647 | cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val); | 3842 | cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val); |
3648 | rxbd->rx_bd_haddr_lo = val; | 3843 | rxbd->rx_bd_haddr_lo = val; |
3649 | 3844 | ||
@@ -3860,12 +4055,9 @@ static int cnic_init_bnx2x_irq(struct cnic_dev *dev) | |||
3860 | 4055 | ||
3861 | tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh, | 4056 | tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh, |
3862 | (unsigned long) dev); | 4057 | (unsigned long) dev); |
3863 | if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { | 4058 | if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) |
3864 | err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, | 4059 | err = cnic_request_irq(dev); |
3865 | "cnic", dev); | 4060 | |
3866 | if (err) | ||
3867 | tasklet_disable(&cp->cnic_irq_task); | ||
3868 | } | ||
3869 | return err; | 4061 | return err; |
3870 | } | 4062 | } |
3871 | 4063 | ||
@@ -3908,8 +4100,9 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev, | |||
3908 | struct client_init_ramrod_data *data) | 4100 | struct client_init_ramrod_data *data) |
3909 | { | 4101 | { |
3910 | struct cnic_local *cp = dev->cnic_priv; | 4102 | struct cnic_local *cp = dev->cnic_priv; |
3911 | union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) cp->l2_ring; | 4103 | struct cnic_uio_dev *udev = cp->udev; |
3912 | dma_addr_t buf_map, ring_map = cp->l2_ring_map; | 4104 | union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring; |
4105 | dma_addr_t buf_map, ring_map = udev->l2_ring_map; | ||
3913 | struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; | 4106 | struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; |
3914 | int port = CNIC_PORT(cp); | 4107 | int port = CNIC_PORT(cp); |
3915 | int i; | 4108 | int i; |
@@ -3918,7 +4111,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev, | |||
3918 | 4111 | ||
3919 | memset(txbd, 0, BCM_PAGE_SIZE); | 4112 | memset(txbd, 0, BCM_PAGE_SIZE); |
3920 | 4113 | ||
3921 | buf_map = cp->l2_buf_map; | 4114 | buf_map = udev->l2_buf_map; |
3922 | for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) { | 4115 | for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) { |
3923 | struct eth_tx_start_bd *start_bd = &txbd->start_bd; | 4116 | struct eth_tx_start_bd *start_bd = &txbd->start_bd; |
3924 | struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd); | 4117 | struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd); |
@@ -3966,17 +4159,18 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev, | |||
3966 | struct client_init_ramrod_data *data) | 4159 | struct client_init_ramrod_data *data) |
3967 | { | 4160 | { |
3968 | struct cnic_local *cp = dev->cnic_priv; | 4161 | struct cnic_local *cp = dev->cnic_priv; |
3969 | struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (cp->l2_ring + | 4162 | struct cnic_uio_dev *udev = cp->udev; |
4163 | struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring + | ||
3970 | BCM_PAGE_SIZE); | 4164 | BCM_PAGE_SIZE); |
3971 | struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *) | 4165 | struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *) |
3972 | (cp->l2_ring + (2 * BCM_PAGE_SIZE)); | 4166 | (udev->l2_ring + (2 * BCM_PAGE_SIZE)); |
3973 | struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; | 4167 | struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; |
3974 | int i; | 4168 | int i; |
3975 | int port = CNIC_PORT(cp); | 4169 | int port = CNIC_PORT(cp); |
3976 | int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); | 4170 | int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); |
3977 | int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli); | 4171 | int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli); |
3978 | u32 val; | 4172 | u32 val; |
3979 | dma_addr_t ring_map = cp->l2_ring_map; | 4173 | dma_addr_t ring_map = udev->l2_ring_map; |
3980 | 4174 | ||
3981 | /* General data */ | 4175 | /* General data */ |
3982 | data->general.client_id = cli; | 4176 | data->general.client_id = cli; |
@@ -3989,7 +4183,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev, | |||
3989 | dma_addr_t buf_map; | 4183 | dma_addr_t buf_map; |
3990 | int n = (i % cp->l2_rx_ring_size) + 1; | 4184 | int n = (i % cp->l2_rx_ring_size) + 1; |
3991 | 4185 | ||
3992 | buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size); | 4186 | buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size); |
3993 | rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32); | 4187 | rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32); |
3994 | rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); | 4188 | rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); |
3995 | } | 4189 | } |
@@ -4042,7 +4236,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev, | |||
4042 | static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev) | 4236 | static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev) |
4043 | { | 4237 | { |
4044 | struct cnic_local *cp = dev->cnic_priv; | 4238 | struct cnic_local *cp = dev->cnic_priv; |
4045 | u32 base, addr, val; | 4239 | u32 base, base2, addr, val; |
4046 | int port = CNIC_PORT(cp); | 4240 | int port = CNIC_PORT(cp); |
4047 | 4241 | ||
4048 | dev->max_iscsi_conn = 0; | 4242 | dev->max_iscsi_conn = 0; |
@@ -4050,6 +4244,8 @@ static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev) | |||
4050 | if (base == 0) | 4244 | if (base == 0) |
4051 | return; | 4245 | return; |
4052 | 4246 | ||
4247 | base2 = CNIC_RD(dev, (CNIC_PATH(cp) ? MISC_REG_GENERIC_CR_1 : | ||
4248 | MISC_REG_GENERIC_CR_0)); | ||
4053 | addr = BNX2X_SHMEM_ADDR(base, | 4249 | addr = BNX2X_SHMEM_ADDR(base, |
4054 | dev_info.port_hw_config[port].iscsi_mac_upper); | 4250 | dev_info.port_hw_config[port].iscsi_mac_upper); |
4055 | 4251 | ||
@@ -4082,11 +4278,15 @@ static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev) | |||
4082 | val16 ^= 0x1e1e; | 4278 | val16 ^= 0x1e1e; |
4083 | dev->max_iscsi_conn = val16; | 4279 | dev->max_iscsi_conn = val16; |
4084 | } | 4280 | } |
4085 | if (BNX2X_CHIP_IS_E1H(cp->chip_id)) { | 4281 | if (BNX2X_CHIP_IS_E1H(cp->chip_id) || BNX2X_CHIP_IS_E2(cp->chip_id)) { |
4086 | int func = CNIC_FUNC(cp); | 4282 | int func = CNIC_FUNC(cp); |
4087 | u32 mf_cfg_addr; | 4283 | u32 mf_cfg_addr; |
4088 | 4284 | ||
4089 | mf_cfg_addr = base + BNX2X_SHMEM_MF_BLK_OFFSET; | 4285 | if (BNX2X_SHMEM2_HAS(base2, mf_cfg_addr)) |
4286 | mf_cfg_addr = CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base2, | ||
4287 | mf_cfg_addr)); | ||
4288 | else | ||
4289 | mf_cfg_addr = base + BNX2X_SHMEM_MF_BLK_OFFSET; | ||
4090 | 4290 | ||
4091 | addr = mf_cfg_addr + | 4291 | addr = mf_cfg_addr + |
4092 | offsetof(struct mf_cfg, func_mf_config[func].e1hov_tag); | 4292 | offsetof(struct mf_cfg, func_mf_config[func].e1hov_tag); |
@@ -4111,9 +4311,22 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev) | |||
4111 | struct cnic_eth_dev *ethdev = cp->ethdev; | 4311 | struct cnic_eth_dev *ethdev = cp->ethdev; |
4112 | int func = CNIC_FUNC(cp), ret, i; | 4312 | int func = CNIC_FUNC(cp), ret, i; |
4113 | u32 pfid; | 4313 | u32 pfid; |
4114 | struct host_hc_status_block_e1x *sb = cp->status_blk.gen; | ||
4115 | 4314 | ||
4116 | cp->pfid = func; | 4315 | if (BNX2X_CHIP_IS_E2(cp->chip_id)) { |
4316 | u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR); | ||
4317 | |||
4318 | if (!(val & 1)) | ||
4319 | val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN); | ||
4320 | else | ||
4321 | val = (val >> 1) & 1; | ||
4322 | |||
4323 | if (val) | ||
4324 | cp->pfid = func >> 1; | ||
4325 | else | ||
4326 | cp->pfid = func & 0x6; | ||
4327 | } else { | ||
4328 | cp->pfid = func; | ||
4329 | } | ||
4117 | pfid = cp->pfid; | 4330 | pfid = cp->pfid; |
4118 | 4331 | ||
4119 | ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ, | 4332 | ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ, |
@@ -4128,10 +4341,21 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev) | |||
4128 | CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0); | 4341 | CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0); |
4129 | cp->kcq1.sw_prod_idx = 0; | 4342 | cp->kcq1.sw_prod_idx = 0; |
4130 | 4343 | ||
4131 | cp->kcq1.hw_prod_idx_ptr = | 4344 | if (BNX2X_CHIP_IS_E2(cp->chip_id)) { |
4132 | &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS]; | 4345 | struct host_hc_status_block_e2 *sb = cp->status_blk.gen; |
4133 | cp->kcq1.status_idx_ptr = | 4346 | |
4134 | &sb->sb.running_index[SM_RX_ID]; | 4347 | cp->kcq1.hw_prod_idx_ptr = |
4348 | &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS]; | ||
4349 | cp->kcq1.status_idx_ptr = | ||
4350 | &sb->sb.running_index[SM_RX_ID]; | ||
4351 | } else { | ||
4352 | struct host_hc_status_block_e1x *sb = cp->status_blk.gen; | ||
4353 | |||
4354 | cp->kcq1.hw_prod_idx_ptr = | ||
4355 | &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS]; | ||
4356 | cp->kcq1.status_idx_ptr = | ||
4357 | &sb->sb.running_index[SM_RX_ID]; | ||
4358 | } | ||
4135 | 4359 | ||
4136 | cnic_get_bnx2x_iscsi_info(dev); | 4360 | cnic_get_bnx2x_iscsi_info(dev); |
4137 | 4361 | ||
@@ -4190,6 +4414,7 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev) | |||
4190 | static void cnic_init_rings(struct cnic_dev *dev) | 4414 | static void cnic_init_rings(struct cnic_dev *dev) |
4191 | { | 4415 | { |
4192 | struct cnic_local *cp = dev->cnic_priv; | 4416 | struct cnic_local *cp = dev->cnic_priv; |
4417 | struct cnic_uio_dev *udev = cp->udev; | ||
4193 | 4418 | ||
4194 | if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) | 4419 | if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) |
4195 | return; | 4420 | return; |
@@ -4213,22 +4438,24 @@ static void cnic_init_rings(struct cnic_dev *dev) | |||
4213 | cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli); | 4438 | cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli); |
4214 | 4439 | ||
4215 | off = BAR_USTRORM_INTMEM + | 4440 | off = BAR_USTRORM_INTMEM + |
4216 | USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli); | 4441 | (BNX2X_CHIP_IS_E2(cp->chip_id) ? |
4442 | USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) : | ||
4443 | USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli)); | ||
4217 | 4444 | ||
4218 | for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++) | 4445 | for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++) |
4219 | CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]); | 4446 | CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]); |
4220 | 4447 | ||
4221 | set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); | 4448 | set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); |
4222 | 4449 | ||
4223 | data = cp->l2_buf; | 4450 | data = udev->l2_buf; |
4224 | 4451 | ||
4225 | memset(data, 0, sizeof(*data)); | 4452 | memset(data, 0, sizeof(*data)); |
4226 | 4453 | ||
4227 | cnic_init_bnx2x_tx_ring(dev, data); | 4454 | cnic_init_bnx2x_tx_ring(dev, data); |
4228 | cnic_init_bnx2x_rx_ring(dev, data); | 4455 | cnic_init_bnx2x_rx_ring(dev, data); |
4229 | 4456 | ||
4230 | l5_data.phy_address.lo = cp->l2_buf_map & 0xffffffff; | 4457 | l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff; |
4231 | l5_data.phy_address.hi = (u64) cp->l2_buf_map >> 32; | 4458 | l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32; |
4232 | 4459 | ||
4233 | type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT) | 4460 | type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT) |
4234 | & SPE_HDR_CONN_TYPE; | 4461 | & SPE_HDR_CONN_TYPE; |
@@ -4339,7 +4566,6 @@ static int cnic_start_hw(struct cnic_dev *dev) | |||
4339 | return -EALREADY; | 4566 | return -EALREADY; |
4340 | 4567 | ||
4341 | dev->regview = ethdev->io_base; | 4568 | dev->regview = ethdev->io_base; |
4342 | cp->chip_id = ethdev->chip_id; | ||
4343 | pci_dev_get(dev->pcidev); | 4569 | pci_dev_get(dev->pcidev); |
4344 | cp->func = PCI_FUNC(dev->pcidev->devfn); | 4570 | cp->func = PCI_FUNC(dev->pcidev->devfn); |
4345 | cp->status_blk.gen = ethdev->irq_arr[0].status_blk; | 4571 | cp->status_blk.gen = ethdev->irq_arr[0].status_blk; |
@@ -4409,10 +4635,11 @@ static void cnic_stop_hw(struct cnic_dev *dev) | |||
4409 | /* Need to wait for the ring shutdown event to complete | 4635 | /* Need to wait for the ring shutdown event to complete |
4410 | * before clearing the CNIC_UP flag. | 4636 | * before clearing the CNIC_UP flag. |
4411 | */ | 4637 | */ |
4412 | while (cp->uio_dev != -1 && i < 15) { | 4638 | while (cp->udev->uio_dev != -1 && i < 15) { |
4413 | msleep(100); | 4639 | msleep(100); |
4414 | i++; | 4640 | i++; |
4415 | } | 4641 | } |
4642 | cnic_shutdown_rings(dev); | ||
4416 | clear_bit(CNIC_F_CNIC_UP, &dev->flags); | 4643 | clear_bit(CNIC_F_CNIC_UP, &dev->flags); |
4417 | rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL); | 4644 | rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL); |
4418 | synchronize_rcu(); | 4645 | synchronize_rcu(); |
@@ -4461,7 +4688,6 @@ static struct cnic_dev *cnic_alloc_dev(struct net_device *dev, | |||
4461 | 4688 | ||
4462 | cp = cdev->cnic_priv; | 4689 | cp = cdev->cnic_priv; |
4463 | cp->dev = cdev; | 4690 | cp->dev = cdev; |
4464 | cp->uio_dev = -1; | ||
4465 | cp->l2_single_buf_size = 0x400; | 4691 | cp->l2_single_buf_size = 0x400; |
4466 | cp->l2_rx_ring_size = 3; | 4692 | cp->l2_rx_ring_size = 3; |
4467 | 4693 | ||
@@ -4516,6 +4742,7 @@ static struct cnic_dev *init_bnx2_cnic(struct net_device *dev) | |||
4516 | cp = cdev->cnic_priv; | 4742 | cp = cdev->cnic_priv; |
4517 | cp->ethdev = ethdev; | 4743 | cp->ethdev = ethdev; |
4518 | cdev->pcidev = pdev; | 4744 | cdev->pcidev = pdev; |
4745 | cp->chip_id = ethdev->chip_id; | ||
4519 | 4746 | ||
4520 | cp->cnic_ops = &cnic_bnx2_ops; | 4747 | cp->cnic_ops = &cnic_bnx2_ops; |
4521 | cp->start_hw = cnic_start_bnx2_hw; | 4748 | cp->start_hw = cnic_start_bnx2_hw; |
@@ -4570,6 +4797,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev) | |||
4570 | cp = cdev->cnic_priv; | 4797 | cp = cdev->cnic_priv; |
4571 | cp->ethdev = ethdev; | 4798 | cp->ethdev = ethdev; |
4572 | cdev->pcidev = pdev; | 4799 | cdev->pcidev = pdev; |
4800 | cp->chip_id = ethdev->chip_id; | ||
4573 | 4801 | ||
4574 | cp->cnic_ops = &cnic_bnx2x_ops; | 4802 | cp->cnic_ops = &cnic_bnx2x_ops; |
4575 | cp->start_hw = cnic_start_bnx2x_hw; | 4803 | cp->start_hw = cnic_start_bnx2x_hw; |
@@ -4581,7 +4809,10 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev) | |||
4581 | cp->stop_cm = cnic_cm_stop_bnx2x_hw; | 4809 | cp->stop_cm = cnic_cm_stop_bnx2x_hw; |
4582 | cp->enable_int = cnic_enable_bnx2x_int; | 4810 | cp->enable_int = cnic_enable_bnx2x_int; |
4583 | cp->disable_int_sync = cnic_disable_bnx2x_int_sync; | 4811 | cp->disable_int_sync = cnic_disable_bnx2x_int_sync; |
4584 | cp->ack_int = cnic_ack_bnx2x_msix; | 4812 | if (BNX2X_CHIP_IS_E2(cp->chip_id)) |
4813 | cp->ack_int = cnic_ack_bnx2x_e2_msix; | ||
4814 | else | ||
4815 | cp->ack_int = cnic_ack_bnx2x_msix; | ||
4585 | cp->close_conn = cnic_close_bnx2x_conn; | 4816 | cp->close_conn = cnic_close_bnx2x_conn; |
4586 | cp->next_idx = cnic_bnx2x_next_idx; | 4817 | cp->next_idx = cnic_bnx2x_next_idx; |
4587 | cp->hw_idx = cnic_bnx2x_hw_idx; | 4818 | cp->hw_idx = cnic_bnx2x_hw_idx; |
@@ -4689,6 +4920,7 @@ static struct notifier_block cnic_netdev_notifier = { | |||
4689 | static void cnic_release(void) | 4920 | static void cnic_release(void) |
4690 | { | 4921 | { |
4691 | struct cnic_dev *dev; | 4922 | struct cnic_dev *dev; |
4923 | struct cnic_uio_dev *udev; | ||
4692 | 4924 | ||
4693 | while (!list_empty(&cnic_dev_list)) { | 4925 | while (!list_empty(&cnic_dev_list)) { |
4694 | dev = list_entry(cnic_dev_list.next, struct cnic_dev, list); | 4926 | dev = list_entry(cnic_dev_list.next, struct cnic_dev, list); |
@@ -4702,6 +4934,11 @@ static void cnic_release(void) | |||
4702 | list_del_init(&dev->list); | 4934 | list_del_init(&dev->list); |
4703 | cnic_free_dev(dev); | 4935 | cnic_free_dev(dev); |
4704 | } | 4936 | } |
4937 | while (!list_empty(&cnic_udev_list)) { | ||
4938 | udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev, | ||
4939 | list); | ||
4940 | cnic_free_uio(udev); | ||
4941 | } | ||
4705 | } | 4942 | } |
4706 | 4943 | ||
4707 | static int __init cnic_init(void) | 4944 | static int __init cnic_init(void) |
@@ -4716,6 +4953,13 @@ static int __init cnic_init(void) | |||
4716 | return rc; | 4953 | return rc; |
4717 | } | 4954 | } |
4718 | 4955 | ||
4956 | cnic_wq = create_singlethread_workqueue("cnic_wq"); | ||
4957 | if (!cnic_wq) { | ||
4958 | cnic_release(); | ||
4959 | unregister_netdevice_notifier(&cnic_netdev_notifier); | ||
4960 | return -ENOMEM; | ||
4961 | } | ||
4962 | |||
4719 | return 0; | 4963 | return 0; |
4720 | } | 4964 | } |
4721 | 4965 | ||
@@ -4723,6 +4967,7 @@ static void __exit cnic_exit(void) | |||
4723 | { | 4967 | { |
4724 | unregister_netdevice_notifier(&cnic_netdev_notifier); | 4968 | unregister_netdevice_notifier(&cnic_netdev_notifier); |
4725 | cnic_release(); | 4969 | cnic_release(); |
4970 | destroy_workqueue(cnic_wq); | ||
4726 | } | 4971 | } |
4727 | 4972 | ||
4728 | module_init(cnic_init); | 4973 | module_init(cnic_init); |
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h index 676d008509c6..6a4a0ae5cfe3 100644 --- a/drivers/net/cnic.h +++ b/drivers/net/cnic.h | |||
@@ -168,8 +168,9 @@ struct cnic_context { | |||
168 | wait_queue_head_t waitq; | 168 | wait_queue_head_t waitq; |
169 | int wait_cond; | 169 | int wait_cond; |
170 | unsigned long timestamp; | 170 | unsigned long timestamp; |
171 | u32 ctx_flags; | 171 | unsigned long ctx_flags; |
172 | #define CTX_FL_OFFLD_START 0x00000001 | 172 | #define CTX_FL_OFFLD_START 0 |
173 | #define CTX_FL_DELETE_WAIT 1 | ||
173 | u8 ulp_proto_id; | 174 | u8 ulp_proto_id; |
174 | union { | 175 | union { |
175 | struct cnic_iscsi *iscsi; | 176 | struct cnic_iscsi *iscsi; |
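
The ctx_flags change above converts the flag defines from bit masks (0x00000001) to bit numbers (0, 1) because the field is now an unsigned long manipulated with the kernel's atomic bitops; set_bit(), test_bit() and clear_bit() take a bit index and an unsigned long pointer, as seen in the cnic.c hunks. A minimal sketch of the two styles, using illustrative names rather than the driver's:

#include <linux/bitops.h>
#include <linux/types.h>

/* Old scheme: the flag is a mask OR-ed into a u32; not atomic. */
#define OLD_FL_OFFLD_START      0x00000001

static void old_style(u32 *flags)
{
        *flags |= OLD_FL_OFFLD_START;           /* plain read-modify-write */
}

/* New scheme: the flag is a bit *number* in an unsigned long, so the
 * atomic helpers can be used, matching calls such as
 * set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) in cnic.c.
 */
#define NEW_FL_OFFLD_START      0
#define NEW_FL_DELETE_WAIT      1

static void new_style(unsigned long *flags)
{
        set_bit(NEW_FL_OFFLD_START, flags);
        if (test_bit(NEW_FL_DELETE_WAIT, flags))
                clear_bit(NEW_FL_DELETE_WAIT, flags);
}
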
@@ -194,6 +195,23 @@ struct iro { | |||
194 | u16 size; | 195 | u16 size; |
195 | }; | 196 | }; |
196 | 197 | ||
198 | struct cnic_uio_dev { | ||
199 | struct uio_info cnic_uinfo; | ||
200 | u32 uio_dev; | ||
201 | |||
202 | int l2_ring_size; | ||
203 | void *l2_ring; | ||
204 | dma_addr_t l2_ring_map; | ||
205 | |||
206 | int l2_buf_size; | ||
207 | void *l2_buf; | ||
208 | dma_addr_t l2_buf_map; | ||
209 | |||
210 | struct cnic_dev *dev; | ||
211 | struct pci_dev *pdev; | ||
212 | struct list_head list; | ||
213 | }; | ||
214 | |||
197 | struct cnic_local { | 215 | struct cnic_local { |
198 | 216 | ||
199 | spinlock_t cnic_ulp_lock; | 217 | spinlock_t cnic_ulp_lock; |
@@ -213,14 +231,9 @@ struct cnic_local { | |||
213 | 231 | ||
214 | struct cnic_eth_dev *ethdev; | 232 | struct cnic_eth_dev *ethdev; |
215 | 233 | ||
216 | void *l2_ring; | 234 | struct cnic_uio_dev *udev; |
217 | dma_addr_t l2_ring_map; | ||
218 | int l2_ring_size; | ||
219 | int l2_rx_ring_size; | ||
220 | 235 | ||
221 | void *l2_buf; | 236 | int l2_rx_ring_size; |
222 | dma_addr_t l2_buf_map; | ||
223 | int l2_buf_size; | ||
224 | int l2_single_buf_size; | 237 | int l2_single_buf_size; |
225 | 238 | ||
226 | u16 *rx_cons_ptr; | 239 | u16 *rx_cons_ptr; |
@@ -287,6 +300,8 @@ struct cnic_local { | |||
287 | int hq_size; | 300 | int hq_size; |
288 | int num_cqs; | 301 | int num_cqs; |
289 | 302 | ||
303 | struct delayed_work delete_task; | ||
304 | |||
290 | struct cnic_ctx *ctx_arr; | 305 | struct cnic_ctx *ctx_arr; |
291 | int ctx_blks; | 306 | int ctx_blks; |
292 | int ctx_blk_size; | 307 | int ctx_blk_size; |
@@ -298,9 +313,6 @@ struct cnic_local { | |||
298 | u32 pfid; | 313 | u32 pfid; |
299 | u32 shmem_base; | 314 | u32 shmem_base; |
300 | 315 | ||
301 | u32 uio_dev; | ||
302 | struct uio_info *cnic_uinfo; | ||
303 | |||
304 | struct cnic_ops *cnic_ops; | 316 | struct cnic_ops *cnic_ops; |
305 | int (*start_hw)(struct cnic_dev *); | 317 | int (*start_hw)(struct cnic_dev *); |
306 | void (*stop_hw)(struct cnic_dev *); | 318 | void (*stop_hw)(struct cnic_dev *); |
@@ -360,15 +372,35 @@ struct bnx2x_bd_chain_next { | |||
360 | #define BNX2X_ISCSI_PBL_NOT_CACHED 0xff | 372 | #define BNX2X_ISCSI_PBL_NOT_CACHED 0xff |
361 | #define BNX2X_ISCSI_PDU_HEADER_NOT_CACHED 0xff | 373 | #define BNX2X_ISCSI_PDU_HEADER_NOT_CACHED 0xff |
362 | 374 | ||
375 | #define BNX2X_CHIP_NUM_57710 0x164e | ||
363 | #define BNX2X_CHIP_NUM_57711 0x164f | 376 | #define BNX2X_CHIP_NUM_57711 0x164f |
364 | #define BNX2X_CHIP_NUM_57711E 0x1650 | 377 | #define BNX2X_CHIP_NUM_57711E 0x1650 |
378 | #define BNX2X_CHIP_NUM_57712 0x1662 | ||
379 | #define BNX2X_CHIP_NUM_57712E 0x1663 | ||
380 | #define BNX2X_CHIP_NUM_57713 0x1651 | ||
381 | #define BNX2X_CHIP_NUM_57713E 0x1652 | ||
382 | |||
365 | #define BNX2X_CHIP_NUM(x) (x >> 16) | 383 | #define BNX2X_CHIP_NUM(x) (x >> 16) |
384 | #define BNX2X_CHIP_IS_57710(x) \ | ||
385 | (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57710) | ||
366 | #define BNX2X_CHIP_IS_57711(x) \ | 386 | #define BNX2X_CHIP_IS_57711(x) \ |
367 | (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711) | 387 | (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711) |
368 | #define BNX2X_CHIP_IS_57711E(x) \ | 388 | #define BNX2X_CHIP_IS_57711E(x) \ |
369 | (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711E) | 389 | (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711E) |
370 | #define BNX2X_CHIP_IS_E1H(x) \ | 390 | #define BNX2X_CHIP_IS_E1H(x) \ |
371 | (BNX2X_CHIP_IS_57711(x) || BNX2X_CHIP_IS_57711E(x)) | 391 | (BNX2X_CHIP_IS_57711(x) || BNX2X_CHIP_IS_57711E(x)) |
392 | #define BNX2X_CHIP_IS_57712(x) \ | ||
393 | (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712) | ||
394 | #define BNX2X_CHIP_IS_57712E(x) \ | ||
395 | (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712E) | ||
396 | #define BNX2X_CHIP_IS_57713(x) \ | ||
397 | (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713) | ||
398 | #define BNX2X_CHIP_IS_57713E(x) \ | ||
399 | (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713E) | ||
400 | #define BNX2X_CHIP_IS_E2(x) \ | ||
401 | (BNX2X_CHIP_IS_57712(x) || BNX2X_CHIP_IS_57712E(x) || \ | ||
402 | BNX2X_CHIP_IS_57713(x) || BNX2X_CHIP_IS_57713E(x)) | ||
403 | |||
372 | #define IS_E1H_OFFSET BNX2X_CHIP_IS_E1H(cp->chip_id) | 404 | #define IS_E1H_OFFSET BNX2X_CHIP_IS_E1H(cp->chip_id) |
373 | 405 | ||
374 | #define BNX2X_RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) | 406 | #define BNX2X_RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) |
@@ -397,6 +429,8 @@ struct bnx2x_bd_chain_next { | |||
397 | 429 | ||
398 | #define CNIC_PORT(cp) ((cp)->pfid & 1) | 430 | #define CNIC_PORT(cp) ((cp)->pfid & 1) |
399 | #define CNIC_FUNC(cp) ((cp)->func) | 431 | #define CNIC_FUNC(cp) ((cp)->func) |
432 | #define CNIC_PATH(cp) (!BNX2X_CHIP_IS_E2(cp->chip_id) ? 0 :\ | ||
433 | (CNIC_FUNC(cp) & 1)) | ||
400 | #define CNIC_E1HVN(cp) ((cp)->pfid >> 1) | 434 | #define CNIC_E1HVN(cp) ((cp)->pfid >> 1) |
401 | 435 | ||
402 | #define BNX2X_HW_CID(cp, x) ((CNIC_PORT(cp) << 23) | \ | 436 | #define BNX2X_HW_CID(cp, x) ((CNIC_PORT(cp) << 23) | \ |
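The cnic.h hunk above turns ctx_flags from a u32 mask (CTX_FL_OFFLD_START as 0x00000001) into an unsigned long indexed by bit number (0 and 1), which is the form the kernel's atomic bit helpers expect. A small sketch of the resulting usage, with a hypothetical flags word standing in for ctx_flags:

    #include <linux/bitops.h>
    #include <linux/types.h>

    #define DEMO_FL_OFFLD_START 0   /* bit number, not a mask */
    #define DEMO_FL_DELETE_WAIT 1

    static unsigned long demo_flags;    /* hypothetical stand-in for ctx_flags */

    static void demo_mark_offload_started(void)
    {
        /* atomic read-modify-write of a single bit */
        set_bit(DEMO_FL_OFFLD_START, &demo_flags);
    }

    static bool demo_delete_pending(void)
    {
        return test_bit(DEMO_FL_DELETE_WAIT, &demo_flags);
    }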
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h index 98ebac52013e..0dbeaec4f03a 100644 --- a/drivers/net/cnic_if.h +++ b/drivers/net/cnic_if.h | |||
@@ -12,8 +12,8 @@ | |||
12 | #ifndef CNIC_IF_H | 12 | #ifndef CNIC_IF_H |
13 | #define CNIC_IF_H | 13 | #define CNIC_IF_H |
14 | 14 | ||
15 | #define CNIC_MODULE_VERSION "2.2.5" | 15 | #define CNIC_MODULE_VERSION "2.2.6" |
16 | #define CNIC_MODULE_RELDATE "September 29, 2010" | 16 | #define CNIC_MODULE_RELDATE "Oct 12, 2010" |
17 | 17 | ||
18 | #define CNIC_ULP_RDMA 0 | 18 | #define CNIC_ULP_RDMA 0 |
19 | #define CNIC_ULP_ISCSI 1 | 19 | #define CNIC_ULP_ISCSI 1 |
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 55edcb74abf4..5b04eff2fd23 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c | |||
@@ -6115,6 +6115,13 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx) | |||
6115 | 6115 | ||
6116 | mac->autoneg = 0; | 6116 | mac->autoneg = 0; |
6117 | 6117 | ||
6118 | /* Fiber NICs only allow 1000 Mbps full duplex */ | ||
6119 | if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) && | ||
6120 | spddplx != (SPEED_1000 + DUPLEX_FULL)) { | ||
6121 | dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n"); | ||
6122 | return -EINVAL; | ||
6123 | } | ||
6124 | |||
6118 | switch (spddplx) { | 6125 | switch (spddplx) { |
6119 | case SPEED_10 + DUPLEX_HALF: | 6126 | case SPEED_10 + DUPLEX_HALF: |
6120 | mac->forced_speed_duplex = ADVERTISE_10_HALF; | 6127 | mac->forced_speed_duplex = ADVERTISE_10_HALF; |
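The igb hunk above rejects any forced speed/duplex setting other than 1000 Mbps full duplex when the media type is internal serdes (fiber). A sketch of the same validation as a standalone helper; the name and the is_serdes parameter are illustrative, not part of the driver:

    #include <linux/errno.h>
    #include <linux/ethtool.h>  /* SPEED_* and DUPLEX_* */
    #include <linux/types.h>

    /* spddplx is the sum of an ethtool SPEED_* and DUPLEX_* value. */
    static int demo_validate_spddplx(bool is_serdes, u32 spddplx)
    {
        /* fiber links cannot be forced to anything below 1000/full */
        if (is_serdes && spddplx != (SPEED_1000 + DUPLEX_FULL))
            return -EINVAL;

        switch (spddplx) {
        case SPEED_10 + DUPLEX_HALF:
        case SPEED_10 + DUPLEX_FULL:
        case SPEED_100 + DUPLEX_HALF:
        case SPEED_100 + DUPLEX_FULL:
        case SPEED_1000 + DUPLEX_FULL:
            return 0;
        default:
            return -EINVAL;
        }
    }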
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index e80657c75506..0bd8fbb5bfd0 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c | |||
@@ -39,20 +39,20 @@ | |||
39 | #define IXGBE_82599_MC_TBL_SIZE 128 | 39 | #define IXGBE_82599_MC_TBL_SIZE 128 |
40 | #define IXGBE_82599_VFT_TBL_SIZE 128 | 40 | #define IXGBE_82599_VFT_TBL_SIZE 128 |
41 | 41 | ||
42 | void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); | 42 | static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); |
43 | void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); | 43 | static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); |
44 | void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); | 44 | static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); |
45 | s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, | 45 | static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, |
46 | ixgbe_link_speed speed, | 46 | ixgbe_link_speed speed, |
47 | bool autoneg, | 47 | bool autoneg, |
48 | bool autoneg_wait_to_complete); | 48 | bool autoneg_wait_to_complete); |
49 | static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, | 49 | static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, |
50 | ixgbe_link_speed speed, | 50 | ixgbe_link_speed speed, |
51 | bool autoneg, | 51 | bool autoneg, |
52 | bool autoneg_wait_to_complete); | 52 | bool autoneg_wait_to_complete); |
53 | s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, | 53 | static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, |
54 | bool autoneg_wait_to_complete); | 54 | bool autoneg_wait_to_complete); |
55 | s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, | 55 | static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, |
56 | ixgbe_link_speed speed, | 56 | ixgbe_link_speed speed, |
57 | bool autoneg, | 57 | bool autoneg, |
58 | bool autoneg_wait_to_complete); | 58 | bool autoneg_wait_to_complete); |
@@ -369,7 +369,7 @@ out: | |||
369 | * Configures link settings based on values in the ixgbe_hw struct. | 369 | * Configures link settings based on values in the ixgbe_hw struct. |
370 | * Restarts the link. Performs autonegotiation if needed. | 370 | * Restarts the link. Performs autonegotiation if needed. |
371 | **/ | 371 | **/ |
372 | s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, | 372 | static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, |
373 | bool autoneg_wait_to_complete) | 373 | bool autoneg_wait_to_complete) |
374 | { | 374 | { |
375 | u32 autoc_reg; | 375 | u32 autoc_reg; |
@@ -418,7 +418,7 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, | |||
418 | * PHY states. This includes selectively shutting down the Tx | 418 | * PHY states. This includes selectively shutting down the Tx |
419 | * laser on the PHY, effectively halting physical link. | 419 | * laser on the PHY, effectively halting physical link. |
420 | **/ | 420 | **/ |
421 | void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) | 421 | static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) |
422 | { | 422 | { |
423 | u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); | 423 | u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); |
424 | 424 | ||
@@ -437,7 +437,7 @@ void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) | |||
437 | * PHY states. This includes selectively turning on the Tx | 437 | * PHY states. This includes selectively turning on the Tx |
438 | * laser on the PHY, effectively starting physical link. | 438 | * laser on the PHY, effectively starting physical link. |
439 | **/ | 439 | **/ |
440 | void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) | 440 | static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) |
441 | { | 441 | { |
442 | u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); | 442 | u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); |
443 | 443 | ||
@@ -460,7 +460,7 @@ void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) | |||
460 | * end. This is consistent with true clause 37 autoneg, which also | 460 | * end. This is consistent with true clause 37 autoneg, which also |
461 | * involves a loss of signal. | 461 | * involves a loss of signal. |
462 | **/ | 462 | **/ |
463 | void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) | 463 | static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) |
464 | { | 464 | { |
465 | hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n"); | 465 | hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n"); |
466 | 466 | ||
@@ -729,7 +729,7 @@ out: | |||
729 | * | 729 | * |
730 | * Set the link speed in the AUTOC register and restarts link. | 730 | * Set the link speed in the AUTOC register and restarts link. |
731 | **/ | 731 | **/ |
732 | s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, | 732 | static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, |
733 | ixgbe_link_speed speed, bool autoneg, | 733 | ixgbe_link_speed speed, bool autoneg, |
734 | bool autoneg_wait_to_complete) | 734 | bool autoneg_wait_to_complete) |
735 | { | 735 | { |
@@ -1415,92 +1415,6 @@ s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr) | |||
1415 | } | 1415 | } |
1416 | 1416 | ||
1417 | /** | 1417 | /** |
1418 | * ixgbe_atr_set_src_ipv6_82599 - Sets the source IPv6 address | ||
1419 | * @input: input stream to modify | ||
1420 | * @src_addr_1: the first 4 bytes of the IP address to load | ||
1421 | * @src_addr_2: the second 4 bytes of the IP address to load | ||
1422 | * @src_addr_3: the third 4 bytes of the IP address to load | ||
1423 | * @src_addr_4: the fourth 4 bytes of the IP address to load | ||
1424 | **/ | ||
1425 | s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input, | ||
1426 | u32 src_addr_1, u32 src_addr_2, | ||
1427 | u32 src_addr_3, u32 src_addr_4) | ||
1428 | { | ||
1429 | input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff; | ||
1430 | input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] = | ||
1431 | (src_addr_4 >> 8) & 0xff; | ||
1432 | input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] = | ||
1433 | (src_addr_4 >> 16) & 0xff; | ||
1434 | input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] = src_addr_4 >> 24; | ||
1435 | |||
1436 | input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4] = src_addr_3 & 0xff; | ||
1437 | input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] = | ||
1438 | (src_addr_3 >> 8) & 0xff; | ||
1439 | input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] = | ||
1440 | (src_addr_3 >> 16) & 0xff; | ||
1441 | input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] = src_addr_3 >> 24; | ||
1442 | |||
1443 | input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8] = src_addr_2 & 0xff; | ||
1444 | input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] = | ||
1445 | (src_addr_2 >> 8) & 0xff; | ||
1446 | input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] = | ||
1447 | (src_addr_2 >> 16) & 0xff; | ||
1448 | input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] = src_addr_2 >> 24; | ||
1449 | |||
1450 | input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12] = src_addr_1 & 0xff; | ||
1451 | input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] = | ||
1452 | (src_addr_1 >> 8) & 0xff; | ||
1453 | input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] = | ||
1454 | (src_addr_1 >> 16) & 0xff; | ||
1455 | input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] = src_addr_1 >> 24; | ||
1456 | |||
1457 | return 0; | ||
1458 | } | ||
1459 | |||
1460 | /** | ||
1461 | * ixgbe_atr_set_dst_ipv6_82599 - Sets the destination IPv6 address | ||
1462 | * @input: input stream to modify | ||
1463 | * @dst_addr_1: the first 4 bytes of the IP address to load | ||
1464 | * @dst_addr_2: the second 4 bytes of the IP address to load | ||
1465 | * @dst_addr_3: the third 4 bytes of the IP address to load | ||
1466 | * @dst_addr_4: the fourth 4 bytes of the IP address to load | ||
1467 | **/ | ||
1468 | s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input, | ||
1469 | u32 dst_addr_1, u32 dst_addr_2, | ||
1470 | u32 dst_addr_3, u32 dst_addr_4) | ||
1471 | { | ||
1472 | input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff; | ||
1473 | input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] = | ||
1474 | (dst_addr_4 >> 8) & 0xff; | ||
1475 | input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] = | ||
1476 | (dst_addr_4 >> 16) & 0xff; | ||
1477 | input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] = dst_addr_4 >> 24; | ||
1478 | |||
1479 | input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4] = dst_addr_3 & 0xff; | ||
1480 | input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] = | ||
1481 | (dst_addr_3 >> 8) & 0xff; | ||
1482 | input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] = | ||
1483 | (dst_addr_3 >> 16) & 0xff; | ||
1484 | input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] = dst_addr_3 >> 24; | ||
1485 | |||
1486 | input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8] = dst_addr_2 & 0xff; | ||
1487 | input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] = | ||
1488 | (dst_addr_2 >> 8) & 0xff; | ||
1489 | input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] = | ||
1490 | (dst_addr_2 >> 16) & 0xff; | ||
1491 | input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] = dst_addr_2 >> 24; | ||
1492 | |||
1493 | input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12] = dst_addr_1 & 0xff; | ||
1494 | input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] = | ||
1495 | (dst_addr_1 >> 8) & 0xff; | ||
1496 | input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] = | ||
1497 | (dst_addr_1 >> 16) & 0xff; | ||
1498 | input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] = dst_addr_1 >> 24; | ||
1499 | |||
1500 | return 0; | ||
1501 | } | ||
1502 | |||
1503 | /** | ||
1504 | * ixgbe_atr_set_src_port_82599 - Sets the source port | 1418 | * ixgbe_atr_set_src_port_82599 - Sets the source port |
1505 | * @input: input stream to modify | 1419 | * @input: input stream to modify |
1506 | * @src_port: the source port to load | 1420 | * @src_port: the source port to load |
@@ -1540,19 +1454,6 @@ s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte) | |||
1540 | } | 1454 | } |
1541 | 1455 | ||
1542 | /** | 1456 | /** |
1543 | * ixgbe_atr_set_vm_pool_82599 - Sets the Virtual Machine pool | ||
1544 | * @input: input stream to modify | ||
1545 | * @vm_pool: the Virtual Machine pool to load | ||
1546 | **/ | ||
1547 | s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input, | ||
1548 | u8 vm_pool) | ||
1549 | { | ||
1550 | input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool; | ||
1551 | |||
1552 | return 0; | ||
1553 | } | ||
1554 | |||
1555 | /** | ||
1556 | * ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type | 1457 | * ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type |
1557 | * @input: input stream to modify | 1458 | * @input: input stream to modify |
1558 | * @l4type: the layer 4 type value to load | 1459 | * @l4type: the layer 4 type value to load |
@@ -1645,41 +1546,6 @@ static s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input, | |||
1645 | } | 1546 | } |
1646 | 1547 | ||
1647 | /** | 1548 | /** |
1648 | * ixgbe_atr_get_dst_ipv6_82599 - Gets the destination IPv6 address | ||
1649 | * @input: input stream to search | ||
1650 | * @dst_addr_1: the first 4 bytes of the IP address to load | ||
1651 | * @dst_addr_2: the second 4 bytes of the IP address to load | ||
1652 | * @dst_addr_3: the third 4 bytes of the IP address to load | ||
1653 | * @dst_addr_4: the fourth 4 bytes of the IP address to load | ||
1654 | **/ | ||
1655 | s32 ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input, | ||
1656 | u32 *dst_addr_1, u32 *dst_addr_2, | ||
1657 | u32 *dst_addr_3, u32 *dst_addr_4) | ||
1658 | { | ||
1659 | *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12]; | ||
1660 | *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] << 8; | ||
1661 | *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] << 16; | ||
1662 | *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] << 24; | ||
1663 | |||
1664 | *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8]; | ||
1665 | *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] << 8; | ||
1666 | *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] << 16; | ||
1667 | *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] << 24; | ||
1668 | |||
1669 | *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4]; | ||
1670 | *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] << 8; | ||
1671 | *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] << 16; | ||
1672 | *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] << 24; | ||
1673 | |||
1674 | *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET]; | ||
1675 | *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] << 8; | ||
1676 | *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] << 16; | ||
1677 | *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] << 24; | ||
1678 | |||
1679 | return 0; | ||
1680 | } | ||
1681 | |||
1682 | /** | ||
1683 | * ixgbe_atr_get_src_port_82599 - Gets the source port | 1549 | * ixgbe_atr_get_src_port_82599 - Gets the source port |
1684 | * @input: input stream to modify | 1550 | * @input: input stream to modify |
1685 | * @src_port: the source port to load | 1551 | * @src_port: the source port to load |
@@ -1732,19 +1598,6 @@ static s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input, | |||
1732 | } | 1598 | } |
1733 | 1599 | ||
1734 | /** | 1600 | /** |
1735 | * ixgbe_atr_get_vm_pool_82599 - Gets the Virtual Machine pool | ||
1736 | * @input: input stream to modify | ||
1737 | * @vm_pool: the Virtual Machine pool to load | ||
1738 | **/ | ||
1739 | s32 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input, | ||
1740 | u8 *vm_pool) | ||
1741 | { | ||
1742 | *vm_pool = input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET]; | ||
1743 | |||
1744 | return 0; | ||
1745 | } | ||
1746 | |||
1747 | /** | ||
1748 | * ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type | 1601 | * ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type |
1749 | * @input: input stream to modify | 1602 | * @input: input stream to modify |
1750 | * @l4type: the layer 4 type value to load | 1603 | * @l4type: the layer 4 type value to load |
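The removed ATR IPv6 setters above spread each 32-bit word of the address into the input byte stream least-significant byte first. A compact sketch of that packing for a single word, with hypothetical buffer and offset names:

    #include <linux/types.h>

    /* Store one 32-bit word into a byte stream, LSB first, as the
     * removed ixgbe_atr_set_*_ipv6_82599() helpers did per word.
     */
    static void demo_put_le32(u8 *stream, unsigned int offset, u32 val)
    {
        stream[offset]     = val & 0xff;
        stream[offset + 1] = (val >> 8) & 0xff;
        stream[offset + 2] = (val >> 16) & 0xff;
        stream[offset + 3] = val >> 24;
    }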
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index 9595b1bfb8dd..e3eca1316389 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c | |||
@@ -52,6 +52,7 @@ static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index); | |||
52 | static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); | 52 | static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); |
53 | static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); | 53 | static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); |
54 | static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num); | 54 | static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num); |
55 | static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); | ||
55 | 56 | ||
56 | /** | 57 | /** |
57 | * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx | 58 | * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx |
@@ -637,7 +638,7 @@ out: | |||
637 | * Polls the status bit (bit 1) of the EERD or EEWR to determine when the | 638 | * Polls the status bit (bit 1) of the EERD or EEWR to determine when the |
638 | * read or write is done respectively. | 639 | * read or write is done respectively. |
639 | **/ | 640 | **/ |
640 | s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) | 641 | static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) |
641 | { | 642 | { |
642 | u32 i; | 643 | u32 i; |
643 | u32 reg; | 644 | u32 reg; |
@@ -2449,7 +2450,7 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) | |||
2449 | * return the VLVF index where this VLAN id should be placed | 2450 | * return the VLVF index where this VLAN id should be placed |
2450 | * | 2451 | * |
2451 | **/ | 2452 | **/ |
2452 | s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan) | 2453 | static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan) |
2453 | { | 2454 | { |
2454 | u32 bits = 0; | 2455 | u32 bits = 0; |
2455 | u32 first_empty_slot = 0; | 2456 | u32 first_empty_slot = 0; |
@@ -2704,48 +2705,3 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, | |||
2704 | 2705 | ||
2705 | return 0; | 2706 | return 0; |
2706 | } | 2707 | } |
2707 | |||
2708 | /** | ||
2709 | * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from | ||
2710 | * the EEPROM | ||
2711 | * @hw: pointer to hardware structure | ||
2712 | * @wwnn_prefix: the alternative WWNN prefix | ||
2713 | * @wwpn_prefix: the alternative WWPN prefix | ||
2714 | * | ||
2715 | * This function will read the EEPROM from the alternative SAN MAC address | ||
2716 | * block to check the support for the alternative WWNN/WWPN prefix support. | ||
2717 | **/ | ||
2718 | s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, | ||
2719 | u16 *wwpn_prefix) | ||
2720 | { | ||
2721 | u16 offset, caps; | ||
2722 | u16 alt_san_mac_blk_offset; | ||
2723 | |||
2724 | /* clear output first */ | ||
2725 | *wwnn_prefix = 0xFFFF; | ||
2726 | *wwpn_prefix = 0xFFFF; | ||
2727 | |||
2728 | /* check if alternative SAN MAC is supported */ | ||
2729 | hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR, | ||
2730 | &alt_san_mac_blk_offset); | ||
2731 | |||
2732 | if ((alt_san_mac_blk_offset == 0) || | ||
2733 | (alt_san_mac_blk_offset == 0xFFFF)) | ||
2734 | goto wwn_prefix_out; | ||
2735 | |||
2736 | /* check capability in alternative san mac address block */ | ||
2737 | offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; | ||
2738 | hw->eeprom.ops.read(hw, offset, &caps); | ||
2739 | if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) | ||
2740 | goto wwn_prefix_out; | ||
2741 | |||
2742 | /* get the corresponding prefix for WWNN/WWPN */ | ||
2743 | offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; | ||
2744 | hw->eeprom.ops.read(hw, offset, wwnn_prefix); | ||
2745 | |||
2746 | offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; | ||
2747 | hw->eeprom.ops.read(hw, offset, wwpn_prefix); | ||
2748 | |||
2749 | wwn_prefix_out: | ||
2750 | return 0; | ||
2751 | } | ||
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h index 5cf15aa11cac..424c223437dc 100644 --- a/drivers/net/ixgbe/ixgbe_common.h +++ b/drivers/net/ixgbe/ixgbe_common.h | |||
@@ -52,7 +52,6 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, | |||
52 | s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, | 52 | s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, |
53 | u16 *checksum_val); | 53 | u16 *checksum_val); |
54 | s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); | 54 | s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); |
55 | s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); | ||
56 | 55 | ||
57 | s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, | 56 | s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, |
58 | u32 enable_addr); | 57 | u32 enable_addr); |
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c index 9aea4f04bbd2..8bb9ddb6dffe 100644 --- a/drivers/net/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ixgbe/ixgbe_dcb.c | |||
@@ -34,98 +34,6 @@ | |||
34 | #include "ixgbe_dcb_82599.h" | 34 | #include "ixgbe_dcb_82599.h" |
35 | 35 | ||
36 | /** | 36 | /** |
37 | * ixgbe_dcb_config - Struct containing DCB settings. | ||
38 | * @dcb_config: Pointer to DCB config structure | ||
39 | * | ||
40 | * This function checks DCB rules for DCB settings. | ||
41 | * The following rules are checked: | ||
42 | * 1. The sum of bandwidth percentages of all Bandwidth Groups must total 100%. | ||
43 | * 2. The sum of bandwidth percentages of all Traffic Classes within a Bandwidth | ||
44 | * Group must total 100. | ||
45 | * 3. A Traffic Class should not be set to both Link Strict Priority | ||
46 | * and Group Strict Priority. | ||
47 | * 4. Link strict Bandwidth Groups can only have link strict traffic classes | ||
48 | * with zero bandwidth. | ||
49 | */ | ||
50 | s32 ixgbe_dcb_check_config(struct ixgbe_dcb_config *dcb_config) | ||
51 | { | ||
52 | struct tc_bw_alloc *p; | ||
53 | s32 ret_val = 0; | ||
54 | u8 i, j, bw = 0, bw_id; | ||
55 | u8 bw_sum[2][MAX_BW_GROUP]; | ||
56 | bool link_strict[2][MAX_BW_GROUP]; | ||
57 | |||
58 | memset(bw_sum, 0, sizeof(bw_sum)); | ||
59 | memset(link_strict, 0, sizeof(link_strict)); | ||
60 | |||
61 | /* First Tx, then Rx */ | ||
62 | for (i = 0; i < 2; i++) { | ||
63 | /* Check each traffic class for rule violation */ | ||
64 | for (j = 0; j < MAX_TRAFFIC_CLASS; j++) { | ||
65 | p = &dcb_config->tc_config[j].path[i]; | ||
66 | |||
67 | bw = p->bwg_percent; | ||
68 | bw_id = p->bwg_id; | ||
69 | |||
70 | if (bw_id >= MAX_BW_GROUP) { | ||
71 | ret_val = DCB_ERR_CONFIG; | ||
72 | goto err_config; | ||
73 | } | ||
74 | if (p->prio_type == prio_link) { | ||
75 | link_strict[i][bw_id] = true; | ||
76 | /* Link strict should have zero bandwidth */ | ||
77 | if (bw) { | ||
78 | ret_val = DCB_ERR_LS_BW_NONZERO; | ||
79 | goto err_config; | ||
80 | } | ||
81 | } else if (!bw) { | ||
82 | /* | ||
83 | * Traffic classes without link strict | ||
84 | * should have non-zero bandwidth. | ||
85 | */ | ||
86 | ret_val = DCB_ERR_TC_BW_ZERO; | ||
87 | goto err_config; | ||
88 | } | ||
89 | bw_sum[i][bw_id] += bw; | ||
90 | } | ||
91 | |||
92 | bw = 0; | ||
93 | |||
94 | /* Check each bandwidth group for rule violation */ | ||
95 | for (j = 0; j < MAX_BW_GROUP; j++) { | ||
96 | bw += dcb_config->bw_percentage[i][j]; | ||
97 | /* | ||
98 | * Sum of bandwidth percentages of all traffic classes | ||
99 | * within a Bandwidth Group must total 100 except for | ||
100 | * link strict group (zero bandwidth). | ||
101 | */ | ||
102 | if (link_strict[i][j]) { | ||
103 | if (bw_sum[i][j]) { | ||
104 | /* | ||
105 | * Link strict group should have zero | ||
106 | * bandwidth. | ||
107 | */ | ||
108 | ret_val = DCB_ERR_LS_BWG_NONZERO; | ||
109 | goto err_config; | ||
110 | } | ||
111 | } else if (bw_sum[i][j] != BW_PERCENT && | ||
112 | bw_sum[i][j] != 0) { | ||
113 | ret_val = DCB_ERR_TC_BW; | ||
114 | goto err_config; | ||
115 | } | ||
116 | } | ||
117 | |||
118 | if (bw != BW_PERCENT) { | ||
119 | ret_val = DCB_ERR_BW_GROUP; | ||
120 | goto err_config; | ||
121 | } | ||
122 | } | ||
123 | |||
124 | err_config: | ||
125 | return ret_val; | ||
126 | } | ||
127 | |||
128 | /** | ||
129 | * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits | 37 | * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits |
130 | * @ixgbe_dcb_config: Struct containing DCB settings. | 38 | * @ixgbe_dcb_config: Struct containing DCB settings. |
131 | * @direction: Configuring either Tx or Rx. | 39 | * @direction: Configuring either Tx or Rx. |
@@ -203,133 +111,6 @@ out: | |||
203 | } | 111 | } |
204 | 112 | ||
205 | /** | 113 | /** |
206 | * ixgbe_dcb_get_tc_stats - Returns status of each traffic class | ||
207 | * @hw: pointer to hardware structure | ||
208 | * @stats: pointer to statistics structure | ||
209 | * @tc_count: Number of elements in bwg_array. | ||
210 | * | ||
211 | * This function returns the status data for each of the Traffic Classes in use. | ||
212 | */ | ||
213 | s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats, | ||
214 | u8 tc_count) | ||
215 | { | ||
216 | s32 ret = 0; | ||
217 | if (hw->mac.type == ixgbe_mac_82598EB) | ||
218 | ret = ixgbe_dcb_get_tc_stats_82598(hw, stats, tc_count); | ||
219 | else if (hw->mac.type == ixgbe_mac_82599EB) | ||
220 | ret = ixgbe_dcb_get_tc_stats_82599(hw, stats, tc_count); | ||
221 | return ret; | ||
222 | } | ||
223 | |||
224 | /** | ||
225 | * ixgbe_dcb_get_pfc_stats - Returns CBFC status of each traffic class | ||
226 | * hw - pointer to hardware structure | ||
227 | * stats - pointer to statistics structure | ||
228 | * tc_count - Number of elements in bwg_array. | ||
229 | * | ||
230 | * This function returns the CBFC status data for each of the Traffic Classes. | ||
231 | */ | ||
232 | s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats, | ||
233 | u8 tc_count) | ||
234 | { | ||
235 | s32 ret = 0; | ||
236 | if (hw->mac.type == ixgbe_mac_82598EB) | ||
237 | ret = ixgbe_dcb_get_pfc_stats_82598(hw, stats, tc_count); | ||
238 | else if (hw->mac.type == ixgbe_mac_82599EB) | ||
239 | ret = ixgbe_dcb_get_pfc_stats_82599(hw, stats, tc_count); | ||
240 | return ret; | ||
241 | } | ||
242 | |||
243 | /** | ||
244 | * ixgbe_dcb_config_rx_arbiter - Config Rx arbiter | ||
245 | * @hw: pointer to hardware structure | ||
246 | * @dcb_config: pointer to ixgbe_dcb_config structure | ||
247 | * | ||
248 | * Configure Rx Data Arbiter and credits for each traffic class. | ||
249 | */ | ||
250 | s32 ixgbe_dcb_config_rx_arbiter(struct ixgbe_hw *hw, | ||
251 | struct ixgbe_dcb_config *dcb_config) | ||
252 | { | ||
253 | s32 ret = 0; | ||
254 | if (hw->mac.type == ixgbe_mac_82598EB) | ||
255 | ret = ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config); | ||
256 | else if (hw->mac.type == ixgbe_mac_82599EB) | ||
257 | ret = ixgbe_dcb_config_rx_arbiter_82599(hw, dcb_config); | ||
258 | return ret; | ||
259 | } | ||
260 | |||
261 | /** | ||
262 | * ixgbe_dcb_config_tx_desc_arbiter - Config Tx Desc arbiter | ||
263 | * @hw: pointer to hardware structure | ||
264 | * @dcb_config: pointer to ixgbe_dcb_config structure | ||
265 | * | ||
266 | * Configure Tx Descriptor Arbiter and credits for each traffic class. | ||
267 | */ | ||
268 | s32 ixgbe_dcb_config_tx_desc_arbiter(struct ixgbe_hw *hw, | ||
269 | struct ixgbe_dcb_config *dcb_config) | ||
270 | { | ||
271 | s32 ret = 0; | ||
272 | if (hw->mac.type == ixgbe_mac_82598EB) | ||
273 | ret = ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config); | ||
274 | else if (hw->mac.type == ixgbe_mac_82599EB) | ||
275 | ret = ixgbe_dcb_config_tx_desc_arbiter_82599(hw, dcb_config); | ||
276 | return ret; | ||
277 | } | ||
278 | |||
279 | /** | ||
280 | * ixgbe_dcb_config_tx_data_arbiter - Config Tx data arbiter | ||
281 | * @hw: pointer to hardware structure | ||
282 | * @dcb_config: pointer to ixgbe_dcb_config structure | ||
283 | * | ||
284 | * Configure Tx Data Arbiter and credits for each traffic class. | ||
285 | */ | ||
286 | s32 ixgbe_dcb_config_tx_data_arbiter(struct ixgbe_hw *hw, | ||
287 | struct ixgbe_dcb_config *dcb_config) | ||
288 | { | ||
289 | s32 ret = 0; | ||
290 | if (hw->mac.type == ixgbe_mac_82598EB) | ||
291 | ret = ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config); | ||
292 | else if (hw->mac.type == ixgbe_mac_82599EB) | ||
293 | ret = ixgbe_dcb_config_tx_data_arbiter_82599(hw, dcb_config); | ||
294 | return ret; | ||
295 | } | ||
296 | |||
297 | /** | ||
298 | * ixgbe_dcb_config_pfc - Config priority flow control | ||
299 | * @hw: pointer to hardware structure | ||
300 | * @dcb_config: pointer to ixgbe_dcb_config structure | ||
301 | * | ||
302 | * Configure Priority Flow Control for each traffic class. | ||
303 | */ | ||
304 | s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw, | ||
305 | struct ixgbe_dcb_config *dcb_config) | ||
306 | { | ||
307 | s32 ret = 0; | ||
308 | if (hw->mac.type == ixgbe_mac_82598EB) | ||
309 | ret = ixgbe_dcb_config_pfc_82598(hw, dcb_config); | ||
310 | else if (hw->mac.type == ixgbe_mac_82599EB) | ||
311 | ret = ixgbe_dcb_config_pfc_82599(hw, dcb_config); | ||
312 | return ret; | ||
313 | } | ||
314 | |||
315 | /** | ||
316 | * ixgbe_dcb_config_tc_stats - Config traffic class statistics | ||
317 | * @hw: pointer to hardware structure | ||
318 | * | ||
319 | * Configure queue statistics registers, all queues belonging to same traffic | ||
320 | * class uses a single set of queue statistics counters. | ||
321 | */ | ||
322 | s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw) | ||
323 | { | ||
324 | s32 ret = 0; | ||
325 | if (hw->mac.type == ixgbe_mac_82598EB) | ||
326 | ret = ixgbe_dcb_config_tc_stats_82598(hw); | ||
327 | else if (hw->mac.type == ixgbe_mac_82599EB) | ||
328 | ret = ixgbe_dcb_config_tc_stats_82599(hw); | ||
329 | return ret; | ||
330 | } | ||
331 | |||
332 | /** | ||
333 | * ixgbe_dcb_hw_config - Config and enable DCB | 114 | * ixgbe_dcb_hw_config - Config and enable DCB |
334 | * @hw: pointer to hardware structure | 115 | * @hw: pointer to hardware structure |
335 | * @dcb_config: pointer to ixgbe_dcb_config structure | 116 | * @dcb_config: pointer to ixgbe_dcb_config structure |
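The removed ixgbe_dcb_check_config() above enforced the rules spelled out in its comment block: the bandwidth percentages of all groups must total 100, the traffic classes within a group must also total 100 (zero for a link-strict group), and link-strict classes carry zero bandwidth. A simplified sketch of the first rule only, with hypothetical constants standing in for MAX_BW_GROUP and BW_PERCENT:

    #include <linux/errno.h>
    #include <linux/types.h>

    #define DEMO_MAX_BW_GROUP 8
    #define DEMO_BW_PERCENT   100

    /* Rule 1: the per-direction bandwidth-group percentages must sum to 100. */
    static int demo_check_bwg_total(const u8 bw_percentage[DEMO_MAX_BW_GROUP])
    {
        unsigned int i, total = 0;

        for (i = 0; i < DEMO_MAX_BW_GROUP; i++)
            total += bw_percentage[i];

        return total == DEMO_BW_PERCENT ? 0 : -EINVAL;
    }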
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h index 5caafd4afbc3..eb1059f09da0 100644 --- a/drivers/net/ixgbe/ixgbe_dcb.h +++ b/drivers/net/ixgbe/ixgbe_dcb.h | |||
@@ -149,27 +149,9 @@ struct ixgbe_dcb_config { | |||
149 | 149 | ||
150 | /* DCB driver APIs */ | 150 | /* DCB driver APIs */ |
151 | 151 | ||
152 | /* DCB rule checking function.*/ | ||
153 | s32 ixgbe_dcb_check_config(struct ixgbe_dcb_config *config); | ||
154 | |||
155 | /* DCB credits calculation */ | 152 | /* DCB credits calculation */ |
156 | s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, u8); | 153 | s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, u8); |
157 | 154 | ||
158 | /* DCB PFC functions */ | ||
159 | s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *, struct ixgbe_dcb_config *g); | ||
160 | s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8); | ||
161 | |||
162 | /* DCB traffic class stats */ | ||
163 | s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *); | ||
164 | s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8); | ||
165 | |||
166 | /* DCB config arbiters */ | ||
167 | s32 ixgbe_dcb_config_tx_desc_arbiter(struct ixgbe_hw *, | ||
168 | struct ixgbe_dcb_config *); | ||
169 | s32 ixgbe_dcb_config_tx_data_arbiter(struct ixgbe_hw *, | ||
170 | struct ixgbe_dcb_config *); | ||
171 | s32 ixgbe_dcb_config_rx_arbiter(struct ixgbe_hw *, struct ixgbe_dcb_config *); | ||
172 | |||
173 | /* DCB hw initialization */ | 155 | /* DCB hw initialization */ |
174 | s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); | 156 | s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); |
175 | 157 | ||
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c index f0e9279d4669..50288bcadc59 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_82598.c +++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c | |||
@@ -32,65 +32,6 @@ | |||
32 | #include "ixgbe_dcb_82598.h" | 32 | #include "ixgbe_dcb_82598.h" |
33 | 33 | ||
34 | /** | 34 | /** |
35 | * ixgbe_dcb_get_tc_stats_82598 - Return status data for each traffic class | ||
36 | * @hw: pointer to hardware structure | ||
37 | * @stats: pointer to statistics structure | ||
38 | * @tc_count: Number of elements in bwg_array. | ||
39 | * | ||
40 | * This function returns the status data for each of the Traffic Classes in use. | ||
41 | */ | ||
42 | s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw, | ||
43 | struct ixgbe_hw_stats *stats, | ||
44 | u8 tc_count) | ||
45 | { | ||
46 | int tc; | ||
47 | |||
48 | if (tc_count > MAX_TRAFFIC_CLASS) | ||
49 | return DCB_ERR_PARAM; | ||
50 | |||
51 | /* Statistics pertaining to each traffic class */ | ||
52 | for (tc = 0; tc < tc_count; tc++) { | ||
53 | /* Transmitted Packets */ | ||
54 | stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc)); | ||
55 | /* Transmitted Bytes */ | ||
56 | stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc)); | ||
57 | /* Received Packets */ | ||
58 | stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc)); | ||
59 | /* Received Bytes */ | ||
60 | stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc)); | ||
61 | } | ||
62 | |||
63 | return 0; | ||
64 | } | ||
65 | |||
66 | /** | ||
67 | * ixgbe_dcb_get_pfc_stats_82598 - Returns CBFC status data | ||
68 | * @hw: pointer to hardware structure | ||
69 | * @stats: pointer to statistics structure | ||
70 | * @tc_count: Number of elements in bwg_array. | ||
71 | * | ||
72 | * This function returns the CBFC status data for each of the Traffic Classes. | ||
73 | */ | ||
74 | s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw, | ||
75 | struct ixgbe_hw_stats *stats, | ||
76 | u8 tc_count) | ||
77 | { | ||
78 | int tc; | ||
79 | |||
80 | if (tc_count > MAX_TRAFFIC_CLASS) | ||
81 | return DCB_ERR_PARAM; | ||
82 | |||
83 | for (tc = 0; tc < tc_count; tc++) { | ||
84 | /* Priority XOFF Transmitted */ | ||
85 | stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc)); | ||
86 | /* Priority XOFF Received */ | ||
87 | stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(tc)); | ||
88 | } | ||
89 | |||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | /** | ||
94 | * ixgbe_dcb_config_packet_buffers_82598 - Configure packet buffers | 35 | * ixgbe_dcb_config_packet_buffers_82598 - Configure packet buffers |
95 | * @hw: pointer to hardware structure | 36 | * @hw: pointer to hardware structure |
96 | * @dcb_config: pointer to ixgbe_dcb_config structure | 37 | * @dcb_config: pointer to ixgbe_dcb_config structure |
@@ -137,7 +78,7 @@ static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw, | |||
137 | * | 78 | * |
138 | * Configure Rx Data Arbiter and credits for each traffic class. | 79 | * Configure Rx Data Arbiter and credits for each traffic class. |
139 | */ | 80 | */ |
140 | s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, | 81 | static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, |
141 | struct ixgbe_dcb_config *dcb_config) | 82 | struct ixgbe_dcb_config *dcb_config) |
142 | { | 83 | { |
143 | struct tc_bw_alloc *p; | 84 | struct tc_bw_alloc *p; |
@@ -194,7 +135,7 @@ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, | |||
194 | * | 135 | * |
195 | * Configure Tx Descriptor Arbiter and credits for each traffic class. | 136 | * Configure Tx Descriptor Arbiter and credits for each traffic class. |
196 | */ | 137 | */ |
197 | s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, | 138 | static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, |
198 | struct ixgbe_dcb_config *dcb_config) | 139 | struct ixgbe_dcb_config *dcb_config) |
199 | { | 140 | { |
200 | struct tc_bw_alloc *p; | 141 | struct tc_bw_alloc *p; |
@@ -242,7 +183,7 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, | |||
242 | * | 183 | * |
243 | * Configure Tx Data Arbiter and credits for each traffic class. | 184 | * Configure Tx Data Arbiter and credits for each traffic class. |
244 | */ | 185 | */ |
245 | s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, | 186 | static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, |
246 | struct ixgbe_dcb_config *dcb_config) | 187 | struct ixgbe_dcb_config *dcb_config) |
247 | { | 188 | { |
248 | struct tc_bw_alloc *p; | 189 | struct tc_bw_alloc *p; |
@@ -355,7 +296,7 @@ out: | |||
355 | * Configure queue statistics registers, all queues belonging to same traffic | 296 | * Configure queue statistics registers, all queues belonging to same traffic |
356 | * class uses a single set of queue statistics counters. | 297 | * class uses a single set of queue statistics counters. |
357 | */ | 298 | */ |
358 | s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) | 299 | static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) |
359 | { | 300 | { |
360 | u32 reg = 0; | 301 | u32 reg = 0; |
361 | u8 i = 0; | 302 | u8 i = 0; |
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ixgbe/ixgbe_dcb_82598.h index cc728fa092e2..abc03ccfa088 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_82598.h +++ b/drivers/net/ixgbe/ixgbe_dcb_82598.h | |||
@@ -72,21 +72,6 @@ | |||
72 | 72 | ||
73 | /* DCB PFC functions */ | 73 | /* DCB PFC functions */ |
74 | s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *); | 74 | s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *); |
75 | s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *, struct ixgbe_hw_stats *, | ||
76 | u8); | ||
77 | |||
78 | /* DCB traffic class stats */ | ||
79 | s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *); | ||
80 | s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *, struct ixgbe_hw_stats *, | ||
81 | u8); | ||
82 | |||
83 | /* DCB config arbiters */ | ||
84 | s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *, | ||
85 | struct ixgbe_dcb_config *); | ||
86 | s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *, | ||
87 | struct ixgbe_dcb_config *); | ||
88 | s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *, | ||
89 | struct ixgbe_dcb_config *); | ||
90 | 75 | ||
91 | /* DCB hw initialization */ | 76 | /* DCB hw initialization */ |
92 | s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *); | 77 | s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *); |
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c index 25b02fb425ac..67c219f86c3a 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_82599.c +++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c | |||
@@ -31,70 +31,13 @@ | |||
31 | #include "ixgbe_dcb_82599.h" | 31 | #include "ixgbe_dcb_82599.h" |
32 | 32 | ||
33 | /** | 33 | /** |
34 | * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class | ||
35 | * @hw: pointer to hardware structure | ||
36 | * @stats: pointer to statistics structure | ||
37 | * @tc_count: Number of elements in bwg_array. | ||
38 | * | ||
39 | * This function returns the status data for each of the Traffic Classes in use. | ||
40 | */ | ||
41 | s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw, | ||
42 | struct ixgbe_hw_stats *stats, | ||
43 | u8 tc_count) | ||
44 | { | ||
45 | int tc; | ||
46 | |||
47 | if (tc_count > MAX_TRAFFIC_CLASS) | ||
48 | return DCB_ERR_PARAM; | ||
49 | /* Statistics pertaining to each traffic class */ | ||
50 | for (tc = 0; tc < tc_count; tc++) { | ||
51 | /* Transmitted Packets */ | ||
52 | stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc)); | ||
53 | /* Transmitted Bytes */ | ||
54 | stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc)); | ||
55 | /* Received Packets */ | ||
56 | stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc)); | ||
57 | /* Received Bytes */ | ||
58 | stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc)); | ||
59 | } | ||
60 | |||
61 | return 0; | ||
62 | } | ||
63 | |||
64 | /** | ||
65 | * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data | ||
66 | * @hw: pointer to hardware structure | ||
67 | * @stats: pointer to statistics structure | ||
68 | * @tc_count: Number of elements in bwg_array. | ||
69 | * | ||
70 | * This function returns the CBFC status data for each of the Traffic Classes. | ||
71 | */ | ||
72 | s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw, | ||
73 | struct ixgbe_hw_stats *stats, | ||
74 | u8 tc_count) | ||
75 | { | ||
76 | int tc; | ||
77 | |||
78 | if (tc_count > MAX_TRAFFIC_CLASS) | ||
79 | return DCB_ERR_PARAM; | ||
80 | for (tc = 0; tc < tc_count; tc++) { | ||
81 | /* Priority XOFF Transmitted */ | ||
82 | stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc)); | ||
83 | /* Priority XOFF Received */ | ||
84 | stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc)); | ||
85 | } | ||
86 | |||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | /** | ||
91 | * ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers | 34 | * ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers |
92 | * @hw: pointer to hardware structure | 35 | * @hw: pointer to hardware structure |
93 | * @dcb_config: pointer to ixgbe_dcb_config structure | 36 | * @dcb_config: pointer to ixgbe_dcb_config structure |
94 | * | 37 | * |
95 | * Configure packet buffers for DCB mode. | 38 | * Configure packet buffers for DCB mode. |
96 | */ | 39 | */ |
97 | s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, | 40 | static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, |
98 | struct ixgbe_dcb_config *dcb_config) | 41 | struct ixgbe_dcb_config *dcb_config) |
99 | { | 42 | { |
100 | s32 ret_val = 0; | 43 | s32 ret_val = 0; |
@@ -136,7 +79,7 @@ s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, | |||
136 | * | 79 | * |
137 | * Configure Rx Packet Arbiter and credits for each traffic class. | 80 | * Configure Rx Packet Arbiter and credits for each traffic class. |
138 | */ | 81 | */ |
139 | s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, | 82 | static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, |
140 | struct ixgbe_dcb_config *dcb_config) | 83 | struct ixgbe_dcb_config *dcb_config) |
141 | { | 84 | { |
142 | struct tc_bw_alloc *p; | 85 | struct tc_bw_alloc *p; |
@@ -191,7 +134,7 @@ s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, | |||
191 | * | 134 | * |
192 | * Configure Tx Descriptor Arbiter and credits for each traffic class. | 135 | * Configure Tx Descriptor Arbiter and credits for each traffic class. |
193 | */ | 136 | */ |
194 | s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, | 137 | static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, |
195 | struct ixgbe_dcb_config *dcb_config) | 138 | struct ixgbe_dcb_config *dcb_config) |
196 | { | 139 | { |
197 | struct tc_bw_alloc *p; | 140 | struct tc_bw_alloc *p; |
@@ -238,7 +181,7 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, | |||
238 | * | 181 | * |
239 | * Configure Tx Packet Arbiter and credits for each traffic class. | 182 | * Configure Tx Packet Arbiter and credits for each traffic class. |
240 | */ | 183 | */ |
241 | s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, | 184 | static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, |
242 | struct ixgbe_dcb_config *dcb_config) | 185 | struct ixgbe_dcb_config *dcb_config) |
243 | { | 186 | { |
244 | struct tc_bw_alloc *p; | 187 | struct tc_bw_alloc *p; |
@@ -359,7 +302,7 @@ out: | |||
359 | * Configure queue statistics registers, all queues belonging to same traffic | 302 | * Configure queue statistics registers, all queues belonging to same traffic |
360 | * class uses a single set of queue statistics counters. | 303 | * class uses a single set of queue statistics counters. |
361 | */ | 304 | */ |
362 | s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw) | 305 | static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw) |
363 | { | 306 | { |
364 | u32 reg = 0; | 307 | u32 reg = 0; |
365 | u8 i = 0; | 308 | u8 i = 0; |
@@ -412,7 +355,7 @@ s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw) | |||
412 | * | 355 | * |
413 | * Configure general DCB parameters. | 356 | * Configure general DCB parameters. |
414 | */ | 357 | */ |
415 | s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw) | 358 | static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw) |
416 | { | 359 | { |
417 | u32 reg; | 360 | u32 reg; |
418 | u32 q; | 361 | u32 q; |
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h index 0f3f791e1e1d..18d7fbf6c292 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_82599.h +++ b/drivers/net/ixgbe/ixgbe_dcb_82599.h | |||
@@ -101,24 +101,6 @@ | |||
101 | /* DCB PFC functions */ | 101 | /* DCB PFC functions */ |
102 | s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, | 102 | s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, |
103 | struct ixgbe_dcb_config *dcb_config); | 103 | struct ixgbe_dcb_config *dcb_config); |
104 | s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw, | ||
105 | struct ixgbe_hw_stats *stats, | ||
106 | u8 tc_count); | ||
107 | |||
108 | /* DCB traffic class stats */ | ||
109 | s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw); | ||
110 | s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw, | ||
111 | struct ixgbe_hw_stats *stats, | ||
112 | u8 tc_count); | ||
113 | |||
114 | /* DCB config arbiters */ | ||
115 | s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, | ||
116 | struct ixgbe_dcb_config *dcb_config); | ||
117 | s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, | ||
118 | struct ixgbe_dcb_config *dcb_config); | ||
119 | s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, | ||
120 | struct ixgbe_dcb_config *dcb_config); | ||
121 | |||
122 | 104 | ||
123 | /* DCB hw initialization */ | 105 | /* DCB hw initialization */ |
124 | s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, | 106 | s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 95dbf60c8169..790a0dae1247 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -3374,7 +3374,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) | |||
3374 | if (hw->mac.type == ixgbe_mac_82598EB) | 3374 | if (hw->mac.type == ixgbe_mac_82598EB) |
3375 | netif_set_gso_max_size(adapter->netdev, 32768); | 3375 | netif_set_gso_max_size(adapter->netdev, 32768); |
3376 | 3376 | ||
3377 | ixgbe_dcb_check_config(&adapter->dcb_cfg); | ||
3378 | ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG); | 3377 | ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG); |
3379 | ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG); | 3378 | ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG); |
3380 | 3379 | ||
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c index d75f9148eb1f..435e0281e1f8 100644 --- a/drivers/net/ixgbe/ixgbe_mbx.c +++ b/drivers/net/ixgbe/ixgbe_mbx.c | |||
@@ -200,7 +200,8 @@ out: | |||
200 | * returns SUCCESS if it successfully received a message notification and | 200 | * returns SUCCESS if it successfully received a message notification and |
201 | * copied it into the receive buffer. | 201 | * copied it into the receive buffer. |
202 | **/ | 202 | **/ |
203 | s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) | 203 | static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, |
204 | u16 mbx_id) | ||
204 | { | 205 | { |
205 | struct ixgbe_mbx_info *mbx = &hw->mbx; | 206 | struct ixgbe_mbx_info *mbx = &hw->mbx; |
206 | s32 ret_val = IXGBE_ERR_MBX; | 207 | s32 ret_val = IXGBE_ERR_MBX; |
@@ -227,7 +228,7 @@ out: | |||
227 | * returns SUCCESS if it successfully copied message into the buffer and | 228 | * returns SUCCESS if it successfully copied message into the buffer and |
228 | * received an ack to that message within delay * timeout period | 229 | * received an ack to that message within delay * timeout period |
229 | **/ | 230 | **/ |
230 | s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, | 231 | static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, |
231 | u16 mbx_id) | 232 | u16 mbx_id) |
232 | { | 233 | { |
233 | struct ixgbe_mbx_info *mbx = &hw->mbx; | 234 | struct ixgbe_mbx_info *mbx = &hw->mbx; |
@@ -247,20 +248,6 @@ out: | |||
247 | return ret_val; | 248 | return ret_val; |
248 | } | 249 | } |
249 | 250 | ||
250 | /** | ||
251 | * ixgbe_init_mbx_ops_generic - Initialize MB function pointers | ||
252 | * @hw: pointer to the HW structure | ||
253 | * | ||
254 | * Setup the mailbox read and write message function pointers | ||
255 | **/ | ||
256 | void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw) | ||
257 | { | ||
258 | struct ixgbe_mbx_info *mbx = &hw->mbx; | ||
259 | |||
260 | mbx->ops.read_posted = ixgbe_read_posted_mbx; | ||
261 | mbx->ops.write_posted = ixgbe_write_posted_mbx; | ||
262 | } | ||
263 | |||
264 | static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) | 251 | static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) |
265 | { | 252 | { |
266 | u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index)); | 253 | u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index)); |
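With ixgbe_read_posted_mbx() and ixgbe_write_posted_mbx() made static above, the removed ixgbe_init_mbx_ops_generic() helper is no longer needed; presumably the mailbox ops table that references them is populated in the same file where the static helpers are visible. A hedged sketch of that function-pointer table pattern, with hypothetical types in place of struct ixgbe_mbx_operations:

    #include <linux/types.h>

    struct demo_mbx_ops {
        s32 (*read_posted)(u32 *msg, u16 size, u16 mbx_id);
        s32 (*write_posted)(u32 *msg, u16 size, u16 mbx_id);
    };

    static s32 demo_read_posted(u32 *msg, u16 size, u16 mbx_id)
    {
        return 0;   /* placeholder: poll for a message, then read it */
    }

    static s32 demo_write_posted(u32 *msg, u16 size, u16 mbx_id)
    {
        return 0;   /* placeholder: write the message, then wait for the ack */
    }

    /* The table is initialized where the static helpers are in scope. */
    static const struct demo_mbx_ops demo_ops = {
        .read_posted  = demo_read_posted,
        .write_posted = demo_write_posted,
    };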
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h index be7ab3309ab7..c5ae4b4da83a 100644 --- a/drivers/net/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ixgbe/ixgbe_mbx.h | |||
@@ -83,12 +83,9 @@ | |||
83 | 83 | ||
84 | s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16); | 84 | s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16); |
85 | s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16); | 85 | s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16); |
86 | s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16); | ||
87 | s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16); | ||
88 | s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16); | 86 | s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16); |
89 | s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16); | 87 | s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16); |
90 | s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); | 88 | s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); |
91 | void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw); | ||
92 | void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); | 89 | void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); |
93 | 90 | ||
94 | extern struct ixgbe_mbx_operations mbx_ops_82599; | 91 | extern struct ixgbe_mbx_operations mbx_ops_82599; |
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c index 49661a138e22..a6b720ae7fea 100644 --- a/drivers/net/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ixgbe/ixgbe_sriov.c | |||
@@ -43,8 +43,8 @@ | |||
43 | 43 | ||
44 | #include "ixgbe_sriov.h" | 44 | #include "ixgbe_sriov.h" |
45 | 45 | ||
46 | int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, | 46 | static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, |
47 | int entries, u16 *hash_list, u32 vf) | 47 | int entries, u16 *hash_list, u32 vf) |
48 | { | 48 | { |
49 | struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; | 49 | struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; |
50 | struct ixgbe_hw *hw = &adapter->hw; | 50 | struct ixgbe_hw *hw = &adapter->hw; |
@@ -104,13 +104,14 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) | |||
104 | } | 104 | } |
105 | } | 105 | } |
106 | 106 | ||
107 | int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf) | 107 | static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, |
108 | u32 vf) | ||
108 | { | 109 | { |
109 | return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); | 110 | return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); |
110 | } | 111 | } |
111 | 112 | ||
112 | 113 | ||
113 | void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) | 114 | static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) |
114 | { | 115 | { |
115 | u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); | 116 | u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); |
116 | vmolr |= (IXGBE_VMOLR_ROMPE | | 117 | vmolr |= (IXGBE_VMOLR_ROMPE | |
@@ -134,7 +135,7 @@ static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf) | |||
134 | IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0); | 135 | IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0); |
135 | } | 136 | } |
136 | 137 | ||
137 | inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) | 138 | static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) |
138 | { | 139 | { |
139 | struct ixgbe_hw *hw = &adapter->hw; | 140 | struct ixgbe_hw *hw = &adapter->hw; |
140 | int rar_entry = hw->mac.num_rar_entries - (vf + 1); | 141 | int rar_entry = hw->mac.num_rar_entries - (vf + 1); |
@@ -162,8 +163,8 @@ inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) | |||
162 | hw->mac.ops.clear_rar(hw, rar_entry); | 163 | hw->mac.ops.clear_rar(hw, rar_entry); |
163 | } | 164 | } |
164 | 165 | ||
165 | int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, | 166 | static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, |
166 | int vf, unsigned char *mac_addr) | 167 | int vf, unsigned char *mac_addr) |
167 | { | 168 | { |
168 | struct ixgbe_hw *hw = &adapter->hw; | 169 | struct ixgbe_hw *hw = &adapter->hw; |
169 | int rar_entry = hw->mac.num_rar_entries - (vf + 1); | 170 | int rar_entry = hw->mac.num_rar_entries - (vf + 1); |
@@ -197,7 +198,7 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) | |||
197 | return 0; | 198 | return 0; |
198 | } | 199 | } |
199 | 200 | ||
200 | inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) | 201 | static inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) |
201 | { | 202 | { |
202 | struct ixgbe_hw *hw = &adapter->hw; | 203 | struct ixgbe_hw *hw = &adapter->hw; |
203 | u32 reg; | 204 | u32 reg; |
diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h index 184730ecdfb6..9a424bb688c0 100644 --- a/drivers/net/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ixgbe/ixgbe_sriov.h | |||
@@ -28,16 +28,8 @@ | |||
28 | #ifndef _IXGBE_SRIOV_H_ | 28 | #ifndef _IXGBE_SRIOV_H_ |
29 | #define _IXGBE_SRIOV_H_ | 29 | #define _IXGBE_SRIOV_H_ |
30 | 30 | ||
31 | int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, | ||
32 | int entries, u16 *hash_list, u32 vf); | ||
33 | void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter); | 31 | void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter); |
34 | int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf); | ||
35 | void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe); | ||
36 | void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf); | ||
37 | void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf); | ||
38 | void ixgbe_msg_task(struct ixgbe_adapter *adapter); | 32 | void ixgbe_msg_task(struct ixgbe_adapter *adapter); |
39 | int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, | ||
40 | int vf, unsigned char *mac_addr); | ||
41 | int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); | 33 | int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); |
42 | void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter); | 34 | void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter); |
43 | void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter); | 35 | void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter); |
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c index 2861e78773cb..b64881f33f23 100644 --- a/drivers/s390/net/ctcm_mpc.c +++ b/drivers/s390/net/ctcm_mpc.c | |||
@@ -540,7 +540,7 @@ void ctc_mpc_dealloc_ch(int port_num) | |||
540 | 540 | ||
541 | CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_DEBUG, | 541 | CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_DEBUG, |
542 | "%s: %s: refcount = %d\n", | 542 | "%s: %s: refcount = %d\n", |
543 | CTCM_FUNTAIL, dev->name, atomic_read(&dev->refcnt)); | 543 | CTCM_FUNTAIL, dev->name, netdev_refcnt_read(dev)); |
544 | 544 | ||
545 | fsm_deltimer(&priv->restart_timer); | 545 | fsm_deltimer(&priv->restart_timer); |
546 | grp->channels_terminating = 0; | 546 | grp->channels_terminating = 0; |
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h index 2fceb19eb27b..1b6f86b2482d 100644 --- a/drivers/scsi/bnx2i/57xx_iscsi_constants.h +++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h | |||
@@ -120,6 +120,8 @@ | |||
120 | /* additional LOM specific iSCSI license not installed */ | 120 | /* additional LOM specific iSCSI license not installed */ |
121 | #define ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED (0x51) | 121 | #define ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED (0x51) |
122 | 122 | ||
123 | #define ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY (0x80) | ||
124 | |||
123 | /* SQ/RQ/CQ DB structure sizes */ | 125 | /* SQ/RQ/CQ DB structure sizes */ |
124 | #define ISCSI_SQ_DB_SIZE (16) | 126 | #define ISCSI_SQ_DB_SIZE (16) |
125 | #define ISCSI_RQ_DB_SIZE (16) | 127 | #define ISCSI_RQ_DB_SIZE (16) |
diff --git a/include/linux/phonet.h b/include/linux/phonet.h index e27cbf931740..26c8df786918 100644 --- a/include/linux/phonet.h +++ b/include/linux/phonet.h | |||
@@ -36,10 +36,9 @@ | |||
36 | /* Socket options for SOL_PNPIPE level */ | 36 | /* Socket options for SOL_PNPIPE level */ |
37 | #define PNPIPE_ENCAP 1 | 37 | #define PNPIPE_ENCAP 1 |
38 | #define PNPIPE_IFINDEX 2 | 38 | #define PNPIPE_IFINDEX 2 |
39 | #define PNPIPE_CREATE 3 | 39 | #define PNPIPE_PIPE_HANDLE 3 |
40 | #define PNPIPE_ENABLE 4 | 40 | #define PNPIPE_ENABLE 4 |
41 | /* unused slot */ | 41 | /* unused slot */ |
42 | #define PNPIPE_DESTROY 6 | ||
43 | 42 | ||
44 | #define PNADDR_ANY 0 | 43 | #define PNADDR_ANY 0 |
45 | #define PNADDR_BROADCAST 0xFC | 44 | #define PNADDR_BROADCAST 0xFC |
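The rename above also changes the argument encoding: PNPIPE_PIPE_HANDLE takes a plain integer handle, where PNPIPE_CREATE used to pack the remote pep address and the handle into one word (see the removed pep_setsockopt code further down). A minimal user-space sketch follows, assuming SOL_PNPIPE and the option constants are visible from the installed <linux/phonet.h> and socket headers; the descriptor and handle value are placeholders.

/* Hypothetical sketch: assign a pipe handle to a Phonet pipe socket
 * with the renamed option.  The handle is passed as a plain int; no
 * address bits are packed into it any more. */
#include <stdio.h>
#include <sys/socket.h>
#include <linux/phonet.h>

static int set_pipe_handle(int fd, int handle)
{
	if (setsockopt(fd, SOL_PNPIPE, PNPIPE_PIPE_HANDLE,
		       &handle, sizeof(handle)) < 0) {
		perror("setsockopt(PNPIPE_PIPE_HANDLE)");
		return -1;
	}
	return 0;
}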
diff --git a/include/net/phonet/pep.h b/include/net/phonet/pep.h index def6cfa3f451..b60b28c99e87 100644 --- a/include/net/phonet/pep.h +++ b/include/net/phonet/pep.h | |||
@@ -46,8 +46,8 @@ struct pep_sock { | |||
46 | u8 init_enable; /* auto-enable at creation */ | 46 | u8 init_enable; /* auto-enable at creation */ |
47 | u8 aligned; | 47 | u8 aligned; |
48 | #ifdef CONFIG_PHONET_PIPECTRLR | 48 | #ifdef CONFIG_PHONET_PIPECTRLR |
49 | u16 remote_pep; | 49 | u8 pipe_state; |
50 | u8 pipe_state; | 50 | struct sockaddr_pn remote_pep; |
51 | #endif | 51 | #endif |
52 | }; | 52 | }; |
53 | 53 | ||
diff --git a/net/phonet/pep.c b/net/phonet/pep.c index f818f76d297d..9c903f9e5079 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c | |||
@@ -88,15 +88,6 @@ static int pep_reply(struct sock *sk, struct sk_buff *oskb, | |||
88 | const struct pnpipehdr *oph = pnp_hdr(oskb); | 88 | const struct pnpipehdr *oph = pnp_hdr(oskb); |
89 | struct pnpipehdr *ph; | 89 | struct pnpipehdr *ph; |
90 | struct sk_buff *skb; | 90 | struct sk_buff *skb; |
91 | #ifdef CONFIG_PHONET_PIPECTRLR | ||
92 | const struct phonethdr *hdr = pn_hdr(oskb); | ||
93 | struct sockaddr_pn spn = { | ||
94 | .spn_family = AF_PHONET, | ||
95 | .spn_resource = 0xD9, | ||
96 | .spn_dev = hdr->pn_sdev, | ||
97 | .spn_obj = hdr->pn_sobj, | ||
98 | }; | ||
99 | #endif | ||
100 | 91 | ||
101 | skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority); | 92 | skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority); |
102 | if (!skb) | 93 | if (!skb) |
@@ -114,11 +105,7 @@ static int pep_reply(struct sock *sk, struct sk_buff *oskb, | |||
114 | ph->pipe_handle = oph->pipe_handle; | 105 | ph->pipe_handle = oph->pipe_handle; |
115 | ph->error_code = code; | 106 | ph->error_code = code; |
116 | 107 | ||
117 | #ifdef CONFIG_PHONET_PIPECTRLR | ||
118 | return pn_skb_send(sk, skb, &spn); | ||
119 | #else | ||
120 | return pn_skb_send(sk, skb, &pipe_srv); | 108 | return pn_skb_send(sk, skb, &pipe_srv); |
121 | #endif | ||
122 | } | 109 | } |
123 | 110 | ||
124 | #define PAD 0x00 | 111 | #define PAD 0x00 |
@@ -188,18 +175,13 @@ static int pipe_get_flow_info(struct sock *sk, struct sk_buff *skb, | |||
188 | return 0; | 175 | return 0; |
189 | } | 176 | } |
190 | 177 | ||
191 | static int pipe_handler_send_req(struct sock *sk, u16 dobj, u8 utid, | 178 | static int pipe_handler_send_req(struct sock *sk, u8 utid, |
192 | u8 msg_id, u8 p_handle, gfp_t priority) | 179 | u8 msg_id, gfp_t priority) |
193 | { | 180 | { |
194 | int len; | 181 | int len; |
195 | struct pnpipehdr *ph; | 182 | struct pnpipehdr *ph; |
196 | struct sk_buff *skb; | 183 | struct sk_buff *skb; |
197 | struct sockaddr_pn spn = { | 184 | struct pep_sock *pn = pep_sk(sk); |
198 | .spn_family = AF_PHONET, | ||
199 | .spn_resource = 0xD9, | ||
200 | .spn_dev = pn_dev(dobj), | ||
201 | .spn_obj = pn_obj(dobj), | ||
202 | }; | ||
203 | 185 | ||
204 | static const u8 data[4] = { | 186 | static const u8 data[4] = { |
205 | PAD, PAD, PAD, PAD, | 187 | PAD, PAD, PAD, PAD, |
@@ -235,30 +217,25 @@ static int pipe_handler_send_req(struct sock *sk, u16 dobj, u8 utid, | |||
235 | ph = pnp_hdr(skb); | 217 | ph = pnp_hdr(skb); |
236 | ph->utid = utid; | 218 | ph->utid = utid; |
237 | ph->message_id = msg_id; | 219 | ph->message_id = msg_id; |
238 | ph->pipe_handle = p_handle; | 220 | ph->pipe_handle = pn->pipe_handle; |
239 | ph->error_code = PN_PIPE_NO_ERROR; | 221 | ph->error_code = PN_PIPE_NO_ERROR; |
240 | 222 | ||
241 | return pn_skb_send(sk, skb, &spn); | 223 | return pn_skb_send(sk, skb, &pn->remote_pep); |
242 | } | 224 | } |
243 | 225 | ||
244 | static int pipe_handler_send_created_ind(struct sock *sk, u16 dobj, | 226 | static int pipe_handler_send_created_ind(struct sock *sk, |
245 | u8 utid, u8 p_handle, u8 msg_id, u8 tx_fc, u8 rx_fc) | 227 | u8 utid, u8 msg_id) |
246 | { | 228 | { |
247 | int err_code; | 229 | int err_code; |
248 | struct pnpipehdr *ph; | 230 | struct pnpipehdr *ph; |
249 | struct sk_buff *skb; | 231 | struct sk_buff *skb; |
250 | struct sockaddr_pn spn = { | ||
251 | .spn_family = AF_PHONET, | ||
252 | .spn_resource = 0xD9, | ||
253 | .spn_dev = pn_dev(dobj), | ||
254 | .spn_obj = pn_obj(dobj), | ||
255 | }; | ||
256 | 232 | ||
233 | struct pep_sock *pn = pep_sk(sk); | ||
257 | static u8 data[4] = { | 234 | static u8 data[4] = { |
258 | 0x03, 0x04, | 235 | 0x03, 0x04, |
259 | }; | 236 | }; |
260 | data[2] = tx_fc; | 237 | data[2] = pn->tx_fc; |
261 | data[3] = rx_fc; | 238 | data[3] = pn->rx_fc; |
262 | 239 | ||
263 | /* | 240 | /* |
264 | * actually, below is number of sub-blocks and not error code. | 241 | * actually, below is number of sub-blocks and not error code. |
@@ -282,24 +259,18 @@ static int pipe_handler_send_created_ind(struct sock *sk, u16 dobj, | |||
282 | ph = pnp_hdr(skb); | 259 | ph = pnp_hdr(skb); |
283 | ph->utid = utid; | 260 | ph->utid = utid; |
284 | ph->message_id = msg_id; | 261 | ph->message_id = msg_id; |
285 | ph->pipe_handle = p_handle; | 262 | ph->pipe_handle = pn->pipe_handle; |
286 | ph->error_code = err_code; | 263 | ph->error_code = err_code; |
287 | 264 | ||
288 | return pn_skb_send(sk, skb, &spn); | 265 | return pn_skb_send(sk, skb, &pn->remote_pep); |
289 | } | 266 | } |
290 | 267 | ||
291 | static int pipe_handler_send_ind(struct sock *sk, u16 dobj, u8 utid, | 268 | static int pipe_handler_send_ind(struct sock *sk, u8 utid, u8 msg_id) |
292 | u8 p_handle, u8 msg_id) | ||
293 | { | 269 | { |
294 | int err_code; | 270 | int err_code; |
295 | struct pnpipehdr *ph; | 271 | struct pnpipehdr *ph; |
296 | struct sk_buff *skb; | 272 | struct sk_buff *skb; |
297 | struct sockaddr_pn spn = { | 273 | struct pep_sock *pn = pep_sk(sk); |
298 | .spn_family = AF_PHONET, | ||
299 | .spn_resource = 0xD9, | ||
300 | .spn_dev = pn_dev(dobj), | ||
301 | .spn_obj = pn_obj(dobj), | ||
302 | }; | ||
303 | 274 | ||
304 | /* | 275 | /* |
305 | * actually, below is a filler. | 276 | * actually, below is a filler. |
@@ -321,10 +292,10 @@ static int pipe_handler_send_ind(struct sock *sk, u16 dobj, u8 utid, | |||
321 | ph = pnp_hdr(skb); | 292 | ph = pnp_hdr(skb); |
322 | ph->utid = utid; | 293 | ph->utid = utid; |
323 | ph->message_id = msg_id; | 294 | ph->message_id = msg_id; |
324 | ph->pipe_handle = p_handle; | 295 | ph->pipe_handle = pn->pipe_handle; |
325 | ph->error_code = err_code; | 296 | ph->error_code = err_code; |
326 | 297 | ||
327 | return pn_skb_send(sk, skb, &spn); | 298 | return pn_skb_send(sk, skb, &pn->remote_pep); |
328 | } | 299 | } |
329 | 300 | ||
330 | static int pipe_handler_enable_pipe(struct sock *sk, int enable) | 301 | static int pipe_handler_enable_pipe(struct sock *sk, int enable) |
@@ -339,34 +310,7 @@ static int pipe_handler_enable_pipe(struct sock *sk, int enable) | |||
339 | utid = PNS_PIPE_DISABLE_UTID; | 310 | utid = PNS_PIPE_DISABLE_UTID; |
340 | req = PNS_PEP_DISABLE_REQ; | 311 | req = PNS_PEP_DISABLE_REQ; |
341 | } | 312 | } |
342 | return pipe_handler_send_req(sk, pn->pn_sk.sobject, utid, req, | 313 | return pipe_handler_send_req(sk, utid, req, GFP_ATOMIC); |
343 | pn->pipe_handle, GFP_ATOMIC); | ||
344 | } | ||
345 | |||
346 | static int pipe_handler_create_pipe(struct sock *sk, int pipe_handle, int cmd) | ||
347 | { | ||
348 | int ret; | ||
349 | struct pep_sock *pn = pep_sk(sk); | ||
350 | |||
351 | switch (cmd) { | ||
352 | case PNPIPE_CREATE: | ||
353 | ret = pipe_handler_send_req(sk, pn->pn_sk.sobject, | ||
354 | PNS_PEP_CONNECT_UTID, PNS_PEP_CONNECT_REQ, | ||
355 | pipe_handle, GFP_ATOMIC); | ||
356 | break; | ||
357 | |||
358 | case PNPIPE_DESTROY: | ||
359 | ret = pipe_handler_send_req(sk, pn->remote_pep, | ||
360 | PNS_PEP_DISCONNECT_UTID, | ||
361 | PNS_PEP_DISCONNECT_REQ, | ||
362 | pn->pipe_handle, GFP_ATOMIC); | ||
363 | break; | ||
364 | |||
365 | default: | ||
366 | ret = -EINVAL; | ||
367 | } | ||
368 | |||
369 | return ret; | ||
370 | } | 314 | } |
371 | #endif | 315 | #endif |
372 | 316 | ||
@@ -434,14 +378,6 @@ static int pipe_snd_status(struct sock *sk, u8 type, u8 status, gfp_t priority) | |||
434 | struct pep_sock *pn = pep_sk(sk); | 378 | struct pep_sock *pn = pep_sk(sk); |
435 | struct pnpipehdr *ph; | 379 | struct pnpipehdr *ph; |
436 | struct sk_buff *skb; | 380 | struct sk_buff *skb; |
437 | #ifdef CONFIG_PHONET_PIPECTRLR | ||
438 | struct sockaddr_pn spn = { | ||
439 | .spn_family = AF_PHONET, | ||
440 | .spn_resource = 0xD9, | ||
441 | .spn_dev = pn_dev(pn->remote_pep), | ||
442 | .spn_obj = pn_obj(pn->remote_pep), | ||
443 | }; | ||
444 | #endif | ||
445 | 381 | ||
446 | skb = alloc_skb(MAX_PNPIPE_HEADER + 4, priority); | 382 | skb = alloc_skb(MAX_PNPIPE_HEADER + 4, priority); |
447 | if (!skb) | 383 | if (!skb) |
@@ -462,7 +398,7 @@ static int pipe_snd_status(struct sock *sk, u8 type, u8 status, gfp_t priority) | |||
462 | ph->data[4] = status; | 398 | ph->data[4] = status; |
463 | 399 | ||
464 | #ifdef CONFIG_PHONET_PIPECTRLR | 400 | #ifdef CONFIG_PHONET_PIPECTRLR |
465 | return pn_skb_send(sk, skb, &spn); | 401 | return pn_skb_send(sk, skb, &pn->remote_pep); |
466 | #else | 402 | #else |
467 | return pn_skb_send(sk, skb, &pipe_srv); | 403 | return pn_skb_send(sk, skb, &pipe_srv); |
468 | #endif | 404 | #endif |
@@ -582,12 +518,6 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
582 | struct pnpipehdr *hdr = pnp_hdr(skb); | 518 | struct pnpipehdr *hdr = pnp_hdr(skb); |
583 | struct sk_buff_head *queue; | 519 | struct sk_buff_head *queue; |
584 | int err = 0; | 520 | int err = 0; |
585 | #ifdef CONFIG_PHONET_PIPECTRLR | ||
586 | struct phonethdr *ph = pn_hdr(skb); | ||
587 | static u8 host_pref_rx_fc[3], host_req_tx_fc[3]; | ||
588 | u8 remote_pref_rx_fc[3], remote_req_tx_fc[3]; | ||
589 | u8 negotiated_rx_fc, negotiated_tx_fc; | ||
590 | #endif | ||
591 | 521 | ||
592 | BUG_ON(sk->sk_state == TCP_CLOSE_WAIT); | 522 | BUG_ON(sk->sk_state == TCP_CLOSE_WAIT); |
593 | 523 | ||
@@ -596,40 +526,6 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
596 | pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE); | 526 | pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE); |
597 | break; | 527 | break; |
598 | 528 | ||
599 | #ifdef CONFIG_PHONET_PIPECTRLR | ||
600 | case PNS_PEP_CONNECT_RESP: | ||
601 | if ((ph->pn_sdev == pn_dev(pn->remote_pep)) && | ||
602 | (ph->pn_sobj == pn_obj(pn->remote_pep))) { | ||
603 | pipe_get_flow_info(sk, skb, remote_pref_rx_fc, | ||
604 | remote_req_tx_fc); | ||
605 | |||
606 | negotiated_tx_fc = pipe_negotiate_fc(remote_req_tx_fc, | ||
607 | host_pref_rx_fc, | ||
608 | sizeof(host_pref_rx_fc)); | ||
609 | negotiated_rx_fc = pipe_negotiate_fc(host_req_tx_fc, | ||
610 | remote_pref_rx_fc, | ||
611 | sizeof(host_pref_rx_fc)); | ||
612 | |||
613 | pn->pipe_state = PIPE_DISABLED; | ||
614 | pipe_handler_send_created_ind(sk, pn->remote_pep, | ||
615 | PNS_PIPE_CREATED_IND_UTID, | ||
616 | pn->pipe_handle, PNS_PIPE_CREATED_IND, | ||
617 | negotiated_tx_fc, negotiated_rx_fc); | ||
618 | pipe_handler_send_created_ind(sk, pn->pn_sk.sobject, | ||
619 | PNS_PIPE_CREATED_IND_UTID, | ||
620 | pn->pipe_handle, PNS_PIPE_CREATED_IND, | ||
621 | negotiated_tx_fc, negotiated_rx_fc); | ||
622 | } else { | ||
623 | pipe_handler_send_req(sk, pn->remote_pep, | ||
624 | PNS_PEP_CONNECT_UTID, | ||
625 | PNS_PEP_CONNECT_REQ, pn->pipe_handle, | ||
626 | GFP_ATOMIC); | ||
627 | pipe_get_flow_info(sk, skb, host_pref_rx_fc, | ||
628 | host_req_tx_fc); | ||
629 | } | ||
630 | break; | ||
631 | #endif | ||
632 | |||
633 | case PNS_PEP_DISCONNECT_REQ: | 529 | case PNS_PEP_DISCONNECT_REQ: |
634 | pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC); | 530 | pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC); |
635 | sk->sk_state = TCP_CLOSE_WAIT; | 531 | sk->sk_state = TCP_CLOSE_WAIT; |
@@ -640,10 +536,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
640 | #ifdef CONFIG_PHONET_PIPECTRLR | 536 | #ifdef CONFIG_PHONET_PIPECTRLR |
641 | case PNS_PEP_DISCONNECT_RESP: | 537 | case PNS_PEP_DISCONNECT_RESP: |
642 | pn->pipe_state = PIPE_IDLE; | 538 | pn->pipe_state = PIPE_IDLE; |
643 | pipe_handler_send_req(sk, pn->pn_sk.sobject, | 539 | sk->sk_state = TCP_CLOSE; |
644 | PNS_PEP_DISCONNECT_UTID, | ||
645 | PNS_PEP_DISCONNECT_REQ, pn->pipe_handle, | ||
646 | GFP_KERNEL); | ||
647 | break; | 540 | break; |
648 | #endif | 541 | #endif |
649 | 542 | ||
@@ -654,21 +547,18 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
654 | 547 | ||
655 | #ifdef CONFIG_PHONET_PIPECTRLR | 548 | #ifdef CONFIG_PHONET_PIPECTRLR |
656 | case PNS_PEP_ENABLE_RESP: | 549 | case PNS_PEP_ENABLE_RESP: |
657 | if ((ph->pn_sdev == pn_dev(pn->remote_pep)) && | 550 | pn->pipe_state = PIPE_ENABLED; |
658 | (ph->pn_sobj == pn_obj(pn->remote_pep))) { | 551 | pipe_handler_send_ind(sk, PNS_PIPE_ENABLED_IND_UTID, |
659 | pn->pipe_state = PIPE_ENABLED; | 552 | PNS_PIPE_ENABLED_IND); |
660 | pipe_handler_send_ind(sk, pn->remote_pep, | ||
661 | PNS_PIPE_ENABLED_IND_UTID, | ||
662 | pn->pipe_handle, PNS_PIPE_ENABLED_IND); | ||
663 | pipe_handler_send_ind(sk, pn->pn_sk.sobject, | ||
664 | PNS_PIPE_ENABLED_IND_UTID, | ||
665 | pn->pipe_handle, PNS_PIPE_ENABLED_IND); | ||
666 | } else | ||
667 | pipe_handler_send_req(sk, pn->remote_pep, | ||
668 | PNS_PIPE_ENABLE_UTID, | ||
669 | PNS_PEP_ENABLE_REQ, pn->pipe_handle, | ||
670 | GFP_KERNEL); | ||
671 | 553 | ||
554 | if (!pn_flow_safe(pn->tx_fc)) { | ||
555 | atomic_set(&pn->tx_credits, 1); | ||
556 | sk->sk_write_space(sk); | ||
557 | } | ||
558 | if (sk->sk_state == TCP_ESTABLISHED) | ||
559 | break; /* Nothing to do */ | ||
560 | sk->sk_state = TCP_ESTABLISHED; | ||
561 | pipe_grant_credits(sk); | ||
672 | break; | 562 | break; |
673 | #endif | 563 | #endif |
674 | 564 | ||
@@ -692,22 +582,12 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
692 | 582 | ||
693 | #ifdef CONFIG_PHONET_PIPECTRLR | 583 | #ifdef CONFIG_PHONET_PIPECTRLR |
694 | case PNS_PEP_DISABLE_RESP: | 584 | case PNS_PEP_DISABLE_RESP: |
695 | if ((ph->pn_sdev == pn_dev(pn->remote_pep)) && | 585 | pn->pipe_state = PIPE_DISABLED; |
696 | (ph->pn_sobj == pn_obj(pn->remote_pep))) { | 586 | atomic_set(&pn->tx_credits, 0); |
697 | pn->pipe_state = PIPE_DISABLED; | 587 | pipe_handler_send_ind(sk, PNS_PIPE_DISABLED_IND_UTID, |
698 | pipe_handler_send_ind(sk, pn->remote_pep, | 588 | PNS_PIPE_DISABLED_IND); |
699 | PNS_PIPE_DISABLED_IND_UTID, | 589 | sk->sk_state = TCP_SYN_RECV; |
700 | pn->pipe_handle, | 590 | pn->rx_credits = 0; |
701 | PNS_PIPE_DISABLED_IND); | ||
702 | pipe_handler_send_ind(sk, pn->pn_sk.sobject, | ||
703 | PNS_PIPE_DISABLED_IND_UTID, | ||
704 | pn->pipe_handle, | ||
705 | PNS_PIPE_DISABLED_IND); | ||
706 | } else | ||
707 | pipe_handler_send_req(sk, pn->remote_pep, | ||
708 | PNS_PIPE_DISABLE_UTID, | ||
709 | PNS_PEP_DISABLE_REQ, pn->pipe_handle, | ||
710 | GFP_KERNEL); | ||
711 | break; | 591 | break; |
712 | #endif | 592 | #endif |
713 | 593 | ||
@@ -802,6 +682,42 @@ static void pipe_destruct(struct sock *sk) | |||
802 | skb_queue_purge(&pn->ctrlreq_queue); | 682 | skb_queue_purge(&pn->ctrlreq_queue); |
803 | } | 683 | } |
804 | 684 | ||
685 | #ifdef CONFIG_PHONET_PIPECTRLR | ||
686 | static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb) | ||
687 | { | ||
688 | struct pep_sock *pn = pep_sk(sk); | ||
689 | u8 host_pref_rx_fc[3] = {3, 2, 1}, host_req_tx_fc[3] = {3, 2, 1}; | ||
690 | u8 remote_pref_rx_fc[3], remote_req_tx_fc[3]; | ||
691 | u8 negotiated_rx_fc, negotiated_tx_fc; | ||
692 | int ret; | ||
693 | |||
694 | pipe_get_flow_info(sk, skb, remote_pref_rx_fc, | ||
695 | remote_req_tx_fc); | ||
696 | negotiated_tx_fc = pipe_negotiate_fc(remote_req_tx_fc, | ||
697 | host_pref_rx_fc, | ||
698 | sizeof(host_pref_rx_fc)); | ||
699 | negotiated_rx_fc = pipe_negotiate_fc(host_req_tx_fc, | ||
700 | remote_pref_rx_fc, | ||
701 | sizeof(host_pref_rx_fc)); | ||
702 | |||
703 | pn->pipe_state = PIPE_DISABLED; | ||
704 | sk->sk_state = TCP_SYN_RECV; | ||
705 | sk->sk_backlog_rcv = pipe_do_rcv; | ||
706 | sk->sk_destruct = pipe_destruct; | ||
707 | pn->rx_credits = 0; | ||
708 | pn->rx_fc = negotiated_rx_fc; | ||
709 | pn->tx_fc = negotiated_tx_fc; | ||
710 | sk->sk_state_change(sk); | ||
711 | |||
712 | ret = pipe_handler_send_created_ind(sk, | ||
713 | PNS_PIPE_CREATED_IND_UTID, | ||
714 | PNS_PIPE_CREATED_IND | ||
715 | ); | ||
716 | |||
717 | return ret; | ||
718 | } | ||
719 | #endif | ||
720 | |||
805 | static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb) | 721 | static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb) |
806 | { | 722 | { |
807 | struct sock *newsk; | 723 | struct sock *newsk; |
@@ -884,9 +800,6 @@ static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb) | |||
884 | newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL; | 800 | newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL; |
885 | newpn->init_enable = enabled; | 801 | newpn->init_enable = enabled; |
886 | newpn->aligned = aligned; | 802 | newpn->aligned = aligned; |
887 | #ifdef CONFIG_PHONET_PIPECTRLR | ||
888 | newpn->remote_pep = pn->remote_pep; | ||
889 | #endif | ||
890 | 803 | ||
891 | BUG_ON(!skb_queue_empty(&newsk->sk_receive_queue)); | 804 | BUG_ON(!skb_queue_empty(&newsk->sk_receive_queue)); |
892 | skb_queue_head(&newsk->sk_receive_queue, skb); | 805 | skb_queue_head(&newsk->sk_receive_queue, skb); |
@@ -968,6 +881,12 @@ static int pep_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
968 | err = pep_connreq_rcv(sk, skb); | 881 | err = pep_connreq_rcv(sk, skb); |
969 | break; | 882 | break; |
970 | 883 | ||
884 | #ifdef CONFIG_PHONET_PIPECTRLR | ||
885 | case PNS_PEP_CONNECT_RESP: | ||
886 | err = pep_connresp_rcv(sk, skb); | ||
887 | break; | ||
888 | #endif | ||
889 | |||
971 | case PNS_PEP_DISCONNECT_REQ: | 890 | case PNS_PEP_DISCONNECT_REQ: |
972 | pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC); | 891 | pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC); |
973 | break; | 892 | break; |
@@ -1032,6 +951,18 @@ static void pep_sock_close(struct sock *sk, long timeout) | |||
1032 | /* Forcefully remove dangling Phonet pipe */ | 951 | /* Forcefully remove dangling Phonet pipe */ |
1033 | pipe_do_remove(sk); | 952 | pipe_do_remove(sk); |
1034 | 953 | ||
954 | #ifdef CONFIG_PHONET_PIPECTRLR | ||
955 | if (pn->pipe_state != PIPE_IDLE) { | ||
956 | /* send pep disconnect request */ | ||
957 | pipe_handler_send_req(sk, | ||
958 | PNS_PEP_DISCONNECT_UTID, PNS_PEP_DISCONNECT_REQ, | ||
959 | GFP_KERNEL); | ||
960 | |||
961 | pn->pipe_state = PIPE_IDLE; | ||
962 | sk->sk_state = TCP_CLOSE; | ||
963 | } | ||
964 | #endif | ||
965 | |||
1035 | ifindex = pn->ifindex; | 966 | ifindex = pn->ifindex; |
1036 | pn->ifindex = 0; | 967 | pn->ifindex = 0; |
1037 | release_sock(sk); | 968 | release_sock(sk); |
@@ -1108,6 +1039,20 @@ out: | |||
1108 | return newsk; | 1039 | return newsk; |
1109 | } | 1040 | } |
1110 | 1041 | ||
1042 | #ifdef CONFIG_PHONET_PIPECTRLR | ||
1043 | static int pep_sock_connect(struct sock *sk, struct sockaddr *addr, int len) | ||
1044 | { | ||
1045 | struct pep_sock *pn = pep_sk(sk); | ||
1046 | struct sockaddr_pn *spn = (struct sockaddr_pn *)addr; | ||
1047 | |||
1048 | memcpy(&pn->remote_pep, spn, sizeof(struct sockaddr_pn)); | ||
1049 | |||
1050 | return pipe_handler_send_req(sk, | ||
1051 | PNS_PEP_CONNECT_UTID, PNS_PEP_CONNECT_REQ, | ||
1052 | GFP_ATOMIC); | ||
1053 | } | ||
1054 | #endif | ||
1055 | |||
1111 | static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg) | 1056 | static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg) |
1112 | { | 1057 | { |
1113 | struct pep_sock *pn = pep_sk(sk); | 1058 | struct pep_sock *pn = pep_sk(sk); |
@@ -1149,10 +1094,6 @@ static int pep_setsockopt(struct sock *sk, int level, int optname, | |||
1149 | { | 1094 | { |
1150 | struct pep_sock *pn = pep_sk(sk); | 1095 | struct pep_sock *pn = pep_sk(sk); |
1151 | int val = 0, err = 0; | 1096 | int val = 0, err = 0; |
1152 | #ifdef CONFIG_PHONET_PIPECTRLR | ||
1153 | int remote_pep; | ||
1154 | int pipe_handle; | ||
1155 | #endif | ||
1156 | 1097 | ||
1157 | if (level != SOL_PNPIPE) | 1098 | if (level != SOL_PNPIPE) |
1158 | return -ENOPROTOOPT; | 1099 | return -ENOPROTOOPT; |
@@ -1164,28 +1105,15 @@ static int pep_setsockopt(struct sock *sk, int level, int optname, | |||
1164 | lock_sock(sk); | 1105 | lock_sock(sk); |
1165 | switch (optname) { | 1106 | switch (optname) { |
1166 | #ifdef CONFIG_PHONET_PIPECTRLR | 1107 | #ifdef CONFIG_PHONET_PIPECTRLR |
1167 | case PNPIPE_CREATE: | 1108 | case PNPIPE_PIPE_HANDLE: |
1168 | if (val) { | 1109 | if (val) { |
1169 | if (pn->pipe_state > PIPE_IDLE) { | 1110 | if (pn->pipe_state > PIPE_IDLE) { |
1170 | err = -EFAULT; | 1111 | err = -EFAULT; |
1171 | break; | 1112 | break; |
1172 | } | 1113 | } |
1173 | remote_pep = val & 0xFFFF; | 1114 | pn->pipe_handle = val; |
1174 | pipe_handle = (val >> 16) & 0xFF; | ||
1175 | pn->remote_pep = remote_pep; | ||
1176 | err = pipe_handler_create_pipe(sk, pipe_handle, | ||
1177 | PNPIPE_CREATE); | ||
1178 | break; | ||
1179 | } | ||
1180 | |||
1181 | case PNPIPE_DESTROY: | ||
1182 | if (pn->pipe_state < PIPE_DISABLED) { | ||
1183 | err = -EFAULT; | ||
1184 | break; | 1115 | break; |
1185 | } | 1116 | } |
1186 | |||
1187 | err = pipe_handler_create_pipe(sk, 0x0, PNPIPE_DESTROY); | ||
1188 | break; | ||
1189 | #endif | 1117 | #endif |
1190 | 1118 | ||
1191 | case PNPIPE_ENCAP: | 1119 | case PNPIPE_ENCAP: |
@@ -1278,14 +1206,6 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb) | |||
1278 | struct pep_sock *pn = pep_sk(sk); | 1206 | struct pep_sock *pn = pep_sk(sk); |
1279 | struct pnpipehdr *ph; | 1207 | struct pnpipehdr *ph; |
1280 | int err; | 1208 | int err; |
1281 | #ifdef CONFIG_PHONET_PIPECTRLR | ||
1282 | struct sockaddr_pn spn = { | ||
1283 | .spn_family = AF_PHONET, | ||
1284 | .spn_resource = 0xD9, | ||
1285 | .spn_dev = pn_dev(pn->remote_pep), | ||
1286 | .spn_obj = pn_obj(pn->remote_pep), | ||
1287 | }; | ||
1288 | #endif | ||
1289 | 1209 | ||
1290 | if (pn_flow_safe(pn->tx_fc) && | 1210 | if (pn_flow_safe(pn->tx_fc) && |
1291 | !atomic_add_unless(&pn->tx_credits, -1, 0)) { | 1211 | !atomic_add_unless(&pn->tx_credits, -1, 0)) { |
@@ -1304,7 +1224,7 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb) | |||
1304 | ph->message_id = PNS_PIPE_DATA; | 1224 | ph->message_id = PNS_PIPE_DATA; |
1305 | ph->pipe_handle = pn->pipe_handle; | 1225 | ph->pipe_handle = pn->pipe_handle; |
1306 | #ifdef CONFIG_PHONET_PIPECTRLR | 1226 | #ifdef CONFIG_PHONET_PIPECTRLR |
1307 | err = pn_skb_send(sk, skb, &spn); | 1227 | err = pn_skb_send(sk, skb, &pn->remote_pep); |
1308 | #else | 1228 | #else |
1309 | err = pn_skb_send(sk, skb, &pipe_srv); | 1229 | err = pn_skb_send(sk, skb, &pipe_srv); |
1310 | #endif | 1230 | #endif |
@@ -1504,6 +1424,8 @@ static void pep_sock_unhash(struct sock *sk) | |||
1504 | struct sock *skparent = NULL; | 1424 | struct sock *skparent = NULL; |
1505 | 1425 | ||
1506 | lock_sock(sk); | 1426 | lock_sock(sk); |
1427 | |||
1428 | #ifndef CONFIG_PHONET_PIPECTRLR | ||
1507 | if ((1 << sk->sk_state) & ~(TCPF_CLOSE|TCPF_LISTEN)) { | 1429 | if ((1 << sk->sk_state) & ~(TCPF_CLOSE|TCPF_LISTEN)) { |
1508 | skparent = pn->listener; | 1430 | skparent = pn->listener; |
1509 | release_sock(sk); | 1431 | release_sock(sk); |
@@ -1513,6 +1435,7 @@ static void pep_sock_unhash(struct sock *sk) | |||
1513 | sk_del_node_init(sk); | 1435 | sk_del_node_init(sk); |
1514 | sk = skparent; | 1436 | sk = skparent; |
1515 | } | 1437 | } |
1438 | #endif | ||
1516 | /* Unhash a listening sock only when it is closed | 1439 | /* Unhash a listening sock only when it is closed |
1517 | * and all of its active connected pipes are closed. */ | 1440 | * and all of its active connected pipes are closed. */ |
1518 | if (hlist_empty(&pn->hlist)) | 1441 | if (hlist_empty(&pn->hlist)) |
@@ -1526,6 +1449,9 @@ static void pep_sock_unhash(struct sock *sk) | |||
1526 | static struct proto pep_proto = { | 1449 | static struct proto pep_proto = { |
1527 | .close = pep_sock_close, | 1450 | .close = pep_sock_close, |
1528 | .accept = pep_sock_accept, | 1451 | .accept = pep_sock_accept, |
1452 | #ifdef CONFIG_PHONET_PIPECTRLR | ||
1453 | .connect = pep_sock_connect, | ||
1454 | #endif | ||
1529 | .ioctl = pep_ioctl, | 1455 | .ioctl = pep_ioctl, |
1530 | .init = pep_init, | 1456 | .init = pep_init, |
1531 | .setsockopt = pep_setsockopt, | 1457 | .setsockopt = pep_setsockopt, |
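The new pep_connresp_rcv() above is where both directions of flow control get negotiated, from the hard-coded host preference lists {3, 2, 1} and the lists parsed out of the remote's CONNECT response. The helper below is only a stand-alone sketch of that kind of negotiation, not the kernel's pipe_negotiate_fc() (whose body is not part of this patch); it assumes the preference arrays are ordered best-first and uses a zero fallback when nothing matches.

/* Hypothetical sketch of "pick the best mutually supported
 * flow-control method".  Assumes both lists are ordered best-first;
 * returns 0 when no common method is found (fallback assumed, not
 * taken from the patch). */
#include <stddef.h>

static unsigned char negotiate_fc_sketch(const unsigned char *host_pref,
					 size_t host_len,
					 const unsigned char *remote_pref,
					 size_t remote_len)
{
	size_t i, j;

	for (i = 0; i < host_len; i++)
		for (j = 0; j < remote_len; j++)
			if (host_pref[i] == remote_pref[j])
				return host_pref[i];
	return 0;
}

With host lists of {3, 2, 1} as in pep_connresp_rcv(), such a sketch would return the highest method number the remote side also advertises.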
diff --git a/net/phonet/socket.c b/net/phonet/socket.c index aca8fba099e9..25f746d20c1f 100644 --- a/net/phonet/socket.c +++ b/net/phonet/socket.c | |||
@@ -225,6 +225,101 @@ static int pn_socket_autobind(struct socket *sock) | |||
225 | return 0; /* socket was already bound */ | 225 | return 0; /* socket was already bound */ |
226 | } | 226 | } |
227 | 227 | ||
228 | #ifdef CONFIG_PHONET_PIPECTRLR | ||
229 | static int pn_socket_connect(struct socket *sock, struct sockaddr *addr, | ||
230 | int len, int flags) | ||
231 | { | ||
232 | struct sock *sk = sock->sk; | ||
233 | struct sockaddr_pn *spn = (struct sockaddr_pn *)addr; | ||
234 | long timeo; | ||
235 | int err; | ||
236 | |||
237 | if (len < sizeof(struct sockaddr_pn)) | ||
238 | return -EINVAL; | ||
239 | if (spn->spn_family != AF_PHONET) | ||
240 | return -EAFNOSUPPORT; | ||
241 | |||
242 | lock_sock(sk); | ||
243 | |||
244 | switch (sock->state) { | ||
245 | case SS_UNCONNECTED: | ||
246 | sk->sk_state = TCP_CLOSE; | ||
247 | break; | ||
248 | case SS_CONNECTING: | ||
249 | switch (sk->sk_state) { | ||
250 | case TCP_SYN_RECV: | ||
251 | sock->state = SS_CONNECTED; | ||
252 | err = -EISCONN; | ||
253 | goto out; | ||
254 | case TCP_CLOSE: | ||
255 | err = -EALREADY; | ||
256 | if (flags & O_NONBLOCK) | ||
257 | goto out; | ||
258 | goto wait_connect; | ||
259 | } | ||
260 | break; | ||
261 | case SS_CONNECTED: | ||
262 | switch (sk->sk_state) { | ||
263 | case TCP_SYN_RECV: | ||
264 | err = -EISCONN; | ||
265 | goto out; | ||
266 | case TCP_CLOSE: | ||
267 | sock->state = SS_UNCONNECTED; | ||
268 | break; | ||
269 | } | ||
270 | break; | ||
271 | case SS_DISCONNECTING: | ||
272 | case SS_FREE: | ||
273 | break; | ||
274 | } | ||
275 | sk->sk_state = TCP_CLOSE; | ||
276 | sk_stream_kill_queues(sk); | ||
277 | |||
278 | sock->state = SS_CONNECTING; | ||
279 | err = sk->sk_prot->connect(sk, addr, len); | ||
280 | if (err < 0) { | ||
281 | sock->state = SS_UNCONNECTED; | ||
282 | sk->sk_state = TCP_CLOSE; | ||
283 | goto out; | ||
284 | } | ||
285 | |||
286 | err = -EINPROGRESS; | ||
287 | wait_connect: | ||
288 | if (sk->sk_state != TCP_SYN_RECV && (flags & O_NONBLOCK)) | ||
289 | goto out; | ||
290 | |||
291 | timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); | ||
292 | release_sock(sk); | ||
293 | |||
294 | err = -ERESTARTSYS; | ||
295 | timeo = wait_event_interruptible_timeout(*sk_sleep(sk), | ||
296 | sk->sk_state != TCP_CLOSE, | ||
297 | timeo); | ||
298 | |||
299 | lock_sock(sk); | ||
300 | if (timeo < 0) | ||
301 | goto out; /* -ERESTARTSYS */ | ||
302 | |||
303 | err = -ETIMEDOUT; | ||
304 | if (timeo == 0 && sk->sk_state != TCP_SYN_RECV) | ||
305 | goto out; | ||
306 | |||
307 | if (sk->sk_state != TCP_SYN_RECV) { | ||
308 | sock->state = SS_UNCONNECTED; | ||
309 | err = sock_error(sk); | ||
310 | if (!err) | ||
311 | err = -ECONNREFUSED; | ||
312 | goto out; | ||
313 | } | ||
314 | sock->state = SS_CONNECTED; | ||
315 | err = 0; | ||
316 | |||
317 | out: | ||
318 | release_sock(sk); | ||
319 | return err; | ||
320 | } | ||
321 | #endif | ||
322 | |||
228 | static int pn_socket_accept(struct socket *sock, struct socket *newsock, | 323 | static int pn_socket_accept(struct socket *sock, struct socket *newsock, |
229 | int flags) | 324 | int flags) |
230 | { | 325 | { |
@@ -393,7 +488,11 @@ const struct proto_ops phonet_stream_ops = { | |||
393 | .owner = THIS_MODULE, | 488 | .owner = THIS_MODULE, |
394 | .release = pn_socket_release, | 489 | .release = pn_socket_release, |
395 | .bind = pn_socket_bind, | 490 | .bind = pn_socket_bind, |
491 | #ifdef CONFIG_PHONET_PIPECTRLR | ||
492 | .connect = pn_socket_connect, | ||
493 | #else | ||
396 | .connect = sock_no_connect, | 494 | .connect = sock_no_connect, |
495 | #endif | ||
397 | .socketpair = sock_no_socketpair, | 496 | .socketpair = sock_no_socketpair, |
398 | .accept = pn_socket_accept, | 497 | .accept = pn_socket_accept, |
399 | .getname = pn_socket_getname, | 498 | .getname = pn_socket_getname, |
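With pn_socket_connect() plugged into phonet_stream_ops, a CONFIG_PHONET_PIPECTRLR kernel lets user space trigger pipe creation with an ordinary connect() on the sequenced socket instead of the old PNPIPE_CREATE/PNPIPE_DESTROY options. A hypothetical user-space sketch follows; spn_dev and spn_obj are placeholder values, and 0xD9 is the pipe-controller resource that the removed kernel paths used to hard-code when they built the destination address themselves.

/* Hypothetical sketch: connect a Phonet pipe socket to the remote
 * pipe-end point.  The kernel side copies this address into
 * pep_sock.remote_pep and sends PNS_PEP_CONNECT_REQ. */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/phonet.h>

static int connect_pipe(int fd, unsigned char dev, unsigned char obj)
{
	struct sockaddr_pn spn;

	memset(&spn, 0, sizeof(spn));
	spn.spn_family = AF_PHONET;
	spn.spn_resource = 0xD9;	/* pipe-controller resource (assumption) */
	spn.spn_dev = dev;
	spn.spn_obj = obj;

	if (connect(fd, (struct sockaddr *)&spn, sizeof(spn)) < 0) {
		perror("connect(AF_PHONET)");
		return -1;
	}
	return 0;
}

On a blocking socket, pn_socket_connect() waits (up to the send timeout) for the pipe to reach the created state; with O_NONBLOCK it returns -EINPROGRESS and the caller polls for completion.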
diff --git a/net/tipc/config.c b/net/tipc/config.c index 961d1b097146..c429b0d488a3 100644 --- a/net/tipc/config.c +++ b/net/tipc/config.c | |||
@@ -120,139 +120,6 @@ struct sk_buff *tipc_cfg_reply_string_type(u16 tlv_type, char *string) | |||
120 | return buf; | 120 | return buf; |
121 | } | 121 | } |
122 | 122 | ||
123 | |||
124 | #if 0 | ||
125 | |||
126 | /* Now obsolete code for handling commands not yet implemented the new way */ | ||
127 | |||
128 | /* | ||
129 | * Some of this code assumed that the manager structure contains two added | ||
130 | * fields: | ||
131 | * u32 link_subscriptions; | ||
132 | * struct list_head link_subscribers; | ||
133 | * which are currently not present. These fields may need to be re-introduced | ||
134 | * if and when support for link subscriptions is added. | ||
135 | */ | ||
136 | |||
137 | void tipc_cfg_link_event(u32 addr, char *name, int up) | ||
138 | { | ||
139 | /* TIPC DOESN'T HANDLE LINK EVENT SUBSCRIPTIONS AT THE MOMENT */ | ||
140 | } | ||
141 | |||
142 | int tipc_cfg_cmd(const struct tipc_cmd_msg * msg, | ||
143 | char *data, | ||
144 | u32 sz, | ||
145 | u32 *ret_size, | ||
146 | struct tipc_portid *orig) | ||
147 | { | ||
148 | int rv = -EINVAL; | ||
149 | u32 cmd = msg->cmd; | ||
150 | |||
151 | *ret_size = 0; | ||
152 | switch (cmd) { | ||
153 | case TIPC_REMOVE_LINK: | ||
154 | case TIPC_CMD_BLOCK_LINK: | ||
155 | case TIPC_CMD_UNBLOCK_LINK: | ||
156 | if (!cfg_check_connection(orig)) | ||
157 | rv = link_control(msg->argv.link_name, msg->cmd, 0); | ||
158 | break; | ||
159 | case TIPC_ESTABLISH: | ||
160 | { | ||
161 | int connected; | ||
162 | |||
163 | tipc_isconnected(mng.conn_port_ref, &connected); | ||
164 | if (connected || !orig) { | ||
165 | rv = TIPC_FAILURE; | ||
166 | break; | ||
167 | } | ||
168 | rv = tipc_connect2port(mng.conn_port_ref, orig); | ||
169 | if (rv == TIPC_OK) | ||
170 | orig = 0; | ||
171 | break; | ||
172 | } | ||
173 | case TIPC_GET_PEER_ADDRESS: | ||
174 | *ret_size = link_peer_addr(msg->argv.link_name, data, sz); | ||
175 | break; | ||
176 | case TIPC_GET_ROUTES: | ||
177 | rv = TIPC_OK; | ||
178 | break; | ||
179 | default: {} | ||
180 | } | ||
181 | if (*ret_size) | ||
182 | rv = TIPC_OK; | ||
183 | return rv; | ||
184 | } | ||
185 | |||
186 | static void cfg_cmd_event(struct tipc_cmd_msg *msg, | ||
187 | char *data, | ||
188 | u32 sz, | ||
189 | struct tipc_portid const *orig) | ||
190 | { | ||
191 | int rv = -EINVAL; | ||
192 | struct tipc_cmd_result_msg rmsg; | ||
193 | struct iovec msg_sect[2]; | ||
194 | int *arg; | ||
195 | |||
196 | msg->cmd = ntohl(msg->cmd); | ||
197 | |||
198 | cfg_prepare_res_msg(msg->cmd, msg->usr_handle, rv, &rmsg, msg_sect, | ||
199 | data, 0); | ||
200 | if (ntohl(msg->magic) != TIPC_MAGIC) | ||
201 | goto exit; | ||
202 | |||
203 | switch (msg->cmd) { | ||
204 | case TIPC_CREATE_LINK: | ||
205 | if (!cfg_check_connection(orig)) | ||
206 | rv = disc_create_link(&msg->argv.create_link); | ||
207 | break; | ||
208 | case TIPC_LINK_SUBSCRIBE: | ||
209 | { | ||
210 | struct subscr_data *sub; | ||
211 | |||
212 | if (mng.link_subscriptions > 64) | ||
213 | break; | ||
214 | sub = kmalloc(sizeof(*sub), | ||
215 | GFP_ATOMIC); | ||
216 | if (sub == NULL) { | ||
217 | warn("Memory squeeze; dropped remote link subscription\n"); | ||
218 | break; | ||
219 | } | ||
220 | INIT_LIST_HEAD(&sub->subd_list); | ||
221 | tipc_createport(mng.user_ref, | ||
222 | (void *)sub, | ||
223 | TIPC_HIGH_IMPORTANCE, | ||
224 | 0, | ||
225 | 0, | ||
226 | (tipc_conn_shutdown_event)cfg_linksubscr_cancel, | ||
227 | 0, | ||
228 | 0, | ||
229 | (tipc_conn_msg_event)cfg_linksubscr_cancel, | ||
230 | 0, | ||
231 | &sub->port_ref); | ||
232 | if (!sub->port_ref) { | ||
233 | kfree(sub); | ||
234 | break; | ||
235 | } | ||
236 | memcpy(sub->usr_handle,msg->usr_handle, | ||
237 | sizeof(sub->usr_handle)); | ||
238 | sub->domain = msg->argv.domain; | ||
239 | list_add_tail(&sub->subd_list, &mng.link_subscribers); | ||
240 | tipc_connect2port(sub->port_ref, orig); | ||
241 | rmsg.retval = TIPC_OK; | ||
242 | tipc_send(sub->port_ref, 2u, msg_sect); | ||
243 | mng.link_subscriptions++; | ||
244 | return; | ||
245 | } | ||
246 | default: | ||
247 | rv = tipc_cfg_cmd(msg, data, sz, (u32 *)&msg_sect[1].iov_len, orig); | ||
248 | } | ||
249 | exit: | ||
250 | rmsg.result_len = htonl(msg_sect[1].iov_len); | ||
251 | rmsg.retval = htonl(rv); | ||
252 | tipc_cfg_respond(msg_sect, 2u, orig); | ||
253 | } | ||
254 | #endif | ||
255 | |||
256 | #define MAX_STATS_INFO 2000 | 123 | #define MAX_STATS_INFO 2000 |
257 | 124 | ||
258 | static struct sk_buff *tipc_show_stats(void) | 125 | static struct sk_buff *tipc_show_stats(void) |
@@ -557,14 +424,6 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area | |||
557 | case TIPC_CMD_SHOW_PORTS: | 424 | case TIPC_CMD_SHOW_PORTS: |
558 | rep_tlv_buf = tipc_port_get_ports(); | 425 | rep_tlv_buf = tipc_port_get_ports(); |
559 | break; | 426 | break; |
560 | #if 0 | ||
561 | case TIPC_CMD_SHOW_PORT_STATS: | ||
562 | rep_tlv_buf = port_show_stats(req_tlv_area, req_tlv_space); | ||
563 | break; | ||
564 | case TIPC_CMD_RESET_PORT_STATS: | ||
565 | rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED); | ||
566 | break; | ||
567 | #endif | ||
568 | case TIPC_CMD_SET_LOG_SIZE: | 427 | case TIPC_CMD_SET_LOG_SIZE: |
569 | rep_tlv_buf = tipc_log_resize_cmd(req_tlv_area, req_tlv_space); | 428 | rep_tlv_buf = tipc_log_resize_cmd(req_tlv_area, req_tlv_space); |
570 | break; | 429 | break; |
diff --git a/net/tipc/discover.c b/net/tipc/discover.c index f28d1ae93125..dbd79c67d7c0 100644 --- a/net/tipc/discover.c +++ b/net/tipc/discover.c | |||
@@ -46,16 +46,6 @@ | |||
46 | #define TIPC_LINK_REQ_FAST 2000 /* normal delay if bearer has no links */ | 46 | #define TIPC_LINK_REQ_FAST 2000 /* normal delay if bearer has no links */ |
47 | #define TIPC_LINK_REQ_SLOW 600000 /* normal delay if bearer has links */ | 47 | #define TIPC_LINK_REQ_SLOW 600000 /* normal delay if bearer has links */ |
48 | 48 | ||
49 | #if 0 | ||
50 | #define GET_NODE_INFO 300 | ||
51 | #define GET_NODE_INFO_RESULT 301 | ||
52 | #define FORWARD_LINK_PROBE 302 | ||
53 | #define LINK_REQUEST_REJECTED 303 | ||
54 | #define LINK_REQUEST_ACCEPTED 304 | ||
55 | #define DROP_LINK_REQUEST 305 | ||
56 | #define CHECK_LINK_COUNT 306 | ||
57 | #endif | ||
58 | |||
59 | /* | 49 | /* |
60 | * TODO: Most of the inter-cluster setup stuff should be | 50 | * TODO: Most of the inter-cluster setup stuff should be |
61 | * rewritten, and be made conformant with specification. | 51 | * rewritten, and be made conformant with specification. |
@@ -79,16 +69,6 @@ struct link_req { | |||
79 | }; | 69 | }; |
80 | 70 | ||
81 | 71 | ||
82 | #if 0 | ||
83 | int disc_create_link(const struct tipc_link_create *argv) | ||
84 | { | ||
85 | /* | ||
86 | * Code for inter cluster link setup here | ||
87 | */ | ||
88 | return TIPC_OK; | ||
89 | } | ||
90 | #endif | ||
91 | |||
92 | /* | 72 | /* |
93 | * disc_lost_link(): A link has lost contact | 73 | * disc_lost_link(): A link has lost contact |
94 | */ | 74 | */ |
diff --git a/net/tipc/discover.h b/net/tipc/discover.h index c36eaeb7d5d0..9d064c3639bf 100644 --- a/net/tipc/discover.h +++ b/net/tipc/discover.h | |||
@@ -51,8 +51,5 @@ void tipc_disc_stop_link_req(struct link_req *req); | |||
51 | void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr); | 51 | void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr); |
52 | 52 | ||
53 | void tipc_disc_link_event(u32 addr, char *name, int up); | 53 | void tipc_disc_link_event(u32 addr, char *name, int up); |
54 | #if 0 | ||
55 | int disc_create_link(const struct tipc_link_create *argv); | ||
56 | #endif | ||
57 | 54 | ||
58 | #endif | 55 | #endif |
diff --git a/net/tipc/link.c b/net/tipc/link.c index b8cf1e9d0b86..4be78ecf4a67 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -99,23 +99,6 @@ struct link_name { | |||
99 | char if_peer[TIPC_MAX_IF_NAME]; | 99 | char if_peer[TIPC_MAX_IF_NAME]; |
100 | }; | 100 | }; |
101 | 101 | ||
102 | #if 0 | ||
103 | |||
104 | /* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */ | ||
105 | |||
106 | /** | ||
107 | * struct link_event - link up/down event notification | ||
108 | */ | ||
109 | |||
110 | struct link_event { | ||
111 | u32 addr; | ||
112 | int up; | ||
113 | void (*fcn)(u32, char *, int); | ||
114 | char name[TIPC_MAX_LINK_NAME]; | ||
115 | }; | ||
116 | |||
117 | #endif | ||
118 | |||
119 | static void link_handle_out_of_seq_msg(struct link *l_ptr, | 102 | static void link_handle_out_of_seq_msg(struct link *l_ptr, |
120 | struct sk_buff *buf); | 103 | struct sk_buff *buf); |
121 | static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf); | 104 | static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf); |
@@ -634,39 +617,9 @@ void tipc_link_stop(struct link *l_ptr) | |||
634 | l_ptr->proto_msg_queue = NULL; | 617 | l_ptr->proto_msg_queue = NULL; |
635 | } | 618 | } |
636 | 619 | ||
637 | #if 0 | ||
638 | |||
639 | /* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */ | 620 | /* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */ |
640 | |||
641 | static void link_recv_event(struct link_event *ev) | ||
642 | { | ||
643 | ev->fcn(ev->addr, ev->name, ev->up); | ||
644 | kfree(ev); | ||
645 | } | ||
646 | |||
647 | static void link_send_event(void (*fcn)(u32 a, char *n, int up), | ||
648 | struct link *l_ptr, int up) | ||
649 | { | ||
650 | struct link_event *ev; | ||
651 | |||
652 | ev = kmalloc(sizeof(*ev), GFP_ATOMIC); | ||
653 | if (!ev) { | ||
654 | warn("Link event allocation failure\n"); | ||
655 | return; | ||
656 | } | ||
657 | ev->addr = l_ptr->addr; | ||
658 | ev->up = up; | ||
659 | ev->fcn = fcn; | ||
660 | memcpy(ev->name, l_ptr->name, TIPC_MAX_LINK_NAME); | ||
661 | tipc_k_signal((Handler)link_recv_event, (unsigned long)ev); | ||
662 | } | ||
663 | |||
664 | #else | ||
665 | |||
666 | #define link_send_event(fcn, l_ptr, up) do { } while (0) | 621 | #define link_send_event(fcn, l_ptr, up) do { } while (0) |
667 | 622 | ||
668 | #endif | ||
669 | |||
670 | void tipc_link_reset(struct link *l_ptr) | 623 | void tipc_link_reset(struct link *l_ptr) |
671 | { | 624 | { |
672 | struct sk_buff *buf; | 625 | struct sk_buff *buf; |
@@ -690,10 +643,7 @@ void tipc_link_reset(struct link *l_ptr) | |||
690 | 643 | ||
691 | tipc_node_link_down(l_ptr->owner, l_ptr); | 644 | tipc_node_link_down(l_ptr->owner, l_ptr); |
692 | tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr); | 645 | tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr); |
693 | #if 0 | 646 | |
694 | tipc_printf(TIPC_CONS, "\nReset link <%s>\n", l_ptr->name); | ||
695 | dbg_link_dump(); | ||
696 | #endif | ||
697 | if (was_active_link && tipc_node_has_active_links(l_ptr->owner) && | 647 | if (was_active_link && tipc_node_has_active_links(l_ptr->owner) && |
698 | l_ptr->owner->permit_changeover) { | 648 | l_ptr->owner->permit_changeover) { |
699 | l_ptr->reset_checkpoint = checkpoint; | 649 | l_ptr->reset_checkpoint = checkpoint; |
@@ -3197,44 +3147,6 @@ struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_s | |||
3197 | return buf; | 3147 | return buf; |
3198 | } | 3148 | } |
3199 | 3149 | ||
3200 | #if 0 | ||
3201 | int link_control(const char *name, u32 op, u32 val) | ||
3202 | { | ||
3203 | int res = -EINVAL; | ||
3204 | struct link *l_ptr; | ||
3205 | u32 bearer_id; | ||
3206 | struct tipc_node * node; | ||
3207 | u32 a; | ||
3208 | |||
3209 | a = link_name2addr(name, &bearer_id); | ||
3210 | read_lock_bh(&tipc_net_lock); | ||
3211 | node = tipc_node_find(a); | ||
3212 | if (node) { | ||
3213 | tipc_node_lock(node); | ||
3214 | l_ptr = node->links[bearer_id]; | ||
3215 | if (l_ptr) { | ||
3216 | if (op == TIPC_REMOVE_LINK) { | ||
3217 | struct bearer *b_ptr = l_ptr->b_ptr; | ||
3218 | spin_lock_bh(&b_ptr->publ.lock); | ||
3219 | tipc_link_delete(l_ptr); | ||
3220 | spin_unlock_bh(&b_ptr->publ.lock); | ||
3221 | } | ||
3222 | if (op == TIPC_CMD_BLOCK_LINK) { | ||
3223 | tipc_link_reset(l_ptr); | ||
3224 | l_ptr->blocked = 1; | ||
3225 | } | ||
3226 | if (op == TIPC_CMD_UNBLOCK_LINK) { | ||
3227 | l_ptr->blocked = 0; | ||
3228 | } | ||
3229 | res = 0; | ||
3230 | } | ||
3231 | tipc_node_unlock(node); | ||
3232 | } | ||
3233 | read_unlock_bh(&tipc_net_lock); | ||
3234 | return res; | ||
3235 | } | ||
3236 | #endif | ||
3237 | |||
3238 | /** | 3150 | /** |
3239 | * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination | 3151 | * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination |
3240 | * @dest: network address of destination node | 3152 | * @dest: network address of destination node |
@@ -3265,28 +3177,6 @@ u32 tipc_link_get_max_pkt(u32 dest, u32 selector) | |||
3265 | return res; | 3177 | return res; |
3266 | } | 3178 | } |
3267 | 3179 | ||
3268 | #if 0 | ||
3269 | static void link_dump_rec_queue(struct link *l_ptr) | ||
3270 | { | ||
3271 | struct sk_buff *crs; | ||
3272 | |||
3273 | if (!l_ptr->oldest_deferred_in) { | ||
3274 | info("Reception queue empty\n"); | ||
3275 | return; | ||
3276 | } | ||
3277 | info("Contents of Reception queue:\n"); | ||
3278 | crs = l_ptr->oldest_deferred_in; | ||
3279 | while (crs) { | ||
3280 | if (crs->data == (void *)0x0000a3a3) { | ||
3281 | info("buffer %x invalid\n", crs); | ||
3282 | return; | ||
3283 | } | ||
3284 | msg_dbg(buf_msg(crs), "In rec queue:\n"); | ||
3285 | crs = crs->next; | ||
3286 | } | ||
3287 | } | ||
3288 | #endif | ||
3289 | |||
3290 | static void link_dump_send_queue(struct link *l_ptr) | 3180 | static void link_dump_send_queue(struct link *l_ptr) |
3291 | { | 3181 | { |
3292 | if (l_ptr->next_out) { | 3182 | if (l_ptr->next_out) { |
diff --git a/net/tipc/link.h b/net/tipc/link.h index 26151d30589d..4e944ef4a540 100644 --- a/net/tipc/link.h +++ b/net/tipc/link.h | |||
@@ -210,10 +210,6 @@ struct link { | |||
210 | u32 msg_length_counts; | 210 | u32 msg_length_counts; |
211 | u32 msg_lengths_total; | 211 | u32 msg_lengths_total; |
212 | u32 msg_length_profile[7]; | 212 | u32 msg_length_profile[7]; |
213 | #if 0 | ||
214 | u32 sent_tunneled; | ||
215 | u32 recv_tunneled; | ||
216 | #endif | ||
217 | } stats; | 213 | } stats; |
218 | 214 | ||
219 | struct print_buf print_buf; | 215 | struct print_buf print_buf; |
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 9ca4b0689237..3a8de4334da1 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c | |||
@@ -1009,16 +1009,6 @@ static void nametbl_list(struct print_buf *buf, u32 depth_info, | |||
1009 | } | 1009 | } |
1010 | } | 1010 | } |
1011 | 1011 | ||
1012 | #if 0 | ||
1013 | void tipc_nametbl_print(struct print_buf *buf, const char *str) | ||
1014 | { | ||
1015 | tipc_printf(buf, str); | ||
1016 | read_lock_bh(&tipc_nametbl_lock); | ||
1017 | nametbl_list(buf, 0, 0, 0, 0); | ||
1018 | read_unlock_bh(&tipc_nametbl_lock); | ||
1019 | } | ||
1020 | #endif | ||
1021 | |||
1022 | #define MAX_NAME_TBL_QUERY 32768 | 1012 | #define MAX_NAME_TBL_QUERY 32768 |
1023 | 1013 | ||
1024 | struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space) | 1014 | struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space) |
@@ -1051,13 +1041,6 @@ struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space) | |||
1051 | return buf; | 1041 | return buf; |
1052 | } | 1042 | } |
1053 | 1043 | ||
1054 | #if 0 | ||
1055 | void tipc_nametbl_dump(void) | ||
1056 | { | ||
1057 | nametbl_list(TIPC_CONS, 0, 0, 0, 0); | ||
1058 | } | ||
1059 | #endif | ||
1060 | |||
1061 | int tipc_nametbl_init(void) | 1044 | int tipc_nametbl_init(void) |
1062 | { | 1045 | { |
1063 | table.types = kcalloc(tipc_nametbl_size, sizeof(struct hlist_head), | 1046 | table.types = kcalloc(tipc_nametbl_size, sizeof(struct hlist_head), |
diff --git a/net/tipc/net.c b/net/tipc/net.c index 7e05af47a196..1a621cfd6604 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c | |||
@@ -129,15 +129,6 @@ u32 tipc_net_select_router(u32 addr, u32 ref) | |||
129 | return tipc_zone_select_router(tipc_net.zones[tipc_zone(addr)], addr, ref); | 129 | return tipc_zone_select_router(tipc_net.zones[tipc_zone(addr)], addr, ref); |
130 | } | 130 | } |
131 | 131 | ||
132 | #if 0 | ||
133 | u32 tipc_net_next_node(u32 a) | ||
134 | { | ||
135 | if (tipc_net.zones[tipc_zone(a)]) | ||
136 | return tipc_zone_next_node(a); | ||
137 | return 0; | ||
138 | } | ||
139 | #endif | ||
140 | |||
141 | void tipc_net_remove_as_router(u32 router) | 132 | void tipc_net_remove_as_router(u32 router) |
142 | { | 133 | { |
143 | u32 z_num; | 134 | u32 z_num; |
diff --git a/net/tipc/node.c b/net/tipc/node.c index 7c49cd056df7..823e9abb7ef5 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
@@ -125,16 +125,6 @@ void tipc_node_delete(struct tipc_node *n_ptr) | |||
125 | if (!n_ptr) | 125 | if (!n_ptr) |
126 | return; | 126 | return; |
127 | 127 | ||
128 | #if 0 | ||
129 | /* Not needed because links are already deleted via tipc_bearer_stop() */ | ||
130 | |||
131 | u32 l_num; | ||
132 | |||
133 | for (l_num = 0; l_num < MAX_BEARERS; l_num++) { | ||
134 | link_delete(n_ptr->links[l_num]); | ||
135 | } | ||
136 | #endif | ||
137 | |||
138 | dbg("node %x deleted\n", n_ptr->addr); | 128 | dbg("node %x deleted\n", n_ptr->addr); |
139 | kfree(n_ptr); | 129 | kfree(n_ptr); |
140 | } | 130 | } |
@@ -597,22 +587,6 @@ void tipc_node_remove_router(struct tipc_node *n_ptr, u32 router) | |||
597 | node_lost_contact(n_ptr); | 587 | node_lost_contact(n_ptr); |
598 | } | 588 | } |
599 | 589 | ||
600 | #if 0 | ||
601 | void node_print(struct print_buf *buf, struct tipc_node *n_ptr, char *str) | ||
602 | { | ||
603 | u32 i; | ||
604 | |||
605 | tipc_printf(buf, "\n\n%s", str); | ||
606 | for (i = 0; i < MAX_BEARERS; i++) { | ||
607 | if (!n_ptr->links[i]) | ||
608 | continue; | ||
609 | tipc_printf(buf, "Links[%u]: %x, ", i, n_ptr->links[i]); | ||
610 | } | ||
611 | tipc_printf(buf, "Active links: [%x,%x]\n", | ||
612 | n_ptr->active_links[0], n_ptr->active_links[1]); | ||
613 | } | ||
614 | #endif | ||
615 | |||
616 | u32 tipc_available_nodes(const u32 domain) | 590 | u32 tipc_available_nodes(const u32 domain) |
617 | { | 591 | { |
618 | struct tipc_node *n_ptr; | 592 | struct tipc_node *n_ptr; |
diff --git a/net/tipc/port.c b/net/tipc/port.c index d760336f2ca8..5c4285b2d555 100644 --- a/net/tipc/port.c +++ b/net/tipc/port.c | |||
@@ -710,50 +710,6 @@ struct sk_buff *tipc_port_get_ports(void) | |||
710 | return buf; | 710 | return buf; |
711 | } | 711 | } |
712 | 712 | ||
713 | #if 0 | ||
714 | |||
715 | #define MAX_PORT_STATS 2000 | ||
716 | |||
717 | struct sk_buff *port_show_stats(const void *req_tlv_area, int req_tlv_space) | ||
718 | { | ||
719 | u32 ref; | ||
720 | struct port *p_ptr; | ||
721 | struct sk_buff *buf; | ||
722 | struct tlv_desc *rep_tlv; | ||
723 | struct print_buf pb; | ||
724 | int str_len; | ||
725 | |||
726 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_PORT_REF)) | ||
727 | return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); | ||
728 | |||
729 | ref = *(u32 *)TLV_DATA(req_tlv_area); | ||
730 | ref = ntohl(ref); | ||
731 | |||
732 | p_ptr = tipc_port_lock(ref); | ||
733 | if (!p_ptr) | ||
734 | return cfg_reply_error_string("port not found"); | ||
735 | |||
736 | buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_PORT_STATS)); | ||
737 | if (!buf) { | ||
738 | tipc_port_unlock(p_ptr); | ||
739 | return NULL; | ||
740 | } | ||
741 | rep_tlv = (struct tlv_desc *)buf->data; | ||
742 | |||
743 | tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_STATS); | ||
744 | port_print(p_ptr, &pb, 1); | ||
745 | /* NEED TO FILL IN ADDITIONAL PORT STATISTICS HERE */ | ||
746 | tipc_port_unlock(p_ptr); | ||
747 | str_len = tipc_printbuf_validate(&pb); | ||
748 | |||
749 | skb_put(buf, TLV_SPACE(str_len)); | ||
750 | TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); | ||
751 | |||
752 | return buf; | ||
753 | } | ||
754 | |||
755 | #endif | ||
756 | |||
757 | void tipc_port_reinit(void) | 713 | void tipc_port_reinit(void) |
758 | { | 714 | { |
759 | struct port *p_ptr; | 715 | struct port *p_ptr; |