author		Karicheri, Muralidharan <m-karicheri2@ti.com>	2016-02-19 12:58:42 -0500
committer	David S. Miller <davem@davemloft.net>		2016-02-21 22:03:15 -0500
commit		9ecfe875c4f311618cc918aded716017dcd2ddf1
tree		2b7ac7aa22f5dc9c91c1875418c7b5cea1519892
parent		3301be32d4ba95136ff52fb0aee2bdb6077f40b1
net: ti: netcp: restore get/set_pad_info() functionality
Commit 899077791403 ("netcp: try to reduce type confusion in
descriptors") introduced a regression in kernel 4.5-rc1 that breaks
the get/set_pad_info() functionality.
The TI NETCP driver uses the pad0 and pad1 fields of knav_dma_desc to
store a DMA/memory buffer pointer and the buffer size, respectively.
In both cases on Keystone 2 the pointer size is 32 bit regardless of
whether LPAE is enabled, because CONFIG_ARCH_DMA_ADDR_T_64BIT is not
expected to be defined.
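
For illustration, a minimal user-space sketch (editor's addition with
hypothetical names, not the driver source; byte-order conversion
omitted) of that round-trip, assuming a 32-bit build such as gcc -m32
where sizeof(void *) == sizeof(u32), as on 32-bit Keystone 2:

#include <assert.h>
#include <stdint.h>

struct fake_desc {
	uint32_t pad[2];	/* stands in for the knav_dma_desc pad words */
};

/* mirrors the restored set_pad_info(): the whole pointer fits in pad[0] */
static void save_pad(void *buf, uint32_t len, struct fake_desc *d)
{
	d->pad[0] = (uint32_t)(uintptr_t)buf;
	d->pad[1] = len;
}

/* mirrors the restored get_pad_info(): read both words straight back */
static void restore_pad(void **buf, uint32_t *len, struct fake_desc *d)
{
	*buf = (void *)(uintptr_t)d->pad[0];
	*len = d->pad[1];
}

int main(void)
{
	struct fake_desc d;
	int object;
	void *buf;
	uint32_t len;

	save_pad(&object, sizeof(object), &d);
	restore_pad(&buf, &len, &d);
	assert(buf == (void *)&object && len == sizeof(object));
	return 0;
}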
Unfortunately, the above commit changed the buffer pointer save/restore
code (get/set_pad_info()) and added an intermediate conversion to u64,
which works incorrectly on 32-bit Keystone 2 and causes the TI NETCP
driver to crash in the RX/TX path with an "Unable to handle kernel NULL
pointer" exception. This issue was reported and discussed in [1].
Hence, fix it by partially reverting the above commit and restoring the
get/set_pad_info() functionality as it was before.
[1] https://www.mail-archive.com/netdev@vger.kernel.org/msg95361.html
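
For context, a user-space sketch (editor's addition, hypothetical
names) of one pointer mix-up visible in the removed
netcp_tx_submit_skb() hunk at the end of the diff below, where the old
code saved lower/upper_32_bits((uintptr_t)&skb) -- the address of the
on-stack pointer variable -- rather than the sk_buff pointer value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int object = 42;
	int *skb = &object;	/* stands in for the sk_buff pointer */
	uint64_t addr;
	uint32_t pad0, pad1;
	void *restored;

	addr = (uintptr_t)&skb;		/* BUG: should be (uintptr_t)skb */
	pad0 = (uint32_t)(addr & 0xffffffffu);
	pad1 = (uint32_t)(addr >> 32);

	/* what the removed get_pad_ptr() reconstructed: a stack slot
	 * address, dead by the time the TX completion handler runs */
	restored = (void *)(uintptr_t)(pad0 | ((uint64_t)pad1 << 32));
	printf("restored %p, expected %p\n", restored, (void *)skb);
	return 0;
}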
Cc: Wingman Kwok <w-kwok2@ti.com>
Cc: Mugunthan V N <mugunthanvnm@ti.com>
Cc: David Laight <David.Laight@ACULAB.COM>
Cc: Arnd Bergmann <arnd@arndb.de>
Reported-by: Franklin S Cooper Jr <fcooper@ti.com>
Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
Signed-off-by: Murali Karicheri <m-karicheri2@ti.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
 drivers/net/ethernet/ti/netcp_core.c | 59 +++++++++------------------
 1 file changed, 18 insertions(+), 41 deletions(-)
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index c61d66d38634..0b26e5209413 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -117,20 +117,10 @@ static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc,
 	*ndesc = le32_to_cpu(desc->next_desc);
 }
 
-static void get_pad_info(u32 *pad0, u32 *pad1, u32 *pad2, struct knav_dma_desc *desc)
+static void get_pad_info(u32 *pad0, u32 *pad1, struct knav_dma_desc *desc)
 {
 	*pad0 = le32_to_cpu(desc->pad[0]);
 	*pad1 = le32_to_cpu(desc->pad[1]);
-	*pad2 = le32_to_cpu(desc->pad[2]);
-}
-
-static void get_pad_ptr(void **padptr, struct knav_dma_desc *desc)
-{
-	u64 pad64;
-
-	pad64 = le32_to_cpu(desc->pad[0]) +
-		((u64)le32_to_cpu(desc->pad[1]) << 32);
-	*padptr = (void *)(uintptr_t)pad64;
 }
 
 static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len,
@@ -163,11 +153,10 @@ static void set_desc_info(u32 desc_info, u32 pkt_info,
 	desc->packet_info = cpu_to_le32(pkt_info);
 }
 
-static void set_pad_info(u32 pad0, u32 pad1, u32 pad2, struct knav_dma_desc *desc)
+static void set_pad_info(u32 pad0, u32 pad1, struct knav_dma_desc *desc)
 {
 	desc->pad[0] = cpu_to_le32(pad0);
 	desc->pad[1] = cpu_to_le32(pad1);
-	desc->pad[2] = cpu_to_le32(pad1);
 }
 
 static void set_org_pkt_info(dma_addr_t buff, u32 buff_len,
@@ -581,7 +570,6 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
 	dma_addr_t dma_desc, dma_buf;
 	unsigned int buf_len, dma_sz = sizeof(*ndesc);
 	void *buf_ptr;
-	u32 pad[2];
 	u32 tmp;
 
 	get_words(&dma_desc, 1, &desc->next_desc);
@@ -593,14 +581,12 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
 			break;
 		}
 		get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
-		get_pad_ptr(&buf_ptr, ndesc);
+		get_pad_info((u32 *)&buf_ptr, &buf_len, ndesc);
 		dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
 		__free_page(buf_ptr);
 		knav_pool_desc_put(netcp->rx_pool, desc);
 	}
-
-	get_pad_info(&pad[0], &pad[1], &buf_len, desc);
-	buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32));
+	get_pad_info((u32 *)&buf_ptr, &buf_len, desc);
 
 	if (buf_ptr)
 		netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
@@ -639,8 +625,8 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
 	dma_addr_t dma_desc, dma_buff;
 	struct netcp_packet p_info;
 	struct sk_buff *skb;
-	u32 pad[2];
 	void *org_buf_ptr;
+	u32 tmp;
 
 	dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
 	if (!dma_desc)
@@ -653,8 +639,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
 	}
 
 	get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
-	get_pad_info(&pad[0], &pad[1], &org_buf_len, desc);
-	org_buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32));
+	get_pad_info((u32 *)&org_buf_ptr, &org_buf_len, desc);
 
 	if (unlikely(!org_buf_ptr)) {
 		dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
@@ -679,7 +664,6 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
 	/* Fill in the page fragment list */
 	while (dma_desc) {
 		struct page *page;
-		void *ptr;
 
 		ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
 		if (unlikely(!ndesc)) {
@@ -688,8 +672,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
 		}
 
 		get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
-		get_pad_ptr(&ptr, ndesc);
-		page = ptr;
+		get_pad_info((u32 *)&page, &tmp, ndesc);
 
 		if (likely(dma_buff && buf_len && page)) {
 			dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
@@ -767,6 +750,7 @@ static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
 	unsigned int buf_len, dma_sz;
 	dma_addr_t dma;
 	void *buf_ptr;
+	u32 tmp;
 
 	/* Allocate descriptor */
 	while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) {
@@ -777,7 +761,7 @@ static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
 		}
 
 		get_org_pkt_info(&dma, &buf_len, desc);
-		get_pad_ptr(&buf_ptr, desc);
+		get_pad_info((u32 *)&buf_ptr, &tmp, desc);
 
 		if (unlikely(!dma)) {
 			dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
@@ -829,7 +813,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
 	struct page *page;
 	dma_addr_t dma;
 	void *bufptr;
-	u32 pad[3];
+	u32 pad[2];
 
 	/* Allocate descriptor */
 	hwdesc = knav_pool_desc_get(netcp->rx_pool);
@@ -846,7 +830,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
 			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
 		bufptr = netdev_alloc_frag(primary_buf_len);
-		pad[2] = primary_buf_len;
+		pad[1] = primary_buf_len;
 
 		if (unlikely(!bufptr)) {
 			dev_warn_ratelimited(netcp->ndev_dev,
@@ -858,9 +842,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
 		if (unlikely(dma_mapping_error(netcp->dev, dma)))
 			goto fail;
 
-		pad[0] = lower_32_bits((uintptr_t)bufptr);
-		pad[1] = upper_32_bits((uintptr_t)bufptr);
-
+		pad[0] = (u32)bufptr;
 	} else {
 		/* Allocate a secondary receive queue entry */
 		page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
@@ -870,9 +852,8 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
 		}
 		buf_len = PAGE_SIZE;
 		dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
-		pad[0] = lower_32_bits(dma);
-		pad[1] = upper_32_bits(dma);
-		pad[2] = 0;
+		pad[0] = (u32)page;
+		pad[1] = 0;
 	}
 
 	desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC;
@@ -882,7 +863,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
 	pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
 		    KNAV_DMA_DESC_RETQ_SHIFT;
 	set_org_pkt_info(dma, buf_len, hwdesc);
-	set_pad_info(pad[0], pad[1], pad[2], hwdesc);
+	set_pad_info(pad[0], pad[1], hwdesc);
 	set_desc_info(desc_info, pkt_info, hwdesc);
 
 	/* Push to FDQs */
@@ -971,11 +952,11 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
 					  unsigned int budget)
 {
 	struct knav_dma_desc *desc;
-	void *ptr;
 	struct sk_buff *skb;
 	unsigned int dma_sz;
 	dma_addr_t dma;
 	int pkts = 0;
+	u32 tmp;
 
 	while (budget--) {
 		dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz);
@@ -988,8 +969,7 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
 			continue;
 		}
 
-		get_pad_ptr(&ptr, desc);
-		skb = ptr;
+		get_pad_info((u32 *)&skb, &tmp, desc);
 		netcp_free_tx_desc_chain(netcp, desc, dma_sz);
 		if (!skb) {
 			dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
@@ -1194,10 +1174,7 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp,
 	}
 
 	set_words(&tmp, 1, &desc->packet_info);
-	tmp = lower_32_bits((uintptr_t)&skb);
-	set_words(&tmp, 1, &desc->pad[0]);
-	tmp = upper_32_bits((uintptr_t)&skb);
-	set_words(&tmp, 1, &desc->pad[1]);
+	set_words((u32 *)&skb, 1, &desc->pad[0]);
 
 	if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
 		tmp = tx_pipe->switch_to_port;