author	Thomas Gleixner <tglx@linutronix.de>	2014-04-11 04:13:16 -0400
committer	Marc Kleine-Budde <mkl@pengutronix.de>	2014-04-24 16:09:00 -0400
commit	2b9aecdce227e099349b73e3a074936d3c51f2a9 (patch)
tree	3f197c3aeac943e03ce073ea9af77b14a3895e91 /drivers/net/can
parent	fa39b54ccf28a0a85256f04881297cd75b8ef204 (diff)
can: c_can: Disable rx split as workaround
The RX buffer split causes packet loss in the hardware. What happens is:

RX Packet 1 --> message buffer 1 (newdat bit is not cleared)
RX Packet 2 --> message buffer 2 (newdat bit is not cleared)
RX Packet 3 --> message buffer 3 (newdat bit is not cleared)
RX Packet 4 --> message buffer 4 (newdat bit is not cleared)
RX Packet 5 --> message buffer 5 (newdat bit is not cleared)
RX Packet 6 --> message buffer 6 (newdat bit is not cleared)
RX Packet 7 --> message buffer 7 (newdat bit is not cleared)
RX Packet 8 --> message buffer 8 (newdat bit is not cleared)

Clear newdat bit in message buffer 1
Clear newdat bit in message buffer 2
Clear newdat bit in message buffer 3
Clear newdat bit in message buffer 4
Clear newdat bit in message buffer 5
Clear newdat bit in message buffer 6
Clear newdat bit in message buffer 7
Clear newdat bit in message buffer 8

Now if a new message comes in while those newdat bits are being
cleared, the HW gets confused and drops it. It does not matter how many
of them you clear. I put a delay between the clear of buffer 1 and
buffer 2 which was long enough that the message should have been queued
either in buffer 1 or buffer 9. But it did not show up anywhere. The
next message ended up in buffer 1. So the hardware lost a packet, of
course without reporting it via one of the error handlers.

That does not happen on all clear newdat bit events. I see one of 10k
packets dropped in the scenario which allows us to reproduce it. But
the trace always looks the same.

Not splitting the RX buffer avoids the packet loss but can cause
reordering. It's hard to trigger, but it CAN happen.

With that mode we use the HW as it was probably designed to be used:
we read from buffer 1 upwards and clear each buffer as we fetch the
message. That's how all microcontrollers use it, so I assume the way we
handle the buffers was never really tested. According to the public
documentation it should just work :)

Let the user decide which evil is the lesser one.

[ Oliver Hartkopp: Provided a sane config option and help text and
  made me switch to favour potential and unlikely reordering over
  packet loss ]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Alexander Stein <alexander.stein@systec-electronic.com>
Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
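For illustration, a minimal sketch of the two readout strategies the
message contrasts. This is not the driver code: the buffer counts and
the helpers read_buffer()/clear_newdat() are hypothetical stand-ins for
the C_CAN message object accesses.

#include <stdio.h>

/* Hypothetical stand-ins for the hardware message object accesses. */
static void read_buffer(int obj)  { printf("read obj %d\n", obj); }
static void clear_newdat(int obj) { printf("clear NEWDAT obj %d\n", obj); }

/* Split mode: drain the whole low group first, clear NEWDAT afterwards.
 * A frame arriving while the NEWDAT bits are being cleared can be
 * dropped by the hardware without any error indication. */
static void rx_split(void)
{
	int obj;

	for (obj = 1; obj <= 8; obj++)
		read_buffer(obj);	/* NEWDAT stays set */
	for (obj = 1; obj <= 8; obj++)
		clear_newdat(obj);	/* the loss window is here */
}

/* Linear mode: clear each object as it is read, the way the IP core was
 * presumably meant to be used. Avoids the loss, but may reorder frames
 * when the readout is slower than the incoming traffic. */
static void rx_linear(void)
{
	int obj;

	for (obj = 1; obj <= 16; obj++) {
		read_buffer(obj);
		clear_newdat(obj);
	}
}

int main(void)
{
	rx_split();
	rx_linear();
	return 0;
}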
Diffstat (limited to 'drivers/net/can')
-rw-r--r--	drivers/net/can/c_can/Kconfig	7
-rw-r--r--	drivers/net/can/c_can/c_can.c	62
2 files changed, 55 insertions(+), 14 deletions(-)
diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig
index 61ffc12d8fd8..8ab7103d4f44 100644
--- a/drivers/net/can/c_can/Kconfig
+++ b/drivers/net/can/c_can/Kconfig
@@ -14,6 +14,13 @@ config CAN_C_CAN_PLATFORM
 	  SPEAr1310 and SPEAr320 evaluation boards & TI (www.ti.com)
 	  boards like am335x, dm814x, dm813x and dm811x.
 
+config CAN_C_CAN_STRICT_FRAME_ORDERING
+	bool "Force a strict RX CAN frame order (may cause frame loss)"
+	---help---
+	  The RX split buffer prevents packet reordering but can cause packet
+	  loss. Only enable this option when you accept to lose CAN frames
+	  in favour of getting the received CAN frames in the correct order.
+
 config CAN_C_CAN_PCI
 	tristate "Generic PCI Bus based C_CAN/D_CAN driver"
 	depends on PCI
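For reference, enabling the new option would look like this in a kernel
.config fragment (a sketch assuming this patch is applied; a plain bool
without a default line defaults to n, i.e. loss-free but potentially
reordered reception):

CONFIG_CAN_C_CAN=y
CONFIG_CAN_C_CAN_PLATFORM=y
CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING=y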
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 09cb68772737..c1a8684ed1c8 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -791,18 +791,39 @@ static u32 c_can_adjust_pending(u32 pend)
 	return pend & ~((1 << lasts) - 1);
 }
 
+static inline void c_can_rx_object_get(struct net_device *dev, u32 obj)
+{
+#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
+	if (obj < C_CAN_MSG_RX_LOW_LAST)
+		c_can_object_get(dev, IF_RX, obj, IF_COMM_RCV_LOW);
+	else
+#endif
+		c_can_object_get(dev, IF_RX, obj, IF_COMM_RCV_HIGH);
+}
+
+static inline void c_can_rx_finalize(struct net_device *dev,
+				     struct c_can_priv *priv, u32 obj)
+{
+#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
+	if (obj < C_CAN_MSG_RX_LOW_LAST)
+		priv->rxmasked |= BIT(obj - 1);
+	else if (obj == C_CAN_MSG_RX_LOW_LAST) {
+		priv->rxmasked = 0;
+		/* activate all lower message objects */
+		c_can_activate_all_lower_rx_msg_obj(dev, IF_RX);
+	}
+#endif
+}
+
 static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
 			      u32 pend, int quota)
 {
-	u32 pkts = 0, ctrl, obj, mcmd;
+	u32 pkts = 0, ctrl, obj;
 
 	while ((obj = ffs(pend)) && quota > 0) {
 		pend &= ~BIT(obj - 1);
 
-		mcmd = obj < C_CAN_MSG_RX_LOW_LAST ?
-			IF_COMM_RCV_LOW : IF_COMM_RCV_HIGH;
-
-		c_can_object_get(dev, IF_RX, obj, mcmd);
+		c_can_rx_object_get(dev, obj);
 		ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_RX));
 
 		if (ctrl & IF_MCONT_MSGLST) {
@@ -824,13 +845,7 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
 		/* read the data from the message object */
 		c_can_read_msg_object(dev, IF_RX, ctrl);
 
-		if (obj < C_CAN_MSG_RX_LOW_LAST)
-			priv->rxmasked |= BIT(obj - 1);
-		else if (obj == C_CAN_MSG_RX_LOW_LAST) {
-			priv->rxmasked = 0;
-			/* activate all lower message objects */
-			c_can_activate_all_lower_rx_msg_obj(dev, IF_RX);
-		}
+		c_can_rx_finalize(dev, priv, obj);
 
 		pkts++;
 		quota--;
@@ -839,6 +854,16 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
 	return pkts;
 }
 
+static inline u32 c_can_get_pending(struct c_can_priv *priv)
+{
+	u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);
+
+#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
+	pend &= ~priv->rxmasked;
+#endif
+	return pend;
+}
+
 /*
  * theory of operation:
  *
@@ -848,6 +873,8 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
  * has arrived. To work-around this issue, we keep two groups of message
  * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
  *
+ * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = y
+ *
  * To ensure in-order frame reception we use the following
  * approach while re-activating a message object to receive further
  * frames:
@@ -860,6 +887,14 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
  * - if the current message object number is greater than
  *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
  *   only this message object.
+ *
+ * This can cause packet loss!
+ *
+ * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = n
+ *
+ * We clear the newdat bit right away.
+ *
+ * This can result in packet reordering when the readout is slow.
  */
 static int c_can_do_rx_poll(struct net_device *dev, int quota)
 {
@@ -875,8 +910,7 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
 
 	while (quota > 0) {
 		if (!pend) {
-			pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);
-			pend &= ~priv->rxmasked;
+			pend = c_can_get_pending(priv);
 			if (!pend)
 				break;
 			/*
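To see which evil one has picked, a small SocketCAN receiver can watch
for gaps (loss) or backwards jumps (reordering) in a sender-provided
sequence counter. A sketch, assuming the peer puts an incrementing
32-bit counter in the first payload bytes and the interface is named
"can0" (both are assumptions, not part of the patch):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

int main(void)
{
	struct sockaddr_can addr = { 0 };
	struct ifreq ifr;
	struct can_frame frame;
	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

	if (s < 0)
		return 1;

	/* "can0" is an example interface name, adjust as needed */
	strncpy(ifr.ifr_name, "can0", sizeof(ifr.ifr_name));
	if (ioctl(s, SIOCGIFINDEX, &ifr) < 0)
		return 1;

	addr.can_family = AF_CAN;
	addr.can_ifindex = ifr.ifr_ifindex;
	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	/* A gap in seq indicates loss, a backwards jump reordering
	 * (assumes the sender embeds the counter, see above). */
	while (read(s, &frame, sizeof(frame)) == sizeof(frame)) {
		unsigned int seq;

		if (frame.can_dlc < sizeof(seq))
			continue;
		memcpy(&seq, frame.data, sizeof(seq));
		printf("id=%03x seq=%u\n", frame.can_id, seq);
	}
	return 0;
}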