authorDavid S. Miller <davem@davemloft.net>2019-09-05 06:17:50 -0400
committerDavid S. Miller <davem@davemloft.net>2019-09-05 06:17:50 -0400
commit44c40910b66f786d33ffd2682ef38750eebb567c (patch)
tree69b08b2eb39c5d39996d2e29016a31874381be01
parent8330f73fe9742f201f467639f8356cf58756fb9f (diff)
parent9d71dd0c70099914fcd063135da3c580865e924c (diff)
Merge tag 'linux-can-next-for-5.4-20190904' of git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next
Marc Kleine-Budde says:

====================
pull-request: can-next 2019-09-04 j1939

This is a pull request for net-next/master consisting of 21 patches.

The first 12 patches are by me and target the CAN core infrastructure. They clean up the names of variables, structs and struct members, convert can_rx_register() to use max() instead of open coding it, and remove unneeded code from the can_pernet_exit() callback.

The next three patches are also by me; they introduce and make use of the CAN midlayer private structure. It is used to hold protocol-specific per-device data structures.

The next patch, by Oleksij Rempel, switches net->can.rcvlists_lock from a spin_lock() to a spin_lock_bh(), so that it can be used from NAPI (soft IRQ) context.

The next 4 patches are by Kurt Van Dijck; he first updates his email address via mailmap and then extends sockaddr_can to include j1939 members.

The final patch is the collective effort of many entities (the j1939 authors: Oliver Hartkopp, Bastian Stender, Elenita Hinds, kbuild test robot, Kurt Van Dijck, Maxime Jayat, Robin van der Gracht, Oleksij Rempel, Marc Kleine-Budde). It adds support for the SAE J1939 protocol to the CAN networking stack.

SAE J1939 is the vehicle bus recommended practice used for communication and diagnostics among vehicle components. Originating in the car and heavy-duty truck industry in the United States, it is now widely used in other parts of the world.

P.S.: This pull request doesn't invalidate my last pull request: "pull-request: can-next 2019-09-03".
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--.mailmap1
-rw-r--r--Documentation/networking/index.rst1
-rw-r--r--Documentation/networking/j1939.rst422
-rw-r--r--MAINTAINERS10
-rw-r--r--drivers/net/can/dev.c24
-rw-r--r--drivers/net/can/slcan.c6
-rw-r--r--drivers/net/can/vcan.c7
-rw-r--r--drivers/net/can/vxcan.c4
-rw-r--r--include/linux/can/can-ml.h68
-rw-r--r--include/linux/can/core.h8
-rw-r--r--include/net/netns/can.h14
-rw-r--r--include/uapi/linux/can.h20
-rw-r--r--include/uapi/linux/can/j1939.h99
-rw-r--r--net/can/Kconfig2
-rw-r--r--net/can/Makefile2
-rw-r--r--net/can/af_can.c302
-rw-r--r--net/can/af_can.h19
-rw-r--r--net/can/bcm.c4
-rw-r--r--net/can/j1939/Kconfig15
-rw-r--r--net/can/j1939/Makefile10
-rw-r--r--net/can/j1939/address-claim.c230
-rw-r--r--net/can/j1939/bus.c333
-rw-r--r--net/can/j1939/j1939-priv.h338
-rw-r--r--net/can/j1939/main.c403
-rw-r--r--net/can/j1939/socket.c1160
-rw-r--r--net/can/j1939/transport.c2027
-rw-r--r--net/can/proc.c163
-rw-r--r--net/can/raw.c4
28 files changed, 5398 insertions, 298 deletions
diff --git a/.mailmap b/.mailmap
index afaad605284a..dfd7fedbf578 100644
--- a/.mailmap
+++ b/.mailmap
@@ -63,6 +63,7 @@ Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@mips.com>
63Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@imgtec.com> 63Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@imgtec.com>
64Dengcheng Zhu <dzhu@wavecomp.com> <dczhu@mips.com> 64Dengcheng Zhu <dzhu@wavecomp.com> <dczhu@mips.com>
65Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@gmail.com> 65Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@gmail.com>
66<dev.kurt@vandijck-laurijssen.be> <kurt.van.dijck@eia.be>
66Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> 67Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
67Dmitry Safonov <0x7f454c46@gmail.com> <dsafonov@virtuozzo.com> 68Dmitry Safonov <0x7f454c46@gmail.com> <dsafonov@virtuozzo.com>
68Dmitry Safonov <0x7f454c46@gmail.com> <d.safonov@partner.samsung.com> 69Dmitry Safonov <0x7f454c46@gmail.com> <d.safonov@partner.samsung.com>
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index 37eabc17894c..0481d0ffebed 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -17,6 +17,7 @@ Contents:
17 devlink-trap 17 devlink-trap
18 devlink-trap-netdevsim 18 devlink-trap-netdevsim
19 ieee802154 19 ieee802154
20 j1939
20 kapi 21 kapi
21 z8530book 22 z8530book
22 msg_zerocopy 23 msg_zerocopy
diff --git a/Documentation/networking/j1939.rst b/Documentation/networking/j1939.rst
new file mode 100644
index 000000000000..ce7e7a044e08
--- /dev/null
+++ b/Documentation/networking/j1939.rst
@@ -0,0 +1,422 @@
1.. SPDX-License-Identifier: (GPL-2.0 OR MIT)
2
3===================
4J1939 Documentation
5===================
6
7Overview / What Is J1939
8========================
9
10SAE J1939 defines a higher layer protocol on CAN. It implements a more
11sophisticated addressing scheme and extends the maximum packet size above 8
12bytes. Several derived specifications exist, which differ from the original
13J1939 on the application level, like MilCAN A, NMEA2000 and especially
14ISO-11783 (ISOBUS). This last one specifies the so-called ETP (Extended
15Transport Protocol), which has been included in this implementation. This
16results in a maximum packet size of ((2 ^ 24) - 1) * 7 bytes == 111 MiB.
17
18Specifications used
19-------------------
20
21* SAE J1939-21 : data link layer
22* SAE J1939-81 : network management
23* ISO 11783-6 : Virtual Terminal (Extended Transport Protocol)
24
25.. _j1939-motivation:
26
27Motivation
28==========
29
30Given that SocketCAN already provides an API similar to BSD sockets, we found
31several reasons to justify a kernel implementation of the addressing and
32transport methods used by J1939.
33
34* **Addressing:** when a process on an ECU communicates via J1939, it does
35 not necessarily need to know its own source address, although at least one
36 process per ECU should know it. Other processes should be able to reuse
37 that address. This way, address parameters for different processes
38 cooperating on the same ECU are not duplicated. This way of working is
39 closely related to the UNIX concept of programs that do just one thing, and
40 do it well.
41
42* **Dynamic addressing:** Address Claiming in J1939 is time critical.
43 Furthermore, data transport should be handled properly during the address
44 negotiation. Putting this functionality in the kernel eliminates it as a
45 requirement for _every_ user space process that communicates via J1939. This
46 results in a consistent J1939 bus with proper addressing.
47
48* **Transport:** both TP & ETP reuse some PGNs to relay big packets over them.
49 Different processes may thus use the same TP & ETP PGNs without actually
50 knowing it. The individual TP & ETP sessions _must_ be serialized
51 (synchronized) between different processes. The kernel solves this problem
52 properly and eliminates the serialization (synchronization) as a requirement
53 for _every_ user space process that communicates via J1939.
54
55J1939 defines some other features (relaying, gateway, fast packet transport,
56...). In-kernel code for these would not contribute to protocol stability.
57Therefore, these parts are left to user space.
58
59The J1939 sockets operate on CAN network devices (see SocketCAN). Any J1939
60user space library operating on CAN raw sockets will still operate properly.
61Since such a library does not communicate with the in-kernel implementation, care
62must be taken that these two do not interfere. In practice, this means they
63cannot share ECU addresses. A single ECU (or virtual ECU) address is used by
64the library exclusively, or by the in-kernel system exclusively.
65
66J1939 concepts
67==============
68
69PGN
70---
71
72The PGN (Parameter Group Number) is a number to identify a packet. The PGN
73is composed as follows:
74 1 bit  : Reserved Bit
75 1 bit  : Data Page
76 8 bits : PF (PDU Format)
77 8 bits : PS (PDU Specific)
78
79In J1939-21 a distinction is made between PDU1 format (where PF < 240) and PDU2
80format (where PF >= 240). Furthermore, when using PDU2 format, the PS-field
81contains a so-called Group Extension, which is part of the PGN. When using PDU2
82format, the Group Extension is set in the PS-field.
83
84On the other hand, when using PDU1 format, the PS-field contains a so-called
85Destination Address, which is _not_ part of the PGN. When communicating a PGN
86from user space to kernel (or vice versa) and PDU1 format is used, the PS-field
87of the PGN shall be set to zero. The Destination Address shall be set
88elsewhere.
89
90When mapping a PGN to the 29-bit CAN identifier, the kernel gets/sets the
91Destination Address from/to the appropriate bits of the identifier.
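
For illustration, a PGN can be extracted from a 29-bit CAN identifier in user
space as in the following sketch. It assumes the standard J1939 identifier
layout (from most to least significant: 3-bit priority, reserved bit, Data
Page, PF, PS and the 8-bit Source Address); the helper name is made up for
this example and is not part of any API:

.. code-block:: C

    #include <stdint.h>

    /* Illustrative helper: recover the PGN from a 29-bit CAN identifier. */
    static uint32_t j1939_pgn_of_id(uint32_t can_id)
    {
        uint32_t pf = (can_id >> 16) & 0xff;
        uint32_t pgn = (can_id >> 8) & 0x3ff00;  /* reserved + DP + PF */

        if (pf >= 240)              /* PDU2: PS is the Group Extension */
            pgn |= (can_id >> 8) & 0xff;

        return pgn;                 /* PDU1: PS is the DA, not part of the PGN */
    }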
92
93
94Addressing
95----------
96
97Both static and dynamic addressing methods can be used.
98
99For static addresses, no extra checks are made by the kernel; the provided
100addresses are considered correct. This responsibility lies with the OEM or system
101integrator.
102
103For dynamic addressing, so-called Address Claiming, extra support is foreseen
104in the kernel. In J1939 any ECU is known by its 64-bit NAME. At the moment of
105a successful address claim, the kernel keeps track of both NAME and source
106address being claimed. This serves as a base for filter schemes. By default,
107packets with a destination address that is not local will be rejected.
108
109Mixed mode packets (from a static to a dynamic address or vice versa) are
110allowed. The BSD socket API defines separate calls for getting/setting the
111local & remote address; these are applicable to J1939 sockets as well.
112
113Filtering
114---------
115
116The implementation supports per-socket whitelist filters that a user can set in
117order to receive a subset of the J1939 traffic. Filtering can be based on:
118
119* SA
120* SOURCE_NAME
121* PGN
122
123When multiple filters are in place for a single socket, and a packet comes in
124that matches several of those filters, the packet is only received once for
125that socket.
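
For illustration, a filter array that restricts a socket to one source address
and one PGN might look like the following sketch, using the SO_J1939_FILTER
socket option described below; the values are arbitrary examples, not taken
from the implementation:

.. code-block:: C

    /* example values: accept packets from SA 0x20 or with PGN 0x0ff00 */
    const struct j1939_filter filt[] = {
        {
            .addr = 0x20,               /* filter on the source address */
            .addr_mask = 0xff,
        }, {
            .pgn = 0x0ff00,             /* filter on the PGN */
            .pgn_mask = J1939_PGN_MAX,
        },
    };

    setsockopt(sock, SOL_CAN_J1939, SO_J1939_FILTER, &filt, sizeof(filt));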
126
127How to Use J1939
128================
129
130API Calls
131---------
132
133As for any other CAN protocol, you first need to open a socket for
134communicating over a CAN network. To use J1939, #include <linux/can/j1939.h>.
135From there, <linux/can.h> will be included too. To open a socket, use:
136
137.. code-block:: C
138
139 s = socket(PF_CAN, SOCK_DGRAM, CAN_J1939);
140
141J1939 uses SOCK_DGRAM sockets. In the J1939 specification, connections are
142mentioned in the context of transport protocol sessions. These still deliver
143packets to the other end (using several CAN packets). SOCK_STREAM is not
144supported.
145
146After the successful creation of the socket, you would normally use the bind(2)
147and/or connect(2) system call to bind the socket to a CAN interface. After
148binding and/or connecting the socket, you can read(2) and write(2) from/to the
149socket or use send(2), sendto(2), sendmsg(2) and the recv*() counterpart
150operations on the socket as usual. There are also J1939 specific socket options
151described below.
152
153In order to send data, a bind(2) must have been successful. bind(2) assigns a
154local address to a socket.
155
156Different from CAN raw sockets, the payload is just the data that gets sent,
157without its header info. The header info is derived from the sockaddr supplied
158to bind(2), connect(2), sendto(2) and recvfrom(2). A write(2) with size 4 will
159result in a packet with 4 bytes.
160
161The sockaddr structure has extensions for use with J1939 as specified below:
162
163.. code-block:: C
164
165 struct sockaddr_can {
166 sa_family_t can_family;
167 int can_ifindex;
168 union {
169 struct {
170 __u64 name;
171 /* pgn:
172 * 8 bit: PS in PDU2 case, else 0
173 * 8 bit: PF
174 * 1 bit: DP
175 * 1 bit: reserved
176 */
177 __u32 pgn;
178 __u8 addr;
179 } j1939;
180 } can_addr;
181 }
182
183can_family & can_ifindex serve the same purpose as for other SocketCAN sockets.
184
185can_addr.j1939.pgn specifies the PGN (max 0x3ffff). Individual bits are
186specified above.
187
188can_addr.j1939.name contains the 64-bit J1939 NAME.
189
190can_addr.j1939.addr contains the address.
191
192The bind(2) system call assigns the local address, i.e. the source address when
193sending packets. If a PGN is set during bind(2), it is used as an RX filter.
194I.e. only packets with a matching PGN are received. If an ADDR or NAME is set
195it is used as a receive filter, too. It will match the destination NAME or ADDR
196of the incoming packet. The NAME filter will work only if appropriate Address
197Claiming for this name was done on the CAN bus and registered/cached by the
198kernel.
199
200On the other hand connect(2) assigns the remote address, i.e. the destination
201address. The PGN from connect(2) is used as the default PGN when sending
202packets. If ADDR or NAME is set it will be used as the default destination ADDR
203or NAME. Furthermore, an ADDR or NAME set during connect(2) is used as a receive
204filter. It will match the source NAME or ADDR of the incoming packet.
205
206Both write(2) and send(2) will send a packet with local address from bind(2) and
207the remote address from connect(2). Use sendto(2) to overwrite the destination
208address.
209
210If can_addr.j1939.name is set (!= 0) the NAME is looked up by the kernel and
211the corresponding ADDR is used. If can_addr.j1939.name is not set (== 0),
212can_addr.j1939.addr is used.
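
As an illustrative sketch (not taken verbatim from the implementation), binding
a local static address and connecting a default destination could look like
this; the interface name, addresses and PGN are arbitrary example values and
'sock' is the CAN_J1939 socket created above:

.. code-block:: C

    struct sockaddr_can baddr = {
        .can_family = AF_CAN,
        .can_addr.j1939 = {
            .name = J1939_NO_NAME,
            .addr = 0x20,           /* local source address */
            .pgn = J1939_NO_PGN,    /* no PGN based RX filter */
        },
        .can_ifindex = if_nametoindex("can0"),
    };
    const struct sockaddr_can caddr = {
        .can_family = AF_CAN,
        .can_addr.j1939 = {
            .name = J1939_NO_NAME,
            .addr = 0x30,           /* default destination address */
            .pgn = 0x12300,         /* default PGN for write()/send() */
        },
        .can_ifindex = if_nametoindex("can0"),
    };
    uint8_t data[4] = { 0x01, 0x02, 0x03, 0x04 };

    bind(sock, (struct sockaddr *)&baddr, sizeof(baddr));
    connect(sock, (struct sockaddr *)&caddr, sizeof(caddr));
    write(sock, data, sizeof(data));    /* sends 4 payload bytes */

After the connect(2), plain write(2) or send(2) calls are sufficient; sendto(2)
is only needed to override the destination for individual packets.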
213
214When creating a socket, reasonable defaults are set. Some options can be
215modified with setsockopt(2) & getsockopt(2).
216
217RX path related options:
218
219- SO_J1939_FILTER - configure array of filters
220- SO_J1939_PROMISC - disable filters set by bind(2) and connect(2)
221
222By default, no broadcast packets can be sent or received. To enable sending or
223receiving broadcast packets use the socket option SO_BROADCAST:
224
225.. code-block:: C
226
227 int value = 1;
228 setsockopt(sock, SOL_SOCKET, SO_BROADCAST, &value, sizeof(value));
229
230The following diagram illustrates the RX path:
231
232.. code::
233
234 +--------------------+
235 | incoming packet |
236 +--------------------+
237 |
238 V
239 +--------------------+
240 | SO_J1939_PROMISC? |
241 +--------------------+
242 | |
243 no | | yes
244 | |
245 .---------' `---------.
246 | |
247 +---------------------------+ |
248 | bind() + connect() + | |
249 | SO_BROADCAST option | |
250 +---------------------------+ |
251 | |
252 |<---------------------'
253 V
254 +---------------------------+
255 | SO_J1939_FILTER |
256 +---------------------------+
257 |
258 V
259 +---------------------------+
260 | socket recv() |
261 +---------------------------+
262
263TX path related options:
264- SO_J1939_SEND_PRIO - change default send priority for the socket
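
For example, the send priority could be changed for a socket as in this sketch;
it assumes the option takes an int in the range 0..7, where a lower value means
a higher priority on the bus:

.. code-block:: C

    int prio = 3;    /* example value */

    setsockopt(sock, SOL_CAN_J1939, SO_J1939_SEND_PRIO, &prio, sizeof(prio));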
265
266Message Flags during send() and Related System Calls
267^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
268
269send(2), sendto(2) and sendmsg(2) take a 'flags' argument. Currently
270supported flags are:
271
272* MSG_DONTWAIT, i.e. non-blocking operation.
273
274recvmsg(2)
275^^^^^^^^^^
276
277In most cases recvmsg(2) is needed if you want to extract more information than
278recvfrom(2) can provide, for example the packet priority and timestamp. The
279Destination Address, name and packet priority (if applicable) are attached to
280the msghdr in the recvmsg(2) call. They can be extracted using cmsg(3) macros,
281with cmsg_level == SOL_CAN_J1939 && cmsg_type == SCM_J1939_DEST_ADDR,
282SCM_J1939_DEST_NAME or SCM_J1939_PRIO. The returned data is a uint8_t for
283priority and dst_addr, and uint64_t for dst_name.
284
285.. code-block:: C
286
287 uint8_t priority, dst_addr;
288 uint64_t dst_name;
289
290 for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
291 switch (cmsg->cmsg_level) {
292 case SOL_CAN_J1939:
293 if (cmsg->cmsg_type == SCM_J1939_DEST_ADDR)
294 dst_addr = *CMSG_DATA(cmsg);
295 else if (cmsg->cmsg_type == SCM_J1939_DEST_NAME)
296 memcpy(&dst_name, CMSG_DATA(cmsg), cmsg->cmsg_len - CMSG_LEN(0));
297 else if (cmsg->cmsg_type == SCM_J1939_PRIO)
298 priority = *CMSG_DATA(cmsg);
299 break;
300 }
301 }
302
303Dynamic Addressing
304------------------
305
306A distinction has to be made between using the claimed address and doing an
307address claim. To use an already claimed address, one has to fill in the
308j1939.name member and provide it to bind(2). If the name had claimed an address
309earlier, all further messages being sent will use that address, and the
310j1939.addr member will be ignored.
311
312An exception to this is PGN 0x0ee00. This is the "Address Claim/Cannot Claim
313Address" message and the kernel will use the j1939.addr member for that PGN if
314necessary.
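
To merely use a NAME whose address has already been claimed, a bind(2) as in
the following sketch can be sufficient; the NAME value and the interface are
arbitrary examples:

.. code-block:: C

    struct sockaddr_can baddr = {
        .can_family = AF_CAN,
        .can_addr.j1939 = {
            .name = 0x1234567890abcdefULL, /* example 64-bit NAME */
            .addr = J1939_NO_ADDR,         /* ignored, SA is taken from the NAME */
            .pgn = J1939_NO_PGN,
        },
        .can_ifindex = if_nametoindex("can0"),
    };

    bind(sock, (struct sockaddr *)&baddr, sizeof(baddr));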
315
316To claim an address, the following code example can be used:
317
318.. code-block:: C
319
320 struct sockaddr_can baddr = {
321 .can_family = AF_CAN,
322 .can_addr.j1939 = {
323 .name = name,
324 .addr = J1939_IDLE_ADDR,
325 .pgn = J1939_NO_PGN, /* to disable bind() rx filter for PGN */
326 },
327 .can_ifindex = if_nametoindex("can0"),
328 };
329
330 bind(sock, (struct sockaddr *)&baddr, sizeof(baddr));
331
332 /* for Address Claiming broadcast must be allowed */
333 int value = 1;
334 setsockopt(sock, SOL_SOCKET, SO_BROADCAST, &value, sizeof(value));
335
336 /* configure an advanced RX filter with the PGNs needed for Address Claiming */
337 const struct j1939_filter filt[] = {
338 {
339 .pgn = J1939_PGN_ADDRESS_CLAIMED,
340 .pgn_mask = J1939_PGN_PDU1_MAX,
341 }, {
342 .pgn = J1939_PGN_ADDRESS_REQUEST,
343 .pgn_mask = J1939_PGN_PDU1_MAX,
344 }, {
345 .pgn = J1939_PGN_ADDRESS_COMMANDED,
346 .pgn_mask = J1939_PGN_MAX,
347 },
348 };
349
350 setsockopt(sock, SOL_CAN_J1939, SO_J1939_FILTER, &filt, sizeof(filt));
351
352 uint64_t dat = htole64(name);
353 const struct sockaddr_can saddr = {
354 .can_family = AF_CAN,
355 .can_addr.j1939 = {
356 .pgn = J1939_PGN_ADDRESS_CLAIMED,
357 .addr = J1939_NO_ADDR,
358 },
359 };
360
361 /* Afterwards do a sendto(2) with data set to the NAME (Little Endian). If the
362 * NAME provided does not match the j1939.name provided to bind(2), EPROTO
363 * will be returned.
364 */
365 sendto(sock, &dat, sizeof(dat), 0, (const struct sockaddr *)&saddr, sizeof(saddr));
366
367If no-one else contests the address claim within 250ms after transmission, the
368kernel marks the NAME-SA assignment as valid. The valid assignment will be kept
369among other valid NAME-SA assignments. From that point, any socket bound to the
370NAME can send packets.
371
372If another ECU claims the address, the kernel will mark the NAME-SA assignment
373as expired. No socket bound to the NAME can send packets (other than address
374claims). To claim another address, a socket bound to the NAME must bind(2)
375again, but with only j1939.addr changed to the new SA, and must then send a
376valid address claim packet. This restarts the state machine in the kernel (and
377in any other participant on the bus) for this NAME.
378
379can-utils also includes the jacd tool, which can be used as a code example or
380as a default Address Claiming daemon.
381
382Send Examples
383-------------
384
385Static Addressing
386^^^^^^^^^^^^^^^^^
387
388This example will send a PGN (0x12300) from SA 0x20 to DA 0x30.
389
390Bind:
391
392.. code-block:: C
393
394 struct sockaddr_can baddr = {
395 .can_family = AF_CAN,
396 .can_addr.j1939 = {
397 .name = J1939_NO_NAME,
398 .addr = 0x20,
399 .pgn = J1939_NO_PGN,
400 },
401 .can_ifindex = if_nametoindex("can0"),
402 };
403
404 bind(sock, (struct sockaddr *)&baddr, sizeof(baddr));
405
406Now, the socket 'sock' is bound to the SA 0x20. Since no connect(2) was called,
407at this point we can use only sendto(2) or sendmsg(2).
408
409Send:
410
411.. code-block:: C
412
413 const struct sockaddr_can saddr = {
414 .can_family = AF_CAN,
415 .can_addr.j1939 = {
416 .name = J1939_NO_NAME,
417 .pgn = 0x12300,
418 .addr = 0x30,
419 },
420 };
421
422 sendto(sock, dat, sizeof(dat), 0, (const struct sockaddr *)&saddr, sizeof(saddr)); /* 'dat' is the payload buffer, e.g. uint8_t dat[4] */
diff --git a/MAINTAINERS b/MAINTAINERS
index c1fb8fb3b2ee..84bb34727f81 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3669,6 +3669,16 @@ F: include/uapi/linux/can/bcm.h
3669F: include/uapi/linux/can/raw.h 3669F: include/uapi/linux/can/raw.h
3670F: include/uapi/linux/can/gw.h 3670F: include/uapi/linux/can/gw.h
3671 3671
3672CAN-J1939 NETWORK LAYER
3673M: Robin van der Gracht <robin@protonic.nl>
3674M: Oleksij Rempel <o.rempel@pengutronix.de>
3675R: Pengutronix Kernel Team <kernel@pengutronix.de>
3676L: linux-can@vger.kernel.org
3677S: Maintained
3678F: Documentation/networking/j1939.rst
3679F: net/can/j1939/
3680F: include/uapi/linux/can/j1939.h
3681
3672CAPABILITIES 3682CAPABILITIES
3673M: Serge Hallyn <serge@hallyn.com> 3683M: Serge Hallyn <serge@hallyn.com>
3674L: linux-security-module@vger.kernel.org 3684L: linux-security-module@vger.kernel.org
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 0929c7d83e15..ac86be52b461 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -11,6 +11,7 @@
11#include <linux/if_arp.h> 11#include <linux/if_arp.h>
12#include <linux/workqueue.h> 12#include <linux/workqueue.h>
13#include <linux/can.h> 13#include <linux/can.h>
14#include <linux/can/can-ml.h>
14#include <linux/can/dev.h> 15#include <linux/can/dev.h>
15#include <linux/can/skb.h> 16#include <linux/can/skb.h>
16#include <linux/can/netlink.h> 17#include <linux/can/netlink.h>
@@ -713,11 +714,24 @@ struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
713 struct can_priv *priv; 714 struct can_priv *priv;
714 int size; 715 int size;
715 716
717 /* We put the driver's priv, the CAN mid layer priv and the
718 * echo skb into the netdevice's priv. The memory layout for
719 * the netdev_priv is like this:
720 *
721 * +-------------------------+
722 * | driver's priv |
723 * +-------------------------+
724 * | struct can_ml_priv |
725 * +-------------------------+
726 * | array of struct sk_buff |
727 * +-------------------------+
728 */
729
730 size = ALIGN(sizeof_priv, NETDEV_ALIGN) + sizeof(struct can_ml_priv);
731
716 if (echo_skb_max) 732 if (echo_skb_max)
717 size = ALIGN(sizeof_priv, sizeof(struct sk_buff *)) + 733 size = ALIGN(size, sizeof(struct sk_buff *)) +
718 echo_skb_max * sizeof(struct sk_buff *); 734 echo_skb_max * sizeof(struct sk_buff *);
719 else
720 size = sizeof_priv;
721 735
722 dev = alloc_netdev_mqs(size, "can%d", NET_NAME_UNKNOWN, can_setup, 736 dev = alloc_netdev_mqs(size, "can%d", NET_NAME_UNKNOWN, can_setup,
723 txqs, rxqs); 737 txqs, rxqs);
@@ -727,10 +741,12 @@ struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
727 priv = netdev_priv(dev); 741 priv = netdev_priv(dev);
728 priv->dev = dev; 742 priv->dev = dev;
729 743
744 dev->ml_priv = (void *)priv + ALIGN(sizeof_priv, NETDEV_ALIGN);
745
730 if (echo_skb_max) { 746 if (echo_skb_max) {
731 priv->echo_skb_max = echo_skb_max; 747 priv->echo_skb_max = echo_skb_max;
732 priv->echo_skb = (void *)priv + 748 priv->echo_skb = (void *)priv +
733 ALIGN(sizeof_priv, sizeof(struct sk_buff *)); 749 (size - echo_skb_max * sizeof(struct sk_buff *));
734 } 750 }
735 751
736 priv->state = CAN_STATE_STOPPED; 752 priv->state = CAN_STATE_STOPPED;
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index aa97dbc797b6..bb6032211043 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -55,6 +55,7 @@
55#include <linux/workqueue.h> 55#include <linux/workqueue.h>
56#include <linux/can.h> 56#include <linux/can.h>
57#include <linux/can/skb.h> 57#include <linux/can/skb.h>
58#include <linux/can/can-ml.h>
58 59
59MODULE_ALIAS_LDISC(N_SLCAN); 60MODULE_ALIAS_LDISC(N_SLCAN);
60MODULE_DESCRIPTION("serial line CAN interface"); 61MODULE_DESCRIPTION("serial line CAN interface");
@@ -514,6 +515,7 @@ static struct slcan *slc_alloc(void)
514 char name[IFNAMSIZ]; 515 char name[IFNAMSIZ];
515 struct net_device *dev = NULL; 516 struct net_device *dev = NULL;
516 struct slcan *sl; 517 struct slcan *sl;
518 int size;
517 519
518 for (i = 0; i < maxdev; i++) { 520 for (i = 0; i < maxdev; i++) {
519 dev = slcan_devs[i]; 521 dev = slcan_devs[i];
@@ -527,12 +529,14 @@ static struct slcan *slc_alloc(void)
527 return NULL; 529 return NULL;
528 530
529 sprintf(name, "slcan%d", i); 531 sprintf(name, "slcan%d", i);
530 dev = alloc_netdev(sizeof(*sl), name, NET_NAME_UNKNOWN, slc_setup); 532 size = ALIGN(sizeof(*sl), NETDEV_ALIGN) + sizeof(struct can_ml_priv);
533 dev = alloc_netdev(size, name, NET_NAME_UNKNOWN, slc_setup);
531 if (!dev) 534 if (!dev)
532 return NULL; 535 return NULL;
533 536
534 dev->base_addr = i; 537 dev->base_addr = i;
535 sl = netdev_priv(dev); 538 sl = netdev_priv(dev);
539 dev->ml_priv = (void *)sl + ALIGN(sizeof(*sl), NETDEV_ALIGN);
536 540
537 /* Initialize channel control data */ 541 /* Initialize channel control data */
538 sl->magic = SLCAN_MAGIC; 542 sl->magic = SLCAN_MAGIC;
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index daf27133887b..39ca14b0585d 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -46,6 +46,7 @@
46#include <linux/if_arp.h> 46#include <linux/if_arp.h>
47#include <linux/if_ether.h> 47#include <linux/if_ether.h>
48#include <linux/can.h> 48#include <linux/can.h>
49#include <linux/can/can-ml.h>
49#include <linux/can/dev.h> 50#include <linux/can/dev.h>
50#include <linux/can/skb.h> 51#include <linux/can/skb.h>
51#include <linux/slab.h> 52#include <linux/slab.h>
@@ -152,6 +153,7 @@ static void vcan_setup(struct net_device *dev)
152 dev->addr_len = 0; 153 dev->addr_len = 0;
153 dev->tx_queue_len = 0; 154 dev->tx_queue_len = 0;
154 dev->flags = IFF_NOARP; 155 dev->flags = IFF_NOARP;
156 dev->ml_priv = netdev_priv(dev);
155 157
156 /* set flags according to driver capabilities */ 158 /* set flags according to driver capabilities */
157 if (echo) 159 if (echo)
@@ -162,8 +164,9 @@ static void vcan_setup(struct net_device *dev)
162} 164}
163 165
164static struct rtnl_link_ops vcan_link_ops __read_mostly = { 166static struct rtnl_link_ops vcan_link_ops __read_mostly = {
165 .kind = DRV_NAME, 167 .kind = DRV_NAME,
166 .setup = vcan_setup, 168 .priv_size = sizeof(struct can_ml_priv),
169 .setup = vcan_setup,
167}; 170};
168 171
169static __init int vcan_init_module(void) 172static __init int vcan_init_module(void)
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index b2106292230e..d6ba9426be4d 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -18,6 +18,7 @@
18#include <linux/can/dev.h> 18#include <linux/can/dev.h>
19#include <linux/can/skb.h> 19#include <linux/can/skb.h>
20#include <linux/can/vxcan.h> 20#include <linux/can/vxcan.h>
21#include <linux/can/can-ml.h>
21#include <linux/slab.h> 22#include <linux/slab.h>
22#include <net/rtnetlink.h> 23#include <net/rtnetlink.h>
23 24
@@ -146,6 +147,7 @@ static void vxcan_setup(struct net_device *dev)
146 dev->flags = (IFF_NOARP|IFF_ECHO); 147 dev->flags = (IFF_NOARP|IFF_ECHO);
147 dev->netdev_ops = &vxcan_netdev_ops; 148 dev->netdev_ops = &vxcan_netdev_ops;
148 dev->needs_free_netdev = true; 149 dev->needs_free_netdev = true;
150 dev->ml_priv = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
149} 151}
150 152
151/* forward declaration for rtnl_create_link() */ 153/* forward declaration for rtnl_create_link() */
@@ -281,7 +283,7 @@ static struct net *vxcan_get_link_net(const struct net_device *dev)
281 283
282static struct rtnl_link_ops vxcan_link_ops = { 284static struct rtnl_link_ops vxcan_link_ops = {
283 .kind = DRV_NAME, 285 .kind = DRV_NAME,
284 .priv_size = sizeof(struct vxcan_priv), 286 .priv_size = ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN) + sizeof(struct can_ml_priv),
285 .setup = vxcan_setup, 287 .setup = vxcan_setup,
286 .newlink = vxcan_newlink, 288 .newlink = vxcan_newlink,
287 .dellink = vxcan_dellink, 289 .dellink = vxcan_dellink,
diff --git a/include/linux/can/can-ml.h b/include/linux/can/can-ml.h
new file mode 100644
index 000000000000..2f5d731ae251
--- /dev/null
+++ b/include/linux/can/can-ml.h
@@ -0,0 +1,68 @@
1/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
2/* Copyright (c) 2002-2007 Volkswagen Group Electronic Research
3 * Copyright (c) 2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
4 *
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of Volkswagen nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * Alternatively, provided that this notice is retained in full, this
20 * software may be distributed under the terms of the GNU General
21 * Public License ("GPL") version 2, in which case the provisions of the
22 * GPL apply INSTEAD OF those given above.
23 *
24 * The provided data structures and external interfaces from this code
25 * are not restricted to be used by modules with a GPL compatible license.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
38 * DAMAGE.
39 *
40 */
41
42#ifndef CAN_ML_H
43#define CAN_ML_H
44
45#include <linux/can.h>
46#include <linux/list.h>
47
48#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
49#define CAN_EFF_RCV_HASH_BITS 10
50#define CAN_EFF_RCV_ARRAY_SZ (1 << CAN_EFF_RCV_HASH_BITS)
51
52enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_MAX };
53
54struct can_dev_rcv_lists {
55 struct hlist_head rx[RX_MAX];
56 struct hlist_head rx_sff[CAN_SFF_RCV_ARRAY_SZ];
57 struct hlist_head rx_eff[CAN_EFF_RCV_ARRAY_SZ];
58 int entries;
59};
60
61struct can_ml_priv {
62 struct can_dev_rcv_lists dev_rcv_lists;
63#ifdef CAN_J1939
64 struct j1939_priv *j1939_priv;
65#endif
66};
67
68#endif /* CAN_ML_H */
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index 708c10d3417a..8339071ab08b 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -41,6 +41,14 @@ struct can_proto {
41 struct proto *prot; 41 struct proto *prot;
42}; 42};
43 43
44/* required_size
45 * macro to find the minimum size of a struct
46 * that includes a requested member
47 */
48#define CAN_REQUIRED_SIZE(struct_type, member) \
49 (offsetof(typeof(struct_type), member) + \
50 sizeof(((typeof(struct_type) *)(NULL))->member))
51
44/* function prototypes for the CAN networklayer core (af_can.c) */ 52/* function prototypes for the CAN networklayer core (af_can.c) */
45 53
46extern int can_proto_register(const struct can_proto *cp); 54extern int can_proto_register(const struct can_proto *cp);
diff --git a/include/net/netns/can.h b/include/net/netns/can.h
index ca9bd9fba5b5..b6ab7d1530d7 100644
--- a/include/net/netns/can.h
+++ b/include/net/netns/can.h
@@ -9,8 +9,8 @@
9#include <linux/spinlock.h> 9#include <linux/spinlock.h>
10 10
11struct can_dev_rcv_lists; 11struct can_dev_rcv_lists;
12struct s_stats; 12struct can_pkg_stats;
13struct s_pstats; 13struct can_rcv_lists_stats;
14 14
15struct netns_can { 15struct netns_can {
16#if IS_ENABLED(CONFIG_PROC_FS) 16#if IS_ENABLED(CONFIG_PROC_FS)
@@ -28,11 +28,11 @@ struct netns_can {
28#endif 28#endif
29 29
30 /* receive filters subscribed for 'all' CAN devices */ 30 /* receive filters subscribed for 'all' CAN devices */
31 struct can_dev_rcv_lists *can_rx_alldev_list; 31 struct can_dev_rcv_lists *rx_alldev_list;
32 spinlock_t can_rcvlists_lock; 32 spinlock_t rcvlists_lock;
33 struct timer_list can_stattimer;/* timer for statistics update */ 33 struct timer_list stattimer; /* timer for statistics update */
34 struct s_stats *can_stats; /* packet statistics */ 34 struct can_pkg_stats *pkg_stats;
35 struct s_pstats *can_pstats; /* receive list statistics */ 35 struct can_rcv_lists_stats *rcv_lists_stats;
36 36
37 /* CAN GW per-net gateway jobs */ 37 /* CAN GW per-net gateway jobs */
38 struct hlist_head cgw_list; 38 struct hlist_head cgw_list;
diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h
index 0afb7d8e867f..1e988fdeba34 100644
--- a/include/uapi/linux/can.h
+++ b/include/uapi/linux/can.h
@@ -157,7 +157,8 @@ struct canfd_frame {
157#define CAN_TP20 4 /* VAG Transport Protocol v2.0 */ 157#define CAN_TP20 4 /* VAG Transport Protocol v2.0 */
158#define CAN_MCNET 5 /* Bosch MCNet */ 158#define CAN_MCNET 5 /* Bosch MCNet */
159#define CAN_ISOTP 6 /* ISO 15765-2 Transport Protocol */ 159#define CAN_ISOTP 6 /* ISO 15765-2 Transport Protocol */
160#define CAN_NPROTO 7 160#define CAN_J1939 7 /* SAE J1939 */
161#define CAN_NPROTO 8
161 162
162#define SOL_CAN_BASE 100 163#define SOL_CAN_BASE 100
163 164
@@ -174,6 +175,23 @@ struct sockaddr_can {
174 /* transport protocol class address information (e.g. ISOTP) */ 175 /* transport protocol class address information (e.g. ISOTP) */
175 struct { canid_t rx_id, tx_id; } tp; 176 struct { canid_t rx_id, tx_id; } tp;
176 177
178 /* J1939 address information */
179 struct {
180 /* 8 byte name when using dynamic addressing */
181 __u64 name;
182
183 /* pgn:
184 * 8 bit: PS in PDU2 case, else 0
185 * 8 bit: PF
186 * 1 bit: DP
187 * 1 bit: reserved
188 */
189 __u32 pgn;
190
191 /* 1 byte address */
192 __u8 addr;
193 } j1939;
194
177 /* reserved for future CAN protocols address information */ 195 /* reserved for future CAN protocols address information */
178 } can_addr; 196 } can_addr;
179}; 197};
diff --git a/include/uapi/linux/can/j1939.h b/include/uapi/linux/can/j1939.h
new file mode 100644
index 000000000000..c32325342d30
--- /dev/null
+++ b/include/uapi/linux/can/j1939.h
@@ -0,0 +1,99 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2/*
3 * j1939.h
4 *
5 * Copyright (c) 2010-2011 EIA Electronics
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _UAPI_CAN_J1939_H_
13#define _UAPI_CAN_J1939_H_
14
15#include <linux/types.h>
16#include <linux/socket.h>
17#include <linux/can.h>
18
19#define J1939_MAX_UNICAST_ADDR 0xfd
20#define J1939_IDLE_ADDR 0xfe
21#define J1939_NO_ADDR 0xff /* == broadcast or no addr */
22#define J1939_NO_NAME 0
23#define J1939_PGN_REQUEST 0x0ea00 /* Request PG */
24#define J1939_PGN_ADDRESS_CLAIMED 0x0ee00 /* Address Claimed */
25#define J1939_PGN_ADDRESS_COMMANDED 0x0fed8 /* Commanded Address */
26#define J1939_PGN_PDU1_MAX 0x3ff00
27#define J1939_PGN_MAX 0x3ffff
28#define J1939_NO_PGN 0x40000
29
30/* J1939 Parameter Group Number
31 *
32 * bit 0-7 : PDU Specific (PS)
33 * bit 8-15 : PDU Format (PF)
34 * bit 16 : Data Page (DP)
35 * bit 17 : Reserved (R)
36 * bit 19-31 : set to zero
37 */
38typedef __u32 pgn_t;
39
40/* J1939 Priority
41 *
42 * bit 0-2 : Priority (P)
43 * bit 3-7 : set to zero
44 */
45typedef __u8 priority_t;
46
47/* J1939 NAME
48 *
49 * bit 0-20 : Identity Number
50 * bit 21-31 : Manufacturer Code
51 * bit 32-34 : ECU Instance
52 * bit 35-39 : Function Instance
53 * bit 40-47 : Function
54 * bit 48 : Reserved
55 * bit 49-55 : Vehicle System
56 * bit 56-59 : Vehicle System Instance
57 * bit 60-62 : Industry Group
58 * bit 63 : Arbitrary Address Capable
59 */
60typedef __u64 name_t;
61
62/* J1939 socket options */
63#define SOL_CAN_J1939 (SOL_CAN_BASE + CAN_J1939)
64enum {
65 SO_J1939_FILTER = 1, /* set filters */
66 SO_J1939_PROMISC = 2, /* set/clr promiscuous mode */
67 SO_J1939_SEND_PRIO = 3,
68 SO_J1939_ERRQUEUE = 4,
69};
70
71enum {
72 SCM_J1939_DEST_ADDR = 1,
73 SCM_J1939_DEST_NAME = 2,
74 SCM_J1939_PRIO = 3,
75 SCM_J1939_ERRQUEUE = 4,
76};
77
78enum {
79 J1939_NLA_PAD,
80 J1939_NLA_BYTES_ACKED,
81};
82
83enum {
84 J1939_EE_INFO_NONE,
85 J1939_EE_INFO_TX_ABORT,
86};
87
88struct j1939_filter {
89 name_t name;
90 name_t name_mask;
91 pgn_t pgn;
92 pgn_t pgn_mask;
93 __u8 addr;
94 __u8 addr_mask;
95};
96
97#define J1939_FILTER_MAX 512 /* maximum number of j1939_filter set via setsockopt() */
98
99#endif /* !_UAPI_CAN_J1939_H_ */
diff --git a/net/can/Kconfig b/net/can/Kconfig
index d4319aa3e1b1..d77042752457 100644
--- a/net/can/Kconfig
+++ b/net/can/Kconfig
@@ -53,6 +53,8 @@ config CAN_GW
53 They can be modified with AND/OR/XOR/SET operations as configured 53 They can be modified with AND/OR/XOR/SET operations as configured
54 by the netlink configuration interface known e.g. from iptables. 54 by the netlink configuration interface known e.g. from iptables.
55 55
56source "net/can/j1939/Kconfig"
57
56source "drivers/net/can/Kconfig" 58source "drivers/net/can/Kconfig"
57 59
58endif 60endif
diff --git a/net/can/Makefile b/net/can/Makefile
index 1242bbbfe57f..08bd217fc051 100644
--- a/net/can/Makefile
+++ b/net/can/Makefile
@@ -15,3 +15,5 @@ can-bcm-y := bcm.o
15 15
16obj-$(CONFIG_CAN_GW) += can-gw.o 16obj-$(CONFIG_CAN_GW) += can-gw.o
17can-gw-y := gw.o 17can-gw-y := gw.o
18
19obj-$(CONFIG_CAN_J1939) += j1939/
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 9a9a51847c7c..5518a7d9eed9 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -58,6 +58,7 @@
58#include <linux/can.h> 58#include <linux/can.h>
59#include <linux/can/core.h> 59#include <linux/can/core.h>
60#include <linux/can/skb.h> 60#include <linux/can/skb.h>
61#include <linux/can/can-ml.h>
61#include <linux/ratelimit.h> 62#include <linux/ratelimit.h>
62#include <net/net_namespace.h> 63#include <net/net_namespace.h>
63#include <net/sock.h> 64#include <net/sock.h>
@@ -198,7 +199,7 @@ int can_send(struct sk_buff *skb, int loop)
198{ 199{
199 struct sk_buff *newskb = NULL; 200 struct sk_buff *newskb = NULL;
200 struct canfd_frame *cfd = (struct canfd_frame *)skb->data; 201 struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
201 struct s_stats *can_stats = dev_net(skb->dev)->can.can_stats; 202 struct can_pkg_stats *pkg_stats = dev_net(skb->dev)->can.pkg_stats;
202 int err = -EINVAL; 203 int err = -EINVAL;
203 204
204 if (skb->len == CAN_MTU) { 205 if (skb->len == CAN_MTU) {
@@ -285,8 +286,8 @@ int can_send(struct sk_buff *skb, int loop)
285 netif_rx_ni(newskb); 286 netif_rx_ni(newskb);
286 287
287 /* update statistics */ 288 /* update statistics */
288 can_stats->tx_frames++; 289 pkg_stats->tx_frames++;
289 can_stats->tx_frames_delta++; 290 pkg_stats->tx_frames_delta++;
290 291
291 return 0; 292 return 0;
292 293
@@ -298,13 +299,15 @@ EXPORT_SYMBOL(can_send);
298 299
299/* af_can rx path */ 300/* af_can rx path */
300 301
301static struct can_dev_rcv_lists *find_dev_rcv_lists(struct net *net, 302static struct can_dev_rcv_lists *can_dev_rcv_lists_find(struct net *net,
302 struct net_device *dev) 303 struct net_device *dev)
303{ 304{
304 if (!dev) 305 if (dev) {
305 return net->can.can_rx_alldev_list; 306 struct can_ml_priv *ml_priv = dev->ml_priv;
306 else 307 return &ml_priv->dev_rcv_lists;
307 return (struct can_dev_rcv_lists *)dev->ml_priv; 308 } else {
309 return net->can.rx_alldev_list;
310 }
308} 311}
309 312
310/** 313/**
@@ -331,7 +334,7 @@ static unsigned int effhash(canid_t can_id)
331} 334}
332 335
333/** 336/**
334 * find_rcv_list - determine optimal filterlist inside device filter struct 337 * can_rcv_list_find - determine optimal filterlist inside device filter struct
335 * @can_id: pointer to CAN identifier of a given can_filter 338 * @can_id: pointer to CAN identifier of a given can_filter
336 * @mask: pointer to CAN mask of a given can_filter 339 * @mask: pointer to CAN mask of a given can_filter
337 * @d: pointer to the device filter struct 340 * @d: pointer to the device filter struct
@@ -357,8 +360,8 @@ static unsigned int effhash(canid_t can_id)
357 * Constistency checked mask. 360 * Constistency checked mask.
358 * Reduced can_id to have a preprocessed filter compare value. 361 * Reduced can_id to have a preprocessed filter compare value.
359 */ 362 */
360static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask, 363static struct hlist_head *can_rcv_list_find(canid_t *can_id, canid_t *mask,
361 struct can_dev_rcv_lists *d) 364 struct can_dev_rcv_lists *dev_rcv_lists)
362{ 365{
363 canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */ 366 canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */
364 367
@@ -366,7 +369,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
366 if (*mask & CAN_ERR_FLAG) { 369 if (*mask & CAN_ERR_FLAG) {
367 /* clear CAN_ERR_FLAG in filter entry */ 370 /* clear CAN_ERR_FLAG in filter entry */
368 *mask &= CAN_ERR_MASK; 371 *mask &= CAN_ERR_MASK;
369 return &d->rx[RX_ERR]; 372 return &dev_rcv_lists->rx[RX_ERR];
370 } 373 }
371 374
372 /* with cleared CAN_ERR_FLAG we have a simple mask/value filterpair */ 375 /* with cleared CAN_ERR_FLAG we have a simple mask/value filterpair */
@@ -382,26 +385,26 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
382 385
383 /* inverse can_id/can_mask filter */ 386 /* inverse can_id/can_mask filter */
384 if (inv) 387 if (inv)
385 return &d->rx[RX_INV]; 388 return &dev_rcv_lists->rx[RX_INV];
386 389
387 /* mask == 0 => no condition testing at receive time */ 390 /* mask == 0 => no condition testing at receive time */
388 if (!(*mask)) 391 if (!(*mask))
389 return &d->rx[RX_ALL]; 392 return &dev_rcv_lists->rx[RX_ALL];
390 393
391 /* extra filterlists for the subscription of a single non-RTR can_id */ 394 /* extra filterlists for the subscription of a single non-RTR can_id */
392 if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS) && 395 if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS) &&
393 !(*can_id & CAN_RTR_FLAG)) { 396 !(*can_id & CAN_RTR_FLAG)) {
394 if (*can_id & CAN_EFF_FLAG) { 397 if (*can_id & CAN_EFF_FLAG) {
395 if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS)) 398 if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS))
396 return &d->rx_eff[effhash(*can_id)]; 399 return &dev_rcv_lists->rx_eff[effhash(*can_id)];
397 } else { 400 } else {
398 if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS)) 401 if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
399 return &d->rx_sff[*can_id]; 402 return &dev_rcv_lists->rx_sff[*can_id];
400 } 403 }
401 } 404 }
402 405
403 /* default: filter via can_id/can_mask */ 406 /* default: filter via can_id/can_mask */
404 return &d->rx[RX_FIL]; 407 return &dev_rcv_lists->rx[RX_FIL];
405} 408}
406 409
407/** 410/**
@@ -438,10 +441,10 @@ int can_rx_register(struct net *net, struct net_device *dev, canid_t can_id,
438 canid_t mask, void (*func)(struct sk_buff *, void *), 441 canid_t mask, void (*func)(struct sk_buff *, void *),
439 void *data, char *ident, struct sock *sk) 442 void *data, char *ident, struct sock *sk)
440{ 443{
441 struct receiver *r; 444 struct receiver *rcv;
442 struct hlist_head *rl; 445 struct hlist_head *rcv_list;
443 struct can_dev_rcv_lists *d; 446 struct can_dev_rcv_lists *dev_rcv_lists;
444 struct s_pstats *can_pstats = net->can.can_pstats; 447 struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats;
445 int err = 0; 448 int err = 0;
446 449
447 /* insert new receiver (dev,canid,mask) -> (func,data) */ 450 /* insert new receiver (dev,canid,mask) -> (func,data) */
@@ -452,36 +455,30 @@ int can_rx_register(struct net *net, struct net_device *dev, canid_t can_id,
452 if (dev && !net_eq(net, dev_net(dev))) 455 if (dev && !net_eq(net, dev_net(dev)))
453 return -ENODEV; 456 return -ENODEV;
454 457
455 r = kmem_cache_alloc(rcv_cache, GFP_KERNEL); 458 rcv = kmem_cache_alloc(rcv_cache, GFP_KERNEL);
456 if (!r) 459 if (!rcv)
457 return -ENOMEM; 460 return -ENOMEM;
458 461
459 spin_lock(&net->can.can_rcvlists_lock); 462 spin_lock_bh(&net->can.rcvlists_lock);
460 463
461 d = find_dev_rcv_lists(net, dev); 464 dev_rcv_lists = can_dev_rcv_lists_find(net, dev);
462 if (d) { 465 rcv_list = can_rcv_list_find(&can_id, &mask, dev_rcv_lists);
463 rl = find_rcv_list(&can_id, &mask, d);
464 466
465 r->can_id = can_id; 467 rcv->can_id = can_id;
466 r->mask = mask; 468 rcv->mask = mask;
467 r->matches = 0; 469 rcv->matches = 0;
468 r->func = func; 470 rcv->func = func;
469 r->data = data; 471 rcv->data = data;
470 r->ident = ident; 472 rcv->ident = ident;
471 r->sk = sk; 473 rcv->sk = sk;
472 474
473 hlist_add_head_rcu(&r->list, rl); 475 hlist_add_head_rcu(&rcv->list, rcv_list);
474 d->entries++; 476 dev_rcv_lists->entries++;
475
476 can_pstats->rcv_entries++;
477 if (can_pstats->rcv_entries_max < can_pstats->rcv_entries)
478 can_pstats->rcv_entries_max = can_pstats->rcv_entries;
479 } else {
480 kmem_cache_free(rcv_cache, r);
481 err = -ENODEV;
482 }
483 477
484 spin_unlock(&net->can.can_rcvlists_lock); 478 rcv_lists_stats->rcv_entries++;
479 rcv_lists_stats->rcv_entries_max = max(rcv_lists_stats->rcv_entries_max,
480 rcv_lists_stats->rcv_entries);
481 spin_unlock_bh(&net->can.rcvlists_lock);
485 482
486 return err; 483 return err;
487} 484}
@@ -490,10 +487,10 @@ EXPORT_SYMBOL(can_rx_register);
490/* can_rx_delete_receiver - rcu callback for single receiver entry removal */ 487/* can_rx_delete_receiver - rcu callback for single receiver entry removal */
491static void can_rx_delete_receiver(struct rcu_head *rp) 488static void can_rx_delete_receiver(struct rcu_head *rp)
492{ 489{
493 struct receiver *r = container_of(rp, struct receiver, rcu); 490 struct receiver *rcv = container_of(rp, struct receiver, rcu);
494 struct sock *sk = r->sk; 491 struct sock *sk = rcv->sk;
495 492
496 kmem_cache_free(rcv_cache, r); 493 kmem_cache_free(rcv_cache, rcv);
497 if (sk) 494 if (sk)
498 sock_put(sk); 495 sock_put(sk);
499} 496}
@@ -513,10 +510,10 @@ void can_rx_unregister(struct net *net, struct net_device *dev, canid_t can_id,
513 canid_t mask, void (*func)(struct sk_buff *, void *), 510 canid_t mask, void (*func)(struct sk_buff *, void *),
514 void *data) 511 void *data)
515{ 512{
516 struct receiver *r = NULL; 513 struct receiver *rcv = NULL;
517 struct hlist_head *rl; 514 struct hlist_head *rcv_list;
518 struct s_pstats *can_pstats = net->can.can_pstats; 515 struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats;
519 struct can_dev_rcv_lists *d; 516 struct can_dev_rcv_lists *dev_rcv_lists;
520 517
521 if (dev && dev->type != ARPHRD_CAN) 518 if (dev && dev->type != ARPHRD_CAN)
522 return; 519 return;
@@ -524,83 +521,69 @@ void can_rx_unregister(struct net *net, struct net_device *dev, canid_t can_id,
524 if (dev && !net_eq(net, dev_net(dev))) 521 if (dev && !net_eq(net, dev_net(dev)))
525 return; 522 return;
526 523
527 spin_lock(&net->can.can_rcvlists_lock); 524 spin_lock_bh(&net->can.rcvlists_lock);
528
529 d = find_dev_rcv_lists(net, dev);
530 if (!d) {
531 pr_err("BUG: receive list not found for dev %s, id %03X, mask %03X\n",
532 DNAME(dev), can_id, mask);
533 goto out;
534 }
535 525
536 rl = find_rcv_list(&can_id, &mask, d); 526 dev_rcv_lists = can_dev_rcv_lists_find(net, dev);
527 rcv_list = can_rcv_list_find(&can_id, &mask, dev_rcv_lists);
537 528
538 /* Search the receiver list for the item to delete. This should 529 /* Search the receiver list for the item to delete. This should
539 * exist, since no receiver may be unregistered that hasn't 530 * exist, since no receiver may be unregistered that hasn't
540 * been registered before. 531 * been registered before.
541 */ 532 */
542 533 hlist_for_each_entry_rcu(rcv, rcv_list, list) {
543 hlist_for_each_entry_rcu(r, rl, list) { 534 if (rcv->can_id == can_id && rcv->mask == mask &&
544 if (r->can_id == can_id && r->mask == mask && 535 rcv->func == func && rcv->data == data)
545 r->func == func && r->data == data)
546 break; 536 break;
547 } 537 }
548 538
549 /* Check for bugs in CAN protocol implementations using af_can.c: 539 /* Check for bugs in CAN protocol implementations using af_can.c:
550 * 'r' will be NULL if no matching list item was found for removal. 540 * 'rcv' will be NULL if no matching list item was found for removal.
551 */ 541 */
552 542 if (!rcv) {
553 if (!r) {
554 WARN(1, "BUG: receive list entry not found for dev %s, id %03X, mask %03X\n", 543 WARN(1, "BUG: receive list entry not found for dev %s, id %03X, mask %03X\n",
555 DNAME(dev), can_id, mask); 544 DNAME(dev), can_id, mask);
556 goto out; 545 goto out;
557 } 546 }
558 547
559 hlist_del_rcu(&r->list); 548 hlist_del_rcu(&rcv->list);
560 d->entries--; 549 dev_rcv_lists->entries--;
561 550
562 if (can_pstats->rcv_entries > 0) 551 if (rcv_lists_stats->rcv_entries > 0)
563 can_pstats->rcv_entries--; 552 rcv_lists_stats->rcv_entries--;
564
565 /* remove device structure requested by NETDEV_UNREGISTER */
566 if (d->remove_on_zero_entries && !d->entries) {
567 kfree(d);
568 dev->ml_priv = NULL;
569 }
570 553
571 out: 554 out:
572 spin_unlock(&net->can.can_rcvlists_lock); 555 spin_unlock_bh(&net->can.rcvlists_lock);
573 556
574 /* schedule the receiver item for deletion */ 557 /* schedule the receiver item for deletion */
575 if (r) { 558 if (rcv) {
576 if (r->sk) 559 if (rcv->sk)
577 sock_hold(r->sk); 560 sock_hold(rcv->sk);
578 call_rcu(&r->rcu, can_rx_delete_receiver); 561 call_rcu(&rcv->rcu, can_rx_delete_receiver);
579 } 562 }
580} 563}
581EXPORT_SYMBOL(can_rx_unregister); 564EXPORT_SYMBOL(can_rx_unregister);
582 565
583static inline void deliver(struct sk_buff *skb, struct receiver *r) 566static inline void deliver(struct sk_buff *skb, struct receiver *rcv)
584{ 567{
585 r->func(skb, r->data); 568 rcv->func(skb, rcv->data);
586 r->matches++; 569 rcv->matches++;
587} 570}
588 571
589static int can_rcv_filter(struct can_dev_rcv_lists *d, struct sk_buff *skb) 572static int can_rcv_filter(struct can_dev_rcv_lists *dev_rcv_lists, struct sk_buff *skb)
590{ 573{
591 struct receiver *r; 574 struct receiver *rcv;
592 int matches = 0; 575 int matches = 0;
593 struct can_frame *cf = (struct can_frame *)skb->data; 576 struct can_frame *cf = (struct can_frame *)skb->data;
594 canid_t can_id = cf->can_id; 577 canid_t can_id = cf->can_id;
595 578
596 if (d->entries == 0) 579 if (dev_rcv_lists->entries == 0)
597 return 0; 580 return 0;
598 581
599 if (can_id & CAN_ERR_FLAG) { 582 if (can_id & CAN_ERR_FLAG) {
600 /* check for error message frame entries only */ 583 /* check for error message frame entries only */
601 hlist_for_each_entry_rcu(r, &d->rx[RX_ERR], list) { 584 hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_ERR], list) {
602 if (can_id & r->mask) { 585 if (can_id & rcv->mask) {
603 deliver(skb, r); 586 deliver(skb, rcv);
604 matches++; 587 matches++;
605 } 588 }
606 } 589 }
@@ -608,23 +591,23 @@ static int can_rcv_filter(struct can_dev_rcv_lists *d, struct sk_buff *skb)
608 } 591 }
609 592
610 /* check for unfiltered entries */ 593 /* check for unfiltered entries */
611 hlist_for_each_entry_rcu(r, &d->rx[RX_ALL], list) { 594 hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_ALL], list) {
612 deliver(skb, r); 595 deliver(skb, rcv);
613 matches++; 596 matches++;
614 } 597 }
615 598
616 /* check for can_id/mask entries */ 599 /* check for can_id/mask entries */
617 hlist_for_each_entry_rcu(r, &d->rx[RX_FIL], list) { 600 hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_FIL], list) {
618 if ((can_id & r->mask) == r->can_id) { 601 if ((can_id & rcv->mask) == rcv->can_id) {
619 deliver(skb, r); 602 deliver(skb, rcv);
620 matches++; 603 matches++;
621 } 604 }
622 } 605 }
623 606
624 /* check for inverted can_id/mask entries */ 607 /* check for inverted can_id/mask entries */
625 hlist_for_each_entry_rcu(r, &d->rx[RX_INV], list) { 608 hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_INV], list) {
626 if ((can_id & r->mask) != r->can_id) { 609 if ((can_id & rcv->mask) != rcv->can_id) {
627 deliver(skb, r); 610 deliver(skb, rcv);
628 matches++; 611 matches++;
629 } 612 }
630 } 613 }
@@ -634,16 +617,16 @@ static int can_rcv_filter(struct can_dev_rcv_lists *d, struct sk_buff *skb)
634 return matches; 617 return matches;
635 618
636 if (can_id & CAN_EFF_FLAG) { 619 if (can_id & CAN_EFF_FLAG) {
637 hlist_for_each_entry_rcu(r, &d->rx_eff[effhash(can_id)], list) { 620 hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx_eff[effhash(can_id)], list) {
638 if (r->can_id == can_id) { 621 if (rcv->can_id == can_id) {
639 deliver(skb, r); 622 deliver(skb, rcv);
640 matches++; 623 matches++;
641 } 624 }
642 } 625 }
643 } else { 626 } else {
644 can_id &= CAN_SFF_MASK; 627 can_id &= CAN_SFF_MASK;
645 hlist_for_each_entry_rcu(r, &d->rx_sff[can_id], list) { 628 hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx_sff[can_id], list) {
646 deliver(skb, r); 629 deliver(skb, rcv);
647 matches++; 630 matches++;
648 } 631 }
649 } 632 }
@@ -653,14 +636,14 @@ static int can_rcv_filter(struct can_dev_rcv_lists *d, struct sk_buff *skb)
653 636
654static void can_receive(struct sk_buff *skb, struct net_device *dev) 637static void can_receive(struct sk_buff *skb, struct net_device *dev)
655{ 638{
656 struct can_dev_rcv_lists *d; 639 struct can_dev_rcv_lists *dev_rcv_lists;
657 struct net *net = dev_net(dev); 640 struct net *net = dev_net(dev);
658 struct s_stats *can_stats = net->can.can_stats; 641 struct can_pkg_stats *pkg_stats = net->can.pkg_stats;
659 int matches; 642 int matches;
660 643
661 /* update statistics */ 644 /* update statistics */
662 can_stats->rx_frames++; 645 pkg_stats->rx_frames++;
663 can_stats->rx_frames_delta++; 646 pkg_stats->rx_frames_delta++;
664 647
665 /* create non-zero unique skb identifier together with *skb */ 648 /* create non-zero unique skb identifier together with *skb */
666 while (!(can_skb_prv(skb)->skbcnt)) 649 while (!(can_skb_prv(skb)->skbcnt))
@@ -669,12 +652,11 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev)
669 rcu_read_lock(); 652 rcu_read_lock();
670 653
671 /* deliver the packet to sockets listening on all devices */ 654 /* deliver the packet to sockets listening on all devices */
672 matches = can_rcv_filter(net->can.can_rx_alldev_list, skb); 655 matches = can_rcv_filter(net->can.rx_alldev_list, skb);
673 656
674 /* find receive list for this device */ 657 /* find receive list for this device */
675 d = find_dev_rcv_lists(net, dev); 658 dev_rcv_lists = can_dev_rcv_lists_find(net, dev);
676 if (d) 659 matches += can_rcv_filter(dev_rcv_lists, skb);
677 matches += can_rcv_filter(d, skb);
678 660
679 rcu_read_unlock(); 661 rcu_read_unlock();
680 662
@@ -682,8 +664,8 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev)
682 consume_skb(skb); 664 consume_skb(skb);
683 665
684 if (matches > 0) { 666 if (matches > 0) {
685 can_stats->matches++; 667 pkg_stats->matches++;
686 can_stats->matches_delta++; 668 pkg_stats->matches_delta++;
687 } 669 }
688} 670}
689 671
@@ -789,41 +771,14 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
789 void *ptr) 771 void *ptr)
790{ 772{
791 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 773 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
792 struct can_dev_rcv_lists *d;
793 774
794 if (dev->type != ARPHRD_CAN) 775 if (dev->type != ARPHRD_CAN)
795 return NOTIFY_DONE; 776 return NOTIFY_DONE;
796 777
797 switch (msg) { 778 switch (msg) {
798 case NETDEV_REGISTER: 779 case NETDEV_REGISTER:
799 780 WARN(!dev->ml_priv,
800 /* create new dev_rcv_lists for this device */ 781 "No CAN mid layer private allocated, please fix your driver and use alloc_candev()!\n");
801 d = kzalloc(sizeof(*d), GFP_KERNEL);
802 if (!d)
803 return NOTIFY_DONE;
804 BUG_ON(dev->ml_priv);
805 dev->ml_priv = d;
806
807 break;
808
809 case NETDEV_UNREGISTER:
810 spin_lock(&dev_net(dev)->can.can_rcvlists_lock);
811
812 d = dev->ml_priv;
813 if (d) {
814 if (d->entries) {
815 d->remove_on_zero_entries = 1;
816 } else {
817 kfree(d);
818 dev->ml_priv = NULL;
819 }
820 } else {
821 pr_err("can: notifier: receive list not found for dev %s\n",
822 dev->name);
823 }
824
825 spin_unlock(&dev_net(dev)->can.can_rcvlists_lock);
826
827 break; 782 break;
828 } 783 }
829 784
@@ -832,66 +787,51 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
832 787
833static int can_pernet_init(struct net *net) 788static int can_pernet_init(struct net *net)
834{ 789{
835 spin_lock_init(&net->can.can_rcvlists_lock); 790 spin_lock_init(&net->can.rcvlists_lock);
836 net->can.can_rx_alldev_list = 791 net->can.rx_alldev_list =
837 kzalloc(sizeof(*net->can.can_rx_alldev_list), GFP_KERNEL); 792 kzalloc(sizeof(*net->can.rx_alldev_list), GFP_KERNEL);
838 if (!net->can.can_rx_alldev_list) 793 if (!net->can.rx_alldev_list)
839 goto out; 794 goto out;
840 net->can.can_stats = kzalloc(sizeof(*net->can.can_stats), GFP_KERNEL); 795 net->can.pkg_stats = kzalloc(sizeof(*net->can.pkg_stats), GFP_KERNEL);
841 if (!net->can.can_stats) 796 if (!net->can.pkg_stats)
842 goto out_free_alldev_list; 797 goto out_free_rx_alldev_list;
843 net->can.can_pstats = kzalloc(sizeof(*net->can.can_pstats), GFP_KERNEL); 798 net->can.rcv_lists_stats = kzalloc(sizeof(*net->can.rcv_lists_stats), GFP_KERNEL);
844 if (!net->can.can_pstats) 799 if (!net->can.rcv_lists_stats)
845 goto out_free_can_stats; 800 goto out_free_pkg_stats;
846 801
847 if (IS_ENABLED(CONFIG_PROC_FS)) { 802 if (IS_ENABLED(CONFIG_PROC_FS)) {
848 /* the statistics are updated every second (timer triggered) */ 803 /* the statistics are updated every second (timer triggered) */
849 if (stats_timer) { 804 if (stats_timer) {
850 timer_setup(&net->can.can_stattimer, can_stat_update, 805 timer_setup(&net->can.stattimer, can_stat_update,
851 0); 806 0);
852 mod_timer(&net->can.can_stattimer, 807 mod_timer(&net->can.stattimer,
853 round_jiffies(jiffies + HZ)); 808 round_jiffies(jiffies + HZ));
854 } 809 }
855 net->can.can_stats->jiffies_init = jiffies; 810 net->can.pkg_stats->jiffies_init = jiffies;
856 can_init_proc(net); 811 can_init_proc(net);
857 } 812 }
858 813
859 return 0; 814 return 0;
860 815
861 out_free_can_stats: 816 out_free_pkg_stats:
862 kfree(net->can.can_stats); 817 kfree(net->can.pkg_stats);
863 out_free_alldev_list: 818 out_free_rx_alldev_list:
864 kfree(net->can.can_rx_alldev_list); 819 kfree(net->can.rx_alldev_list);
865 out: 820 out:
866 return -ENOMEM; 821 return -ENOMEM;
867} 822}
868 823
869static void can_pernet_exit(struct net *net) 824static void can_pernet_exit(struct net *net)
870{ 825{
871 struct net_device *dev;
872
873 if (IS_ENABLED(CONFIG_PROC_FS)) { 826 if (IS_ENABLED(CONFIG_PROC_FS)) {
874 can_remove_proc(net); 827 can_remove_proc(net);
875 if (stats_timer) 828 if (stats_timer)
876 del_timer_sync(&net->can.can_stattimer); 829 del_timer_sync(&net->can.stattimer);
877 } 830 }
878 831
879 /* remove created dev_rcv_lists from still registered CAN devices */ 832 kfree(net->can.rx_alldev_list);
880 rcu_read_lock(); 833 kfree(net->can.pkg_stats);
881 for_each_netdev_rcu(net, dev) { 834 kfree(net->can.rcv_lists_stats);
882 if (dev->type == ARPHRD_CAN && dev->ml_priv) {
883 struct can_dev_rcv_lists *d = dev->ml_priv;
884
885 BUG_ON(d->entries);
886 kfree(d);
887 dev->ml_priv = NULL;
888 }
889 }
890 rcu_read_unlock();
891
892 kfree(net->can.can_rx_alldev_list);
893 kfree(net->can.can_stats);
894 kfree(net->can.can_pstats);
895} 835}
896 836
897/* af_can module init/exit functions */ 837/* af_can module init/exit functions */
diff --git a/net/can/af_can.h b/net/can/af_can.h
index 9cdb79083623..7c2d9161e224 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -60,25 +60,10 @@ struct receiver {
60 struct rcu_head rcu; 60 struct rcu_head rcu;
61}; 61};
62 62
63#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
64#define CAN_EFF_RCV_HASH_BITS 10
65#define CAN_EFF_RCV_ARRAY_SZ (1 << CAN_EFF_RCV_HASH_BITS)
66
67enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_MAX };
68
69/* per device receive filters linked at dev->ml_priv */
70struct can_dev_rcv_lists {
71 struct hlist_head rx[RX_MAX];
72 struct hlist_head rx_sff[CAN_SFF_RCV_ARRAY_SZ];
73 struct hlist_head rx_eff[CAN_EFF_RCV_ARRAY_SZ];
74 int remove_on_zero_entries;
75 int entries;
76};
77
78/* statistic structures */ 63/* statistic structures */
79 64
80/* can be reset e.g. by can_init_stats() */ 65/* can be reset e.g. by can_init_stats() */
81struct s_stats { 66struct can_pkg_stats {
82 unsigned long jiffies_init; 67 unsigned long jiffies_init;
83 68
84 unsigned long rx_frames; 69 unsigned long rx_frames;
@@ -103,7 +88,7 @@ struct s_stats {
103}; 88};
104 89
105/* persistent statistics */ 90/* persistent statistics */
106struct s_pstats { 91struct can_rcv_lists_stats {
107 unsigned long stats_reset; 92 unsigned long stats_reset;
108 unsigned long user_reset; 93 unsigned long user_reset;
109 unsigned long rcv_entries; 94 unsigned long rcv_entries;
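The receive-list struct removed from af_can.h above does not disappear: it moves into the new CAN mid-layer private from include/linux/can/can-ml.h (not reproduced in this section), which alloc_candev() now allocates together with the net_device. That is why the NETDEV_REGISTER notifier in af_can.c only warns when dev->ml_priv is missing, and why can_pernet_exit() no longer has to walk the registered devices. A minimal sketch of such a container, assuming the layout implied by the ndev->ml_priv accesses in net/can/j1939/main.c below:

/* Hedged sketch, not a copy of include/linux/can/can-ml.h: the per-device
 * mid-layer private bundles the receive lists (formerly kzalloc'ed in the
 * notifier) with the optional j1939 state reached via ndev->ml_priv.
 */
struct can_ml_priv {
	struct can_dev_rcv_lists dev_rcv_lists;	/* per-device receive filters */
#ifdef CAN_J1939
	struct j1939_priv *j1939_priv;		/* set by j1939_priv_set() */
#endif
};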
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 28fd1a1c8487..c96fa0f33db3 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1294,7 +1294,7 @@ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
1294 /* no bound device as default => check msg_name */ 1294 /* no bound device as default => check msg_name */
1295 DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name); 1295 DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
1296 1296
1297 if (msg->msg_namelen < sizeof(*addr)) 1297 if (msg->msg_namelen < CAN_REQUIRED_SIZE(*addr, can_ifindex))
1298 return -EINVAL; 1298 return -EINVAL;
1299 1299
1300 if (addr->can_family != AF_CAN) 1300 if (addr->can_family != AF_CAN)
@@ -1536,7 +1536,7 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
1536 struct net *net = sock_net(sk); 1536 struct net *net = sock_net(sk);
1537 int ret = 0; 1537 int ret = 0;
1538 1538
1539 if (len < sizeof(*addr)) 1539 if (len < CAN_REQUIRED_SIZE(*addr, can_ifindex))
1540 return -EINVAL; 1540 return -EINVAL;
1541 1541
1542 lock_sock(sk); 1542 lock_sock(sk);
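Both length checks above now only require the passed sockaddr to extend up to and including can_ifindex, instead of covering the full, j1939-enlarged struct sockaddr_can. The CAN_REQUIRED_SIZE() macro itself comes from the extended include/uapi/linux/can.h and is not shown in this section; a minimal sketch of the usual offsetof-plus-member-size idiom it stands for:

/* Hedged sketch of the required-size idiom (the real macro lives in the
 * updated include/uapi/linux/can.h): the number of bytes needed to reach
 * the end of @member inside the object @obj, so that shorter, pre-j1939
 * sockaddr_can layouts keep working with bcm_connect()/bcm_sendmsg().
 */
#include <stddef.h>

#define CAN_REQUIRED_SIZE_SKETCH(obj, member) \
	(offsetof(typeof(obj), member) + sizeof(((typeof(obj) *)0)->member))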
diff --git a/net/can/j1939/Kconfig b/net/can/j1939/Kconfig
new file mode 100644
index 000000000000..2998298b71ec
--- /dev/null
+++ b/net/can/j1939/Kconfig
@@ -0,0 +1,15 @@
1# SPDX-License-Identifier: GPL-2.0
2#
3# SAE J1939 network layer core configuration
4#
5
6config CAN_J1939
7 tristate "SAE J1939"
8 depends on CAN
9 help
10 SAE J1939
11 Say Y to have in-kernel support for the j1939 socket type. This
12 allows communication according to SAE j1939.
13 The relevant parts in the kernel are
14 SAE j1939-21 (datalink & transport protocol)
15 & SAE j1939-81 (network management).
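The socket type this option enables is driven entirely from user space. A minimal user-space sketch, assuming the usual AF_CAN/SOCK_DGRAM/CAN_J1939 triple and the J1939_NO_* constants exported by the new uapi header, together with the j1939 members of struct sockaddr_can used by net/can/j1939/socket.c further down:

/* Hedged user-space sketch: bind a J1939 socket on "can0" with a fixed
 * source address and no NAME (so no address claiming is involved).
 * Interface name and address are examples; error handling is minimal.
 */
#include <linux/can.h>
#include <linux/can/j1939.h>
#include <net/if.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_can addr = { 0 };
	int sock;

	sock = socket(AF_CAN, SOCK_DGRAM, CAN_J1939);
	if (sock < 0)
		return 1;

	addr.can_family = AF_CAN;
	addr.can_ifindex = if_nametoindex("can0");
	addr.can_addr.j1939.name = J1939_NO_NAME;	/* no NAME, static SA */
	addr.can_addr.j1939.addr = 0x20;		/* our source address */
	addr.can_addr.j1939.pgn = J1939_NO_PGN;		/* no RX PGN filter */

	if (bind(sock, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind");
		close(sock);
		return 1;
	}

	/* from here on: sendto()/recvfrom() J1939 payloads */
	close(sock);
	return 0;
}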
diff --git a/net/can/j1939/Makefile b/net/can/j1939/Makefile
new file mode 100644
index 000000000000..19181bdae173
--- /dev/null
+++ b/net/can/j1939/Makefile
@@ -0,0 +1,10 @@
1# SPDX-License-Identifier: GPL-2.0
2
3obj-$(CONFIG_CAN_J1939) += can-j1939.o
4
5can-j1939-objs := \
6 address-claim.o \
7 bus.o \
8 main.o \
9 socket.o \
10 transport.o
diff --git a/net/can/j1939/address-claim.c b/net/can/j1939/address-claim.c
new file mode 100644
index 000000000000..f33c47327927
--- /dev/null
+++ b/net/can/j1939/address-claim.c
@@ -0,0 +1,230 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (c) 2010-2011 EIA Electronics,
3// Kurt Van Dijck <kurt.van.dijck@eia.be>
4// Copyright (c) 2010-2011 EIA Electronics,
5// Pieter Beyens <pieter.beyens@eia.be>
6// Copyright (c) 2017-2019 Pengutronix,
7// Marc Kleine-Budde <kernel@pengutronix.de>
8// Copyright (c) 2017-2019 Pengutronix,
9// Oleksij Rempel <kernel@pengutronix.de>
10
11/* J1939 Address Claiming.
12 * Address Claiming in the kernel
13 * - keeps track of the AC states of ECUs,
14 * - resolves NAME<=>SA taking into account the AC states of ECUs.
15 *
16 * All Address Claim msgs (including host-originated msgs) are processed
17 * at the receive path (a sent msg is always received again via CAN echo).
18 * As such, the processing of AC msgs is done in the order in which msgs
19 * are sent on the bus.
20 *
21 * This module doesn't send msgs itself (e.g. replies on Address Claims),
22 * this is the responsibility of a user space application or daemon.
23 */
24
25#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
26
27#include <linux/netdevice.h>
28#include <linux/skbuff.h>
29
30#include "j1939-priv.h"
31
32static inline name_t j1939_skb_to_name(const struct sk_buff *skb)
33{
34 return le64_to_cpup((__le64 *)skb->data);
35}
36
37static inline bool j1939_ac_msg_is_request(struct sk_buff *skb)
38{
39 struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
40 int req_pgn;
41
42 if (skb->len < 3 || skcb->addr.pgn != J1939_PGN_REQUEST)
43 return false;
44
45 req_pgn = skb->data[0] | (skb->data[1] << 8) | (skb->data[2] << 16);
46
47 return req_pgn == J1939_PGN_ADDRESS_CLAIMED;
48}
49
50static int j1939_ac_verify_outgoing(struct j1939_priv *priv,
51 struct sk_buff *skb)
52{
53 struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
54
55 if (skb->len != 8) {
56 netdev_notice(priv->ndev, "tx address claim with dlc %i\n",
57 skb->len);
58 return -EPROTO;
59 }
60
61 if (skcb->addr.src_name != j1939_skb_to_name(skb)) {
62 netdev_notice(priv->ndev, "tx address claim with different name\n");
63 return -EPROTO;
64 }
65
66 if (skcb->addr.sa == J1939_NO_ADDR) {
67 netdev_notice(priv->ndev, "tx address claim with broadcast sa\n");
68 return -EPROTO;
69 }
70
71 /* ac must always be a broadcast */
72 if (skcb->addr.dst_name || skcb->addr.da != J1939_NO_ADDR) {
73 netdev_notice(priv->ndev, "tx address claim with dest, not broadcast\n");
74 return -EPROTO;
75 }
76 return 0;
77}
78
79int j1939_ac_fixup(struct j1939_priv *priv, struct sk_buff *skb)
80{
81 struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
82 int ret;
83 u8 addr;
84
85 /* network mgmt: address claiming msgs */
86 if (skcb->addr.pgn == J1939_PGN_ADDRESS_CLAIMED) {
87 struct j1939_ecu *ecu;
88
89 ret = j1939_ac_verify_outgoing(priv, skb);
90 /* return both when failure & when successful */
91 if (ret < 0)
92 return ret;
93 ecu = j1939_ecu_get_by_name(priv, skcb->addr.src_name);
94 if (!ecu)
95 return -ENODEV;
96
97 if (ecu->addr != skcb->addr.sa)
98 /* hold further traffic for ecu, remove from parent */
99 j1939_ecu_unmap(ecu);
100 j1939_ecu_put(ecu);
101 } else if (skcb->addr.src_name) {
102 /* assign source address */
103 addr = j1939_name_to_addr(priv, skcb->addr.src_name);
104 if (!j1939_address_is_unicast(addr) &&
105 !j1939_ac_msg_is_request(skb)) {
106 netdev_notice(priv->ndev, "tx drop: invalid sa for name 0x%016llx\n",
107 skcb->addr.src_name);
108 return -EADDRNOTAVAIL;
109 }
110 skcb->addr.sa = addr;
111 }
112
113 /* assign destination address */
114 if (skcb->addr.dst_name) {
115 addr = j1939_name_to_addr(priv, skcb->addr.dst_name);
116 if (!j1939_address_is_unicast(addr)) {
117 netdev_notice(priv->ndev, "tx drop: invalid da for name 0x%016llx\n",
118 skcb->addr.dst_name);
119 return -EADDRNOTAVAIL;
120 }
121 skcb->addr.da = addr;
122 }
123 return 0;
124}
125
126static void j1939_ac_process(struct j1939_priv *priv, struct sk_buff *skb)
127{
128 struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
129 struct j1939_ecu *ecu, *prev;
130 name_t name;
131
132 if (skb->len != 8) {
133 netdev_notice(priv->ndev, "rx address claim with wrong dlc %i\n",
134 skb->len);
135 return;
136 }
137
138 name = j1939_skb_to_name(skb);
139 skcb->addr.src_name = name;
140 if (!name) {
141 netdev_notice(priv->ndev, "rx address claim without name\n");
142 return;
143 }
144
145 if (!j1939_address_is_valid(skcb->addr.sa)) {
146 netdev_notice(priv->ndev, "rx address claim with broadcast sa\n");
147 return;
148 }
149
150 write_lock_bh(&priv->lock);
151
152 /* Few words on the ECU ref counting:
153 *
154 * First we get an ECU handle, either with
155 * j1939_ecu_get_by_name_locked() (increments the ref counter)
156 * or j1939_ecu_create_locked() (initializes an ECU object
157 * with a ref counter of 1).
158 *
159 * j1939_ecu_unmap_locked() will decrement the ref counter,
160 * but only if the ECU was mapped before. So "ecu" still
161 * belongs to us.
162 *
163 * j1939_ecu_timer_start() will increment the ref counter
164 * before it starts the timer, so we can put the ecu when
165 * leaving this function.
166 */
167 ecu = j1939_ecu_get_by_name_locked(priv, name);
168 if (!ecu && j1939_address_is_unicast(skcb->addr.sa))
169 ecu = j1939_ecu_create_locked(priv, name);
170
171 if (IS_ERR_OR_NULL(ecu))
172 goto out_unlock_bh;
173
174 /* cancel pending (previous) address claim */
175 j1939_ecu_timer_cancel(ecu);
176
177 if (j1939_address_is_idle(skcb->addr.sa)) {
178 j1939_ecu_unmap_locked(ecu);
179 goto out_ecu_put;
180 }
181
182 /* save new addr */
183 if (ecu->addr != skcb->addr.sa)
184 j1939_ecu_unmap_locked(ecu);
185 ecu->addr = skcb->addr.sa;
186
187 prev = j1939_ecu_get_by_addr_locked(priv, skcb->addr.sa);
188 if (prev) {
189 if (ecu->name > prev->name) {
190 j1939_ecu_unmap_locked(ecu);
191 j1939_ecu_put(prev);
192 goto out_ecu_put;
193 } else {
194 /* kick prev if less or equal */
195 j1939_ecu_unmap_locked(prev);
196 j1939_ecu_put(prev);
197 }
198 }
199
200 j1939_ecu_timer_start(ecu);
201 out_ecu_put:
202 j1939_ecu_put(ecu);
203 out_unlock_bh:
204 write_unlock_bh(&priv->lock);
205}
206
207void j1939_ac_recv(struct j1939_priv *priv, struct sk_buff *skb)
208{
209 struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
210 struct j1939_ecu *ecu;
211
212 /* network mgmt */
213 if (skcb->addr.pgn == J1939_PGN_ADDRESS_CLAIMED) {
214 j1939_ac_process(priv, skb);
215 } else if (j1939_address_is_unicast(skcb->addr.sa)) {
216 /* assign source name */
217 ecu = j1939_ecu_get_by_addr(priv, skcb->addr.sa);
218 if (ecu) {
219 skcb->addr.src_name = ecu->name;
220 j1939_ecu_put(ecu);
221 }
222 }
223
224 /* assign destination name */
225 ecu = j1939_ecu_get_by_addr(priv, skcb->addr.da);
226 if (ecu) {
227 skcb->addr.dst_name = ecu->name;
228 j1939_ecu_put(ecu);
229 }
230}
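As the header comment notes, issuing or replying to Address Claims is left to user space; the kernel only verifies and tracks them. A hedged user-space sketch that satisfies the checks in j1939_ac_verify_outgoing() above: bind with the 64-bit NAME and the wanted source address, then broadcast exactly the eight little-endian NAME bytes to J1939_PGN_ADDRESS_CLAIMED (the constant names are assumed to be exported by the new uapi header):

/* Hedged sketch of a user-space address claim, matching the constraints
 * enforced by j1939_ac_verify_outgoing(): 8 data bytes equal to the
 * little-endian NAME, a unicast source address and a broadcast
 * destination.  ifname, NAME and source address are examples.
 */
#include <endian.h>
#include <linux/can.h>
#include <linux/can/j1939.h>
#include <net/if.h>
#include <stdint.h>
#include <sys/socket.h>
#include <unistd.h>

static int claim_address(const char *ifname, uint64_t name, uint8_t sa)
{
	struct sockaddr_can baddr = { 0 }, daddr = { 0 };
	uint64_t le_name = htole64(name);	/* NAME is little-endian on the wire */
	ssize_t ret;
	int sock;

	sock = socket(AF_CAN, SOCK_DGRAM, CAN_J1939);
	if (sock < 0)
		return -1;

	baddr.can_family = AF_CAN;
	baddr.can_ifindex = if_nametoindex(ifname);
	baddr.can_addr.j1939.name = name;		/* our NAME ... */
	baddr.can_addr.j1939.addr = sa;			/* ... claiming this SA */
	baddr.can_addr.j1939.pgn = J1939_NO_PGN;
	if (bind(sock, (struct sockaddr *)&baddr, sizeof(baddr)) < 0)
		goto err;

	daddr.can_family = AF_CAN;
	daddr.can_ifindex = baddr.can_ifindex;
	daddr.can_addr.j1939.name = J1939_NO_NAME;
	daddr.can_addr.j1939.addr = J1939_NO_ADDR;	/* broadcast destination */
	daddr.can_addr.j1939.pgn = J1939_PGN_ADDRESS_CLAIMED;

	ret = sendto(sock, &le_name, sizeof(le_name), 0,
		     (struct sockaddr *)&daddr, sizeof(daddr));
	close(sock);
	return ret == (ssize_t)sizeof(le_name) ? 0 : -1;

err:
	close(sock);
	return -1;
}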
diff --git a/net/can/j1939/bus.c b/net/can/j1939/bus.c
new file mode 100644
index 000000000000..486687901602
--- /dev/null
+++ b/net/can/j1939/bus.c
@@ -0,0 +1,333 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (c) 2010-2011 EIA Electronics,
3// Kurt Van Dijck <kurt.van.dijck@eia.be>
4// Copyright (c) 2017-2019 Pengutronix,
5// Marc Kleine-Budde <kernel@pengutronix.de>
6// Copyright (c) 2017-2019 Pengutronix,
7// Oleksij Rempel <kernel@pengutronix.de>
8
9/* bus for j1939 remote devices
10 * Since rtnetlink, no real bus is used.
11 */
12
13#include <net/sock.h>
14
15#include "j1939-priv.h"
16
17static void __j1939_ecu_release(struct kref *kref)
18{
19 struct j1939_ecu *ecu = container_of(kref, struct j1939_ecu, kref);
20 struct j1939_priv *priv = ecu->priv;
21
22 list_del(&ecu->list);
23 kfree(ecu);
24 j1939_priv_put(priv);
25}
26
27void j1939_ecu_put(struct j1939_ecu *ecu)
28{
29 kref_put(&ecu->kref, __j1939_ecu_release);
30}
31
32static void j1939_ecu_get(struct j1939_ecu *ecu)
33{
34 kref_get(&ecu->kref);
35}
36
37static bool j1939_ecu_is_mapped_locked(struct j1939_ecu *ecu)
38{
39 struct j1939_priv *priv = ecu->priv;
40
41 lockdep_assert_held(&priv->lock);
42
43 return j1939_ecu_find_by_addr_locked(priv, ecu->addr) == ecu;
44}
45
46/* ECU device interface */
47/* map ECU to a bus address space */
48static void j1939_ecu_map_locked(struct j1939_ecu *ecu)
49{
50 struct j1939_priv *priv = ecu->priv;
51 struct j1939_addr_ent *ent;
52
53 lockdep_assert_held(&priv->lock);
54
55 if (!j1939_address_is_unicast(ecu->addr))
56 return;
57
58 ent = &priv->ents[ecu->addr];
59
60 if (ent->ecu) {
61 netdev_warn(priv->ndev, "Trying to map already mapped ECU, addr: 0x%02x, name: 0x%016llx. Skip it.\n",
62 ecu->addr, ecu->name);
63 return;
64 }
65
66 j1939_ecu_get(ecu);
67 ent->ecu = ecu;
68 ent->nusers += ecu->nusers;
69}
70
71/* unmap ECU from a bus address space */
72void j1939_ecu_unmap_locked(struct j1939_ecu *ecu)
73{
74 struct j1939_priv *priv = ecu->priv;
75 struct j1939_addr_ent *ent;
76
77 lockdep_assert_held(&priv->lock);
78
79 if (!j1939_address_is_unicast(ecu->addr))
80 return;
81
82 if (!j1939_ecu_is_mapped_locked(ecu))
83 return;
84
85 ent = &priv->ents[ecu->addr];
86 ent->ecu = NULL;
87 ent->nusers -= ecu->nusers;
88 j1939_ecu_put(ecu);
89}
90
91void j1939_ecu_unmap(struct j1939_ecu *ecu)
92{
93 write_lock_bh(&ecu->priv->lock);
94 j1939_ecu_unmap_locked(ecu);
95 write_unlock_bh(&ecu->priv->lock);
96}
97
98void j1939_ecu_unmap_all(struct j1939_priv *priv)
99{
100 int i;
101
102 write_lock_bh(&priv->lock);
103 for (i = 0; i < ARRAY_SIZE(priv->ents); i++)
104 if (priv->ents[i].ecu)
105 j1939_ecu_unmap_locked(priv->ents[i].ecu);
106 write_unlock_bh(&priv->lock);
107}
108
109void j1939_ecu_timer_start(struct j1939_ecu *ecu)
110{
111 /* The ECU is held here and released in the
112 * j1939_ecu_timer_handler() or j1939_ecu_timer_cancel().
113 */
114 j1939_ecu_get(ecu);
115
116 /* Schedule timer in 250 msec to commit address change. */
117 hrtimer_start(&ecu->ac_timer, ms_to_ktime(250),
118 HRTIMER_MODE_REL_SOFT);
119}
120
121void j1939_ecu_timer_cancel(struct j1939_ecu *ecu)
122{
123 if (hrtimer_cancel(&ecu->ac_timer))
124 j1939_ecu_put(ecu);
125}
126
127static enum hrtimer_restart j1939_ecu_timer_handler(struct hrtimer *hrtimer)
128{
129 struct j1939_ecu *ecu =
130 container_of(hrtimer, struct j1939_ecu, ac_timer);
131 struct j1939_priv *priv = ecu->priv;
132
133 write_lock_bh(&priv->lock);
134 /* TODO: can we test if ecu->addr is unicast before starting
135 * the timer?
136 */
137 j1939_ecu_map_locked(ecu);
138
139 /* The corresponding j1939_ecu_get() is in
140 * j1939_ecu_timer_start().
141 */
142 j1939_ecu_put(ecu);
143 write_unlock_bh(&priv->lock);
144
145 return HRTIMER_NORESTART;
146}
147
148struct j1939_ecu *j1939_ecu_create_locked(struct j1939_priv *priv, name_t name)
149{
150 struct j1939_ecu *ecu;
151
152 lockdep_assert_held(&priv->lock);
153
154 ecu = kzalloc(sizeof(*ecu), gfp_any());
155 if (!ecu)
156 return ERR_PTR(-ENOMEM);
157 kref_init(&ecu->kref);
158 ecu->addr = J1939_IDLE_ADDR;
159 ecu->name = name;
160
161 hrtimer_init(&ecu->ac_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
162 ecu->ac_timer.function = j1939_ecu_timer_handler;
163 INIT_LIST_HEAD(&ecu->list);
164
165 j1939_priv_get(priv);
166 ecu->priv = priv;
167 list_add_tail(&ecu->list, &priv->ecus);
168
169 return ecu;
170}
171
172struct j1939_ecu *j1939_ecu_find_by_addr_locked(struct j1939_priv *priv,
173 u8 addr)
174{
175 lockdep_assert_held(&priv->lock);
176
177 return priv->ents[addr].ecu;
178}
179
180struct j1939_ecu *j1939_ecu_get_by_addr_locked(struct j1939_priv *priv, u8 addr)
181{
182 struct j1939_ecu *ecu;
183
184 lockdep_assert_held(&priv->lock);
185
186 if (!j1939_address_is_unicast(addr))
187 return NULL;
188
189 ecu = j1939_ecu_find_by_addr_locked(priv, addr);
190 if (ecu)
191 j1939_ecu_get(ecu);
192
193 return ecu;
194}
195
196struct j1939_ecu *j1939_ecu_get_by_addr(struct j1939_priv *priv, u8 addr)
197{
198 struct j1939_ecu *ecu;
199
200 read_lock_bh(&priv->lock);
201 ecu = j1939_ecu_get_by_addr_locked(priv, addr);
202 read_unlock_bh(&priv->lock);
203
204 return ecu;
205}
206
207/* get pointer to ecu without increasing ref counter */
208static struct j1939_ecu *j1939_ecu_find_by_name_locked(struct j1939_priv *priv,
209 name_t name)
210{
211 struct j1939_ecu *ecu;
212
213 lockdep_assert_held(&priv->lock);
214
215 list_for_each_entry(ecu, &priv->ecus, list) {
216 if (ecu->name == name)
217 return ecu;
218 }
219
220 return NULL;
221}
222
223struct j1939_ecu *j1939_ecu_get_by_name_locked(struct j1939_priv *priv,
224 name_t name)
225{
226 struct j1939_ecu *ecu;
227
228 lockdep_assert_held(&priv->lock);
229
230 if (!name)
231 return NULL;
232
233 ecu = j1939_ecu_find_by_name_locked(priv, name);
234 if (ecu)
235 j1939_ecu_get(ecu);
236
237 return ecu;
238}
239
240struct j1939_ecu *j1939_ecu_get_by_name(struct j1939_priv *priv, name_t name)
241{
242 struct j1939_ecu *ecu;
243
244 read_lock_bh(&priv->lock);
245 ecu = j1939_ecu_get_by_name_locked(priv, name);
246 read_unlock_bh(&priv->lock);
247
248 return ecu;
249}
250
251u8 j1939_name_to_addr(struct j1939_priv *priv, name_t name)
252{
253 struct j1939_ecu *ecu;
254 int addr = J1939_IDLE_ADDR;
255
256 if (!name)
257 return J1939_NO_ADDR;
258
259 read_lock_bh(&priv->lock);
260 ecu = j1939_ecu_find_by_name_locked(priv, name);
261 if (ecu && j1939_ecu_is_mapped_locked(ecu))
262 /* ecu's SA is registered */
263 addr = ecu->addr;
264
265 read_unlock_bh(&priv->lock);
266
267 return addr;
268}
269
270/* TX addr/name accounting
271 * Transport protocol needs to know if a SA is local or not
272 * These functions originate from userspace manipulating sockets,
273 * so locking is straightforward
274 */
275
276int j1939_local_ecu_get(struct j1939_priv *priv, name_t name, u8 sa)
277{
278 struct j1939_ecu *ecu;
279 int err = 0;
280
281 write_lock_bh(&priv->lock);
282
283 if (j1939_address_is_unicast(sa))
284 priv->ents[sa].nusers++;
285
286 if (!name)
287 goto done;
288
289 ecu = j1939_ecu_get_by_name_locked(priv, name);
290 if (!ecu)
291 ecu = j1939_ecu_create_locked(priv, name);
292 err = PTR_ERR_OR_ZERO(ecu);
293 if (err)
294 goto done;
295
296 ecu->nusers++;
297 /* TODO: do we care if ecu->addr != sa? */
298 if (j1939_ecu_is_mapped_locked(ecu))
299 /* ecu's sa is active already */
300 priv->ents[ecu->addr].nusers++;
301
302 done:
303 write_unlock_bh(&priv->lock);
304
305 return err;
306}
307
308void j1939_local_ecu_put(struct j1939_priv *priv, name_t name, u8 sa)
309{
310 struct j1939_ecu *ecu;
311
312 write_lock_bh(&priv->lock);
313
314 if (j1939_address_is_unicast(sa))
315 priv->ents[sa].nusers--;
316
317 if (!name)
318 goto done;
319
320 ecu = j1939_ecu_find_by_name_locked(priv, name);
321 if (WARN_ON_ONCE(!ecu))
322 goto done;
323
324 ecu->nusers--;
325 /* TODO: do we care if ecu->addr != sa? */
326 if (j1939_ecu_is_mapped_locked(ecu))
327 /* ecu's sa is active already */
328 priv->ents[ecu->addr].nusers--;
329 j1939_ecu_put(ecu);
330
331 done:
332 write_unlock_bh(&priv->lock);
333}
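The 250 ms ac_timer above only commits a claim that survives the arbitration performed in j1939_ac_process() in address-claim.c earlier: the numerically lower NAME keeps a contested source address. A tiny stand-alone illustration of that tie-break, for reference only:

/* Hedged illustration of the rule in j1939_ac_process(): on a conflicting
 * claim for the same source address, the ECU with the lower NAME wins;
 * an equal NAME is "kicked" as well, so the newer claim takes over.
 */
#include <stdbool.h>
#include <stdint.h>

static bool j1939_new_claim_wins(uint64_t new_name, uint64_t mapped_name)
{
	return new_name <= mapped_name;
}

In the kernel code the losing side is simply unmapped; re-claiming a different address afterwards is again a user-space decision.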
diff --git a/net/can/j1939/j1939-priv.h b/net/can/j1939/j1939-priv.h
new file mode 100644
index 000000000000..12369b604ce9
--- /dev/null
+++ b/net/can/j1939/j1939-priv.h
@@ -0,0 +1,338 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2// Copyright (c) 2010-2011 EIA Electronics,
3// Kurt Van Dijck <kurt.van.dijck@eia.be>
4// Copyright (c) 2017-2019 Pengutronix,
5// Marc Kleine-Budde <kernel@pengutronix.de>
6// Copyright (c) 2017-2019 Pengutronix,
7// Oleksij Rempel <kernel@pengutronix.de>
8
9#ifndef _J1939_PRIV_H_
10#define _J1939_PRIV_H_
11
12#include <linux/can/j1939.h>
13#include <net/sock.h>
14
15/* Timeout to receive the abort signal over loopback. In case the CAN
16 * bus is open, the timeout should be triggered.
17 */
18#define J1939_XTP_ABORT_TIMEOUT_MS 500
19#define J1939_SIMPLE_ECHO_TIMEOUT_MS (10 * 1000)
20
21struct j1939_session;
22enum j1939_sk_errqueue_type {
23 J1939_ERRQUEUE_ACK,
24 J1939_ERRQUEUE_SCHED,
25 J1939_ERRQUEUE_ABORT,
26};
27
28/* j1939 devices */
29struct j1939_ecu {
30 struct list_head list;
31 name_t name;
32 u8 addr;
33
34 /* indicates that this ecu successfully claimed @sa as its address */
35 struct hrtimer ac_timer;
36 struct kref kref;
37 struct j1939_priv *priv;
38
39 /* count users, to help transport protocol decide for interaction */
40 int nusers;
41};
42
43struct j1939_priv {
44 struct list_head ecus;
45 /* local list entry in priv
46 * These allow irq (& softirq) context lookups on j1939 devices
47 * This approach (separate lists) is used because the other 2 alternatives
48 * are not easier, or are even wrong:
49 * 1) using the pure kobject methods involves mutexes, which are not
50 * allowed in irq context.
51 * 2) duplicating data structures would require a lot of synchronization
52 * code
53 * usage:
54 */
55
56 /* segments need a lock to protect the above list */
57 rwlock_t lock;
58
59 struct net_device *ndev;
60
61 /* list of 256 ecu ptrs, that cache the claimed addresses.
62 * also protected by the above lock
63 */
64 struct j1939_addr_ent {
65 struct j1939_ecu *ecu;
66 /* count users, to help transport protocol */
67 int nusers;
68 } ents[256];
69
70 struct kref kref;
71
72 /* List of active sessions to prevent the start of a
73 * conflicting one.
74 *
75 * Do not start two sessions with the same type, addresses and
76 * direction.
77 */
78 struct list_head active_session_list;
79
80 /* protects active_session_list */
81 spinlock_t active_session_list_lock;
82
83 unsigned int tp_max_packet_size;
84
85 /* lock for j1939_socks list */
86 spinlock_t j1939_socks_lock;
87 struct list_head j1939_socks;
88
89 struct kref rx_kref;
90};
91
92void j1939_ecu_put(struct j1939_ecu *ecu);
93
94/* keep the cache of what is local */
95int j1939_local_ecu_get(struct j1939_priv *priv, name_t name, u8 sa);
96void j1939_local_ecu_put(struct j1939_priv *priv, name_t name, u8 sa);
97
98static inline bool j1939_address_is_unicast(u8 addr)
99{
100 return addr <= J1939_MAX_UNICAST_ADDR;
101}
102
103static inline bool j1939_address_is_idle(u8 addr)
104{
105 return addr == J1939_IDLE_ADDR;
106}
107
108static inline bool j1939_address_is_valid(u8 addr)
109{
110 return addr != J1939_NO_ADDR;
111}
112
113static inline bool j1939_pgn_is_pdu1(pgn_t pgn)
114{
115 /* ignore dp & res bits for this */
116 return (pgn & 0xff00) < 0xf000;
117}
118
119/* utility to correctly unmap an ECU */
120void j1939_ecu_unmap_locked(struct j1939_ecu *ecu);
121void j1939_ecu_unmap(struct j1939_ecu *ecu);
122
123u8 j1939_name_to_addr(struct j1939_priv *priv, name_t name);
124struct j1939_ecu *j1939_ecu_find_by_addr_locked(struct j1939_priv *priv,
125 u8 addr);
126struct j1939_ecu *j1939_ecu_get_by_addr(struct j1939_priv *priv, u8 addr);
127struct j1939_ecu *j1939_ecu_get_by_addr_locked(struct j1939_priv *priv,
128 u8 addr);
129struct j1939_ecu *j1939_ecu_get_by_name(struct j1939_priv *priv, name_t name);
130struct j1939_ecu *j1939_ecu_get_by_name_locked(struct j1939_priv *priv,
131 name_t name);
132
133enum j1939_transfer_type {
134 J1939_TP,
135 J1939_ETP,
136 J1939_SIMPLE,
137};
138
139struct j1939_addr {
140 name_t src_name;
141 name_t dst_name;
142 pgn_t pgn;
143
144 u8 sa;
145 u8 da;
146
147 u8 type;
148};
149
150/* control buffer of the sk_buff */
151struct j1939_sk_buff_cb {
152 /* Offset in bytes within one ETP session */
153 u32 offset;
154
155 /* for tx, MSG_SYN will be used to sync on sockets */
156 u32 msg_flags;
157 u32 tskey;
158
159 struct j1939_addr addr;
160
161 /* Flags for quick lookups during skb processing.
162 * These are set in the receive path only.
163 */
164#define J1939_ECU_LOCAL_SRC BIT(0)
165#define J1939_ECU_LOCAL_DST BIT(1)
166 u8 flags;
167
168 priority_t priority;
169};
170
171static inline
172struct j1939_sk_buff_cb *j1939_skb_to_cb(const struct sk_buff *skb)
173{
174 BUILD_BUG_ON(sizeof(struct j1939_sk_buff_cb) > sizeof(skb->cb));
175
176 return (struct j1939_sk_buff_cb *)skb->cb;
177}
178
179int j1939_send_one(struct j1939_priv *priv, struct sk_buff *skb);
180void j1939_sk_recv(struct j1939_priv *priv, struct sk_buff *skb);
181bool j1939_sk_recv_match(struct j1939_priv *priv,
182 struct j1939_sk_buff_cb *skcb);
183void j1939_sk_send_loop_abort(struct sock *sk, int err);
184void j1939_sk_errqueue(struct j1939_session *session,
185 enum j1939_sk_errqueue_type type);
186void j1939_sk_queue_activate_next(struct j1939_session *session);
187
188/* stack entries */
189struct j1939_session *j1939_tp_send(struct j1939_priv *priv,
190 struct sk_buff *skb, size_t size);
191int j1939_tp_recv(struct j1939_priv *priv, struct sk_buff *skb);
192int j1939_ac_fixup(struct j1939_priv *priv, struct sk_buff *skb);
193void j1939_ac_recv(struct j1939_priv *priv, struct sk_buff *skb);
194void j1939_simple_recv(struct j1939_priv *priv, struct sk_buff *skb);
195
196/* network management */
197struct j1939_ecu *j1939_ecu_create_locked(struct j1939_priv *priv, name_t name);
198
199void j1939_ecu_timer_start(struct j1939_ecu *ecu);
200void j1939_ecu_timer_cancel(struct j1939_ecu *ecu);
201void j1939_ecu_unmap_all(struct j1939_priv *priv);
202
203struct j1939_priv *j1939_netdev_start(struct net_device *ndev);
204void j1939_netdev_stop(struct j1939_priv *priv);
205
206void j1939_priv_put(struct j1939_priv *priv);
207void j1939_priv_get(struct j1939_priv *priv);
208
209/* notify/alert all j1939 sockets bound to ifindex */
210void j1939_sk_netdev_event_netdown(struct j1939_priv *priv);
211int j1939_cancel_active_session(struct j1939_priv *priv, struct sock *sk);
212void j1939_tp_init(struct j1939_priv *priv);
213
214/* decrement pending skb for a j1939 socket */
215void j1939_sock_pending_del(struct sock *sk);
216
217enum j1939_session_state {
218 J1939_SESSION_NEW,
219 J1939_SESSION_ACTIVE,
220 /* waiting for abort signal on the bus */
221 J1939_SESSION_WAITING_ABORT,
222 J1939_SESSION_ACTIVE_MAX,
223 J1939_SESSION_DONE,
224};
225
226struct j1939_session {
227 struct j1939_priv *priv;
228 struct list_head active_session_list_entry;
229 struct list_head sk_session_queue_entry;
230 struct kref kref;
231 struct sock *sk;
232
233 /* ifindex, src, dst, pgn define the session block
234 * the are _never_ modified after insertion in the list
235 * this decreases locking problems a _lot_
236 */
237 struct j1939_sk_buff_cb skcb;
238 struct sk_buff_head skb_queue;
239
240 /* all tx related stuff (last_txcmd, pkt.tx)
241 * is protected (modified only) with the txtimer hrtimer
242 * 'total' & 'block' are never changed,
243 * last_cmd, last & block are protected by ->lock
244 * this means that the tx may run after cts is received that should
245 * have stopped tx, but this time discrepancy is never avoided anyhow
246 */
247 u8 last_cmd, last_txcmd;
248 bool transmission;
249 bool extd;
250 /* Total message size, number of bytes */
251 unsigned int total_message_size;
253 /* Total number of bytes queued from the socket to the session */
253 unsigned int total_queued_size;
254 unsigned int tx_retry;
255
256 int err;
257 u32 tskey;
258 enum j1939_session_state state;
259
260 /* Packet counters for an (extended) transfer session. A packet
261 * carries at most 7 bytes.
262 */
263 struct {
264 /* total - total number of packets for this session */
265 unsigned int total;
266 /* last - last packet of a transfer block after which
267 * responder should send ETP.CM_CTS and originator
268 * ETP.CM_DPO
269 */
270 unsigned int last;
271 /* tx - number of packets sent by the originator node.
272 * this counter can be set back if the responder node
273 * didn't receive all packets sent by the originator.
274 */
275 unsigned int tx;
276 unsigned int tx_acked;
277 /* rx - number of packets received */
278 unsigned int rx;
279 /* block - amount of packets expected in one block */
280 unsigned int block;
281 /* dpo - ETP.CM_DPO, Data Packet Offset */
282 unsigned int dpo;
283 } pkt;
284 struct hrtimer txtimer, rxtimer;
285};
286
287struct j1939_sock {
288 struct sock sk; /* must be first to skip with memset */
289 struct j1939_priv *priv;
290 struct list_head list;
291
292#define J1939_SOCK_BOUND BIT(0)
293#define J1939_SOCK_CONNECTED BIT(1)
294#define J1939_SOCK_PROMISC BIT(2)
295#define J1939_SOCK_ERRQUEUE BIT(3)
296 int state;
297
298 int ifindex;
299 struct j1939_addr addr;
300 struct j1939_filter *filters;
301 int nfilters;
302 pgn_t pgn_rx_filter;
303
304 /* j1939 may emit frames with equal PGNs (but not equal CAN IDs) out of
305 * order when the transport protocol comes into play.
306 * To allow emitting in order, keep a 'pending' number of packets
307 */
308 atomic_t skb_pending;
309 wait_queue_head_t waitq;
310
311 /* lock for the sk_session_queue list */
312 spinlock_t sk_session_queue_lock;
313 struct list_head sk_session_queue;
314};
315
316static inline struct j1939_sock *j1939_sk(const struct sock *sk)
317{
318 return container_of(sk, struct j1939_sock, sk);
319}
320
321void j1939_session_get(struct j1939_session *session);
322void j1939_session_put(struct j1939_session *session);
323void j1939_session_skb_queue(struct j1939_session *session,
324 struct sk_buff *skb);
325int j1939_session_activate(struct j1939_session *session);
326void j1939_tp_schedule_txtimer(struct j1939_session *session, int msec);
327void j1939_session_timers_cancel(struct j1939_session *session);
328
329#define J1939_MAX_TP_PACKET_SIZE (7 * 0xff)
330#define J1939_MAX_ETP_PACKET_SIZE (7 * 0x00ffffff)
331
332#define J1939_REGULAR 0
333#define J1939_EXTENDED 1
334
335/* CAN protocol */
336extern const struct can_proto j1939_can_proto;
337
338#endif /* _J1939_PRIV_H_ */
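The limits at the end of the header follow from the 7 payload bytes each (E)TP data packet carries: J1939_MAX_TP_PACKET_SIZE is 7 * 0xff = 1785 bytes and J1939_MAX_ETP_PACKET_SIZE is 7 * 0x00ffffff = 117440505 bytes. A small sketch of how the pkt.total counter relates to total_message_size, assuming the usual round-up division (the real bookkeeping lives in transport.c, not annotated here):

/* Hedged sketch: packets per session scale with the message size in
 * 7-byte steps; this mirrors, but does not copy, the pkt.total handling
 * in net/can/j1939/transport.c.
 */
#include <stdio.h>

#define J1939_TP_PAYLOAD_PER_PACKET 7

static unsigned int j1939_pkt_total(unsigned int total_message_size)
{
	return (total_message_size + J1939_TP_PAYLOAD_PER_PACKET - 1) /
		J1939_TP_PAYLOAD_PER_PACKET;
}

int main(void)
{
	/* the 1785-byte TP limit needs 255 packets, an 8-byte message 2 */
	printf("%u %u\n", j1939_pkt_total(1785), j1939_pkt_total(8));
	return 0;
}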
diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
new file mode 100644
index 000000000000..def2f813ffce
--- /dev/null
+++ b/net/can/j1939/main.c
@@ -0,0 +1,403 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (c) 2010-2011 EIA Electronics,
3// Pieter Beyens <pieter.beyens@eia.be>
4// Copyright (c) 2010-2011 EIA Electronics,
5// Kurt Van Dijck <kurt.van.dijck@eia.be>
6// Copyright (c) 2018 Protonic,
7// Robin van der Gracht <robin@protonic.nl>
8// Copyright (c) 2017-2019 Pengutronix,
9// Marc Kleine-Budde <kernel@pengutronix.de>
10// Copyright (c) 2017-2019 Pengutronix,
11// Oleksij Rempel <kernel@pengutronix.de>
12
13/* Core of can-j1939 that links j1939 to CAN. */
14
15#include <linux/can/can-ml.h>
16#include <linux/can/core.h>
17#include <linux/can/skb.h>
18#include <linux/if_arp.h>
19#include <linux/module.h>
20
21#include "j1939-priv.h"
22
23MODULE_DESCRIPTION("PF_CAN SAE J1939");
24MODULE_LICENSE("GPL v2");
25MODULE_AUTHOR("EIA Electronics (Kurt Van Dijck & Pieter Beyens)");
26MODULE_ALIAS("can-proto-" __stringify(CAN_J1939));
27
28/* LOWLEVEL CAN interface */
29
30/* CAN_HDR: #bytes before can_frame data part */
31#define J1939_CAN_HDR (offsetof(struct can_frame, data))
32
33/* CAN_FTR: #bytes beyond data part */
34#define J1939_CAN_FTR (sizeof(struct can_frame) - J1939_CAN_HDR - \
35 sizeof(((struct can_frame *)0)->data))
36
37/* lowest layer */
38static void j1939_can_recv(struct sk_buff *iskb, void *data)
39{
40 struct j1939_priv *priv = data;
41 struct sk_buff *skb;
42 struct j1939_sk_buff_cb *skcb, *iskcb;
43 struct can_frame *cf;
44
45 /* create a copy of the skb
46 * j1939 only delivers the real data bytes,
47 * the header goes into sockaddr.
48 * j1939 must not modify the incoming skb in that way, hence the clone
49 */
50 skb = skb_clone(iskb, GFP_ATOMIC);
51 if (!skb)
52 return;
53
54 can_skb_set_owner(skb, iskb->sk);
55
56 /* get a pointer to the header of the skb
57 * the skb payload (pointer) is moved, so that the next skb_data
58 * returns the actual payload
59 */
60 cf = (void *)skb->data;
61 skb_pull(skb, J1939_CAN_HDR);
62
63 /* fix length, set to dlc, with 8 maximum */
64 skb_trim(skb, min_t(uint8_t, cf->can_dlc, 8));
65
66 /* set addr */
67 skcb = j1939_skb_to_cb(skb);
68 memset(skcb, 0, sizeof(*skcb));
69
70 iskcb = j1939_skb_to_cb(iskb);
71 skcb->tskey = iskcb->tskey;
72 skcb->priority = (cf->can_id >> 26) & 0x7;
73 skcb->addr.sa = cf->can_id;
74 skcb->addr.pgn = (cf->can_id >> 8) & J1939_PGN_MAX;
75 /* set default message type */
76 skcb->addr.type = J1939_TP;
77 if (j1939_pgn_is_pdu1(skcb->addr.pgn)) {
78 /* Type 1: with destination address */
79 skcb->addr.da = skcb->addr.pgn;
80 /* normalize pgn: strip dst address */
81 skcb->addr.pgn &= 0x3ff00;
82 } else {
83 /* set broadcast address */
84 skcb->addr.da = J1939_NO_ADDR;
85 }
86
87 /* update localflags */
88 read_lock_bh(&priv->lock);
89 if (j1939_address_is_unicast(skcb->addr.sa) &&
90 priv->ents[skcb->addr.sa].nusers)
91 skcb->flags |= J1939_ECU_LOCAL_SRC;
92 if (j1939_address_is_unicast(skcb->addr.da) &&
93 priv->ents[skcb->addr.da].nusers)
94 skcb->flags |= J1939_ECU_LOCAL_DST;
95 read_unlock_bh(&priv->lock);
96
97 /* deliver into the j1939 stack ... */
98 j1939_ac_recv(priv, skb);
99
100 if (j1939_tp_recv(priv, skb))
101 /* this means the transport layer processed the message */
102 goto done;
103
104 j1939_simple_recv(priv, skb);
105 j1939_sk_recv(priv, skb);
106 done:
107 kfree_skb(skb);
108}
109
110/* NETDEV MANAGEMENT */
111
112/* values for can_rx_(un)register */
113#define J1939_CAN_ID CAN_EFF_FLAG
114#define J1939_CAN_MASK (CAN_EFF_FLAG | CAN_RTR_FLAG)
115
116static DEFINE_SPINLOCK(j1939_netdev_lock);
117
118static struct j1939_priv *j1939_priv_create(struct net_device *ndev)
119{
120 struct j1939_priv *priv;
121
122 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
123 if (!priv)
124 return NULL;
125
126 rwlock_init(&priv->lock);
127 INIT_LIST_HEAD(&priv->ecus);
128 priv->ndev = ndev;
129 kref_init(&priv->kref);
130 kref_init(&priv->rx_kref);
131 dev_hold(ndev);
132
133 netdev_dbg(priv->ndev, "%s : 0x%p\n", __func__, priv);
134
135 return priv;
136}
137
138static inline void j1939_priv_set(struct net_device *ndev,
139 struct j1939_priv *priv)
140{
141 struct can_ml_priv *can_ml_priv = ndev->ml_priv;
142
143 can_ml_priv->j1939_priv = priv;
144}
145
146static void __j1939_priv_release(struct kref *kref)
147{
148 struct j1939_priv *priv = container_of(kref, struct j1939_priv, kref);
149 struct net_device *ndev = priv->ndev;
150
151 netdev_dbg(priv->ndev, "%s: 0x%p\n", __func__, priv);
152
153 dev_put(ndev);
154 kfree(priv);
155}
156
157void j1939_priv_put(struct j1939_priv *priv)
158{
159 kref_put(&priv->kref, __j1939_priv_release);
160}
161
162void j1939_priv_get(struct j1939_priv *priv)
163{
164 kref_get(&priv->kref);
165}
166
167static int j1939_can_rx_register(struct j1939_priv *priv)
168{
169 struct net_device *ndev = priv->ndev;
170 int ret;
171
172 j1939_priv_get(priv);
173 ret = can_rx_register(dev_net(ndev), ndev, J1939_CAN_ID, J1939_CAN_MASK,
174 j1939_can_recv, priv, "j1939", NULL);
175 if (ret < 0) {
176 j1939_priv_put(priv);
177 return ret;
178 }
179
180 return 0;
181}
182
183static void j1939_can_rx_unregister(struct j1939_priv *priv)
184{
185 struct net_device *ndev = priv->ndev;
186
187 can_rx_unregister(dev_net(ndev), ndev, J1939_CAN_ID, J1939_CAN_MASK,
188 j1939_can_recv, priv);
189
190 j1939_priv_put(priv);
191}
192
193static void __j1939_rx_release(struct kref *kref)
194 __releases(&j1939_netdev_lock)
195{
196 struct j1939_priv *priv = container_of(kref, struct j1939_priv,
197 rx_kref);
198
199 j1939_can_rx_unregister(priv);
200 j1939_ecu_unmap_all(priv);
201 j1939_priv_set(priv->ndev, NULL);
202 spin_unlock(&j1939_netdev_lock);
203}
204
205/* get pointer to priv without increasing ref counter */
206static inline struct j1939_priv *j1939_ndev_to_priv(struct net_device *ndev)
207{
208 struct can_ml_priv *can_ml_priv = ndev->ml_priv;
209
210 return can_ml_priv->j1939_priv;
211}
212
213static struct j1939_priv *j1939_priv_get_by_ndev_locked(struct net_device *ndev)
214{
215 struct j1939_priv *priv;
216
217 lockdep_assert_held(&j1939_netdev_lock);
218
219 if (ndev->type != ARPHRD_CAN)
220 return NULL;
221
222 priv = j1939_ndev_to_priv(ndev);
223 if (priv)
224 j1939_priv_get(priv);
225
226 return priv;
227}
228
229static struct j1939_priv *j1939_priv_get_by_ndev(struct net_device *ndev)
230{
231 struct j1939_priv *priv;
232
233 spin_lock(&j1939_netdev_lock);
234 priv = j1939_priv_get_by_ndev_locked(ndev);
235 spin_unlock(&j1939_netdev_lock);
236
237 return priv;
238}
239
240struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
241{
242 struct j1939_priv *priv, *priv_new;
243 int ret;
244
245 priv = j1939_priv_get_by_ndev(ndev);
246 if (priv) {
247 kref_get(&priv->rx_kref);
248 return priv;
249 }
250
251 priv = j1939_priv_create(ndev);
252 if (!priv)
253 return ERR_PTR(-ENOMEM);
254
255 j1939_tp_init(priv);
256 spin_lock_init(&priv->j1939_socks_lock);
257 INIT_LIST_HEAD(&priv->j1939_socks);
258
259 spin_lock(&j1939_netdev_lock);
260 priv_new = j1939_priv_get_by_ndev_locked(ndev);
261 if (priv_new) {
262 /* Someone was faster than us, use their priv and roll
263 * back ours.
264 */
265 spin_unlock(&j1939_netdev_lock);
266 dev_put(ndev);
267 kfree(priv);
268 kref_get(&priv_new->rx_kref);
269 return priv_new;
270 }
271 j1939_priv_set(ndev, priv);
272 spin_unlock(&j1939_netdev_lock);
273
274 ret = j1939_can_rx_register(priv);
275 if (ret < 0)
276 goto out_priv_put;
277
278 return priv;
279
280 out_priv_put:
281 j1939_priv_set(ndev, NULL);
282 dev_put(ndev);
283 kfree(priv);
284
285 return ERR_PTR(ret);
286}
287
288void j1939_netdev_stop(struct j1939_priv *priv)
289{
290 kref_put_lock(&priv->rx_kref, __j1939_rx_release, &j1939_netdev_lock);
291 j1939_priv_put(priv);
292}
293
294int j1939_send_one(struct j1939_priv *priv, struct sk_buff *skb)
295{
296 int ret, dlc;
297 canid_t canid;
298 struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
299 struct can_frame *cf;
300
301 /* apply sanity checks */
302 if (j1939_pgn_is_pdu1(skcb->addr.pgn))
303 skcb->addr.pgn &= J1939_PGN_PDU1_MAX;
304 else
305 skcb->addr.pgn &= J1939_PGN_MAX;
306
307 if (skcb->priority > 7)
308 skcb->priority = 6;
309
310 ret = j1939_ac_fixup(priv, skb);
311 if (unlikely(ret))
312 goto failed;
313 dlc = skb->len;
314
315 /* re-claim the CAN_HDR from the SKB */
316 cf = skb_push(skb, J1939_CAN_HDR);
317
318 /* make it a full can frame again */
319 skb_put(skb, J1939_CAN_FTR + (8 - dlc));
320
321 canid = CAN_EFF_FLAG |
322 (skcb->priority << 26) |
323 (skcb->addr.pgn << 8) |
324 skcb->addr.sa;
325 if (j1939_pgn_is_pdu1(skcb->addr.pgn))
326 canid |= skcb->addr.da << 8;
327
328 cf->can_id = canid;
329 cf->can_dlc = dlc;
330
331 return can_send(skb, 1);
332
333 failed:
334 kfree_skb(skb);
335 return ret;
336}
337
338static int j1939_netdev_notify(struct notifier_block *nb,
339 unsigned long msg, void *data)
340{
341 struct net_device *ndev = netdev_notifier_info_to_dev(data);
342 struct j1939_priv *priv;
343
344 priv = j1939_priv_get_by_ndev(ndev);
345 if (!priv)
346 goto notify_done;
347
348 if (ndev->type != ARPHRD_CAN)
349 goto notify_put;
350
351 switch (msg) {
352 case NETDEV_DOWN:
353 j1939_cancel_active_session(priv, NULL);
354 j1939_sk_netdev_event_netdown(priv);
355 j1939_ecu_unmap_all(priv);
356 break;
357 }
358
359notify_put:
360 j1939_priv_put(priv);
361
362notify_done:
363 return NOTIFY_DONE;
364}
365
366static struct notifier_block j1939_netdev_notifier = {
367 .notifier_call = j1939_netdev_notify,
368};
369
370/* MODULE interface */
371static __init int j1939_module_init(void)
372{
373 int ret;
374
375 pr_info("can: SAE J1939\n");
376
377 ret = register_netdevice_notifier(&j1939_netdev_notifier);
378 if (ret)
379 goto fail_notifier;
380
381 ret = can_proto_register(&j1939_can_proto);
382 if (ret < 0) {
383 pr_err("can: registration of j1939 protocol failed\n");
384 goto fail_sk;
385 }
386
387 return 0;
388
389 fail_sk:
390 unregister_netdevice_notifier(&j1939_netdev_notifier);
391 fail_notifier:
392 return ret;
393}
394
395static __exit void j1939_module_exit(void)
396{
397 can_proto_unregister(&j1939_can_proto);
398
399 unregister_netdevice_notifier(&j1939_netdev_notifier);
400}
401
402module_init(j1939_module_init);
403module_exit(j1939_module_exit);
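j1939_send_one() and j1939_can_recv() above pack the J1939 fields into the 29-bit extended CAN identifier: priority in bits 26..28, PGN in bits 8..25 and source address in bits 0..7, with the low PGN byte doubling as the destination address for PDU1 PGNs. A worked stand-alone sketch with example values:

/* Hedged illustration of the CAN-ID layout used by j1939_send_one() and
 * j1939_can_recv(); the values in main() are examples, not taken from the
 * patch.  Assumes a PDU1 PGN already has its low byte cleared, as
 * j1939_send_one() enforces via J1939_PGN_PDU1_MAX.
 */
#include <linux/can.h>
#include <stdint.h>
#include <stdio.h>

static int pgn_is_pdu1(uint32_t pgn)
{
	return (pgn & 0xff00) < 0xf000;	/* same test as j1939_pgn_is_pdu1() */
}

static uint32_t j1939_build_canid(uint8_t prio, uint32_t pgn, uint8_t da,
				  uint8_t sa)
{
	uint32_t canid = CAN_EFF_FLAG | ((uint32_t)prio << 26) |
			 (pgn << 8) | sa;

	if (pgn_is_pdu1(pgn))
		canid |= (uint32_t)da << 8;	/* DA sits in the low PGN byte */

	return canid;
}

int main(void)
{
	/* PDU2: prio 6, PGN 0x0f004, SA 0x25 -> 0x98f00425 */
	printf("0x%08x\n", j1939_build_canid(6, 0x0f004, 0xff, 0x25));
	/* PDU1: prio 6, PGN 0x0ea00, DA 0x80, SA 0x25 -> 0x98ea8025 */
	printf("0x%08x\n", j1939_build_canid(6, 0x0ea00, 0x80, 0x25));
	return 0;
}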
diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
new file mode 100644
index 000000000000..37c1040bcb9c
--- /dev/null
+++ b/net/can/j1939/socket.c
@@ -0,0 +1,1160 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (c) 2010-2011 EIA Electronics,
3// Pieter Beyens <pieter.beyens@eia.be>
4// Copyright (c) 2010-2011 EIA Electronics,
5// Kurt Van Dijck <kurt.van.dijck@eia.be>
6// Copyright (c) 2018 Protonic,
7// Robin van der Gracht <robin@protonic.nl>
8// Copyright (c) 2017-2019 Pengutronix,
9// Marc Kleine-Budde <kernel@pengutronix.de>
10// Copyright (c) 2017-2019 Pengutronix,
11// Oleksij Rempel <kernel@pengutronix.de>
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/can/core.h>
16#include <linux/can/skb.h>
17#include <linux/errqueue.h>
18#include <linux/if_arp.h>
19
20#include "j1939-priv.h"
21
22#define J1939_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_addr.j1939)
23
24/* conversion function between struct sock::sk_priority from linux and
25 * j1939 priority field
26 */
27static inline priority_t j1939_prio(u32 sk_priority)
28{
29 sk_priority = min(sk_priority, 7U);
30
31 return 7 - sk_priority;
32}
33
34static inline u32 j1939_to_sk_priority(priority_t prio)
35{
36 return 7 - prio;
37}
38
39/* function to see if pgn is to be evaluated */
40static inline bool j1939_pgn_is_valid(pgn_t pgn)
41{
42 return pgn <= J1939_PGN_MAX;
43}
44
45/* test function to avoid non-zero DA placeholder for pdu1 pgn's */
46static inline bool j1939_pgn_is_clean_pdu(pgn_t pgn)
47{
48 if (j1939_pgn_is_pdu1(pgn))
49 return !(pgn & 0xff);
50 else
51 return true;
52}
53
54static inline void j1939_sock_pending_add(struct sock *sk)
55{
56 struct j1939_sock *jsk = j1939_sk(sk);
57
58 atomic_inc(&jsk->skb_pending);
59}
60
61static int j1939_sock_pending_get(struct sock *sk)
62{
63 struct j1939_sock *jsk = j1939_sk(sk);
64
65 return atomic_read(&jsk->skb_pending);
66}
67
68void j1939_sock_pending_del(struct sock *sk)
69{
70 struct j1939_sock *jsk = j1939_sk(sk);
71
72 /* atomic_dec_return returns the new value */
73 if (!atomic_dec_return(&jsk->skb_pending))
74 wake_up(&jsk->waitq); /* no pending SKB's */
75}
76
77static void j1939_jsk_add(struct j1939_priv *priv, struct j1939_sock *jsk)
78{
79 jsk->state |= J1939_SOCK_BOUND;
80 j1939_priv_get(priv);
81 jsk->priv = priv;
82
83 spin_lock_bh(&priv->j1939_socks_lock);
84 list_add_tail(&jsk->list, &priv->j1939_socks);
85 spin_unlock_bh(&priv->j1939_socks_lock);
86}
87
88static void j1939_jsk_del(struct j1939_priv *priv, struct j1939_sock *jsk)
89{
90 spin_lock_bh(&priv->j1939_socks_lock);
91 list_del_init(&jsk->list);
92 spin_unlock_bh(&priv->j1939_socks_lock);
93
94 jsk->priv = NULL;
95 j1939_priv_put(priv);
96 jsk->state &= ~J1939_SOCK_BOUND;
97}
98
99static bool j1939_sk_queue_session(struct j1939_session *session)
100{
101 struct j1939_sock *jsk = j1939_sk(session->sk);
102 bool empty;
103
104 spin_lock_bh(&jsk->sk_session_queue_lock);
105 empty = list_empty(&jsk->sk_session_queue);
106 j1939_session_get(session);
107 list_add_tail(&session->sk_session_queue_entry, &jsk->sk_session_queue);
108 spin_unlock_bh(&jsk->sk_session_queue_lock);
109 j1939_sock_pending_add(&jsk->sk);
110
111 return empty;
112}
113
114static struct
115j1939_session *j1939_sk_get_incomplete_session(struct j1939_sock *jsk)
116{
117 struct j1939_session *session = NULL;
118
119 spin_lock_bh(&jsk->sk_session_queue_lock);
120 if (!list_empty(&jsk->sk_session_queue)) {
121 session = list_last_entry(&jsk->sk_session_queue,
122 struct j1939_session,
123 sk_session_queue_entry);
124 if (session->total_queued_size == session->total_message_size)
125 session = NULL;
126 else
127 j1939_session_get(session);
128 }
129 spin_unlock_bh(&jsk->sk_session_queue_lock);
130
131 return session;
132}
133
134static void j1939_sk_queue_drop_all(struct j1939_priv *priv,
135 struct j1939_sock *jsk, int err)
136{
137 struct j1939_session *session, *tmp;
138
139 netdev_dbg(priv->ndev, "%s: err: %i\n", __func__, err);
140 spin_lock_bh(&jsk->sk_session_queue_lock);
141 list_for_each_entry_safe(session, tmp, &jsk->sk_session_queue,
142 sk_session_queue_entry) {
143 list_del_init(&session->sk_session_queue_entry);
144 session->err = err;
145 j1939_session_put(session);
146 }
147 spin_unlock_bh(&jsk->sk_session_queue_lock);
148}
149
150static void j1939_sk_queue_activate_next_locked(struct j1939_session *session)
151{
152 struct j1939_sock *jsk;
153 struct j1939_session *first;
154 int err;
155
156 /* RX sessions don't have a socket (yet) */
157 if (!session->sk)
158 return;
159
160 jsk = j1939_sk(session->sk);
161 lockdep_assert_held(&jsk->sk_session_queue_lock);
162
163 err = session->err;
164
165 first = list_first_entry_or_null(&jsk->sk_session_queue,
166 struct j1939_session,
167 sk_session_queue_entry);
168
169 /* Someone else has already activated the next session */
170 if (first != session)
171 return;
172
173activate_next:
174 list_del_init(&first->sk_session_queue_entry);
175 j1939_session_put(first);
176 first = list_first_entry_or_null(&jsk->sk_session_queue,
177 struct j1939_session,
178 sk_session_queue_entry);
179 if (!first)
180 return;
181
182 if (WARN_ON_ONCE(j1939_session_activate(first))) {
183 first->err = -EBUSY;
184 goto activate_next;
185 } else {
186 /* Give the receiver some time (arbitrarily chosen) to recover */
187 int time_ms = 0;
188
189 if (err)
190 time_ms = 10 + prandom_u32_max(16);
191
192 j1939_tp_schedule_txtimer(first, time_ms);
193 }
194}
195
196void j1939_sk_queue_activate_next(struct j1939_session *session)
197{
198 struct j1939_sock *jsk;
199
200 if (!session->sk)
201 return;
202
203 jsk = j1939_sk(session->sk);
204
205 spin_lock_bh(&jsk->sk_session_queue_lock);
206 j1939_sk_queue_activate_next_locked(session);
207 spin_unlock_bh(&jsk->sk_session_queue_lock);
208}
209
210static bool j1939_sk_match_dst(struct j1939_sock *jsk,
211 const struct j1939_sk_buff_cb *skcb)
212{
213 if ((jsk->state & J1939_SOCK_PROMISC))
214 return true;
215
216 /* Destination address filter */
217 if (jsk->addr.src_name && skcb->addr.dst_name) {
218 if (jsk->addr.src_name != skcb->addr.dst_name)
219 return false;
220 } else {
221 /* receive (all sockets) if
222 * - all packets that match our bind() address
223 * - all broadcasts on a socket if SO_BROADCAST
224 * is set
225 */
226 if (j1939_address_is_unicast(skcb->addr.da)) {
227 if (jsk->addr.sa != skcb->addr.da)
228 return false;
229 } else if (!sock_flag(&jsk->sk, SOCK_BROADCAST)) {
230 /* receiving broadcast without SO_BROADCAST
231 * flag is not allowed
232 */
233 return false;
234 }
235 }
236
237 /* Source address filter */
238 if (jsk->state & J1939_SOCK_CONNECTED) {
239 /* receive (all sockets) if
240 * - all packets that match our connect() name or address
241 */
242 if (jsk->addr.dst_name && skcb->addr.src_name) {
243 if (jsk->addr.dst_name != skcb->addr.src_name)
244 return false;
245 } else {
246 if (jsk->addr.da != skcb->addr.sa)
247 return false;
248 }
249 }
250
251 /* PGN filter */
252 if (j1939_pgn_is_valid(jsk->pgn_rx_filter) &&
253 jsk->pgn_rx_filter != skcb->addr.pgn)
254 return false;
255
256 return true;
257}
258
259/* matches skb control buffer (addr) with a j1939 filter */
260static bool j1939_sk_match_filter(struct j1939_sock *jsk,
261 const struct j1939_sk_buff_cb *skcb)
262{
263 const struct j1939_filter *f = jsk->filters;
264 int nfilter = jsk->nfilters;
265
266 if (!nfilter)
267 /* receive all when no filters are assigned */
268 return true;
269
270 for (; nfilter; ++f, --nfilter) {
271 if ((skcb->addr.pgn & f->pgn_mask) != f->pgn)
272 continue;
273 if ((skcb->addr.sa & f->addr_mask) != f->addr)
274 continue;
275 if ((skcb->addr.src_name & f->name_mask) != f->name)
276 continue;
277 return true;
278 }
279 return false;
280}
281
282static bool j1939_sk_recv_match_one(struct j1939_sock *jsk,
283 const struct j1939_sk_buff_cb *skcb)
284{
285 if (!(jsk->state & J1939_SOCK_BOUND))
286 return false;
287
288 if (!j1939_sk_match_dst(jsk, skcb))
289 return false;
290
291 if (!j1939_sk_match_filter(jsk, skcb))
292 return false;
293
294 return true;
295}
296
297static void j1939_sk_recv_one(struct j1939_sock *jsk, struct sk_buff *oskb)
298{
299 const struct j1939_sk_buff_cb *oskcb = j1939_skb_to_cb(oskb);
300 struct j1939_sk_buff_cb *skcb;
301 struct sk_buff *skb;
302
303 if (oskb->sk == &jsk->sk)
304 return;
305
306 if (!j1939_sk_recv_match_one(jsk, oskcb))
307 return;
308
309 skb = skb_clone(oskb, GFP_ATOMIC);
310 if (!skb) {
311 pr_warn("skb clone failed\n");
312 return;
313 }
314 can_skb_set_owner(skb, oskb->sk);
315
316 skcb = j1939_skb_to_cb(skb);
317 skcb->msg_flags &= ~(MSG_DONTROUTE);
318 if (skb->sk)
319 skcb->msg_flags |= MSG_DONTROUTE;
320
321 if (sock_queue_rcv_skb(&jsk->sk, skb) < 0)
322 kfree_skb(skb);
323}
324
325bool j1939_sk_recv_match(struct j1939_priv *priv, struct j1939_sk_buff_cb *skcb)
326{
327 struct j1939_sock *jsk;
328 bool match = false;
329
330 spin_lock_bh(&priv->j1939_socks_lock);
331 list_for_each_entry(jsk, &priv->j1939_socks, list) {
332 match = j1939_sk_recv_match_one(jsk, skcb);
333 if (match)
334 break;
335 }
336 spin_unlock_bh(&priv->j1939_socks_lock);
337
338 return match;
339}
340
341void j1939_sk_recv(struct j1939_priv *priv, struct sk_buff *skb)
342{
343 struct j1939_sock *jsk;
344
345 spin_lock_bh(&priv->j1939_socks_lock);
346 list_for_each_entry(jsk, &priv->j1939_socks, list) {
347 j1939_sk_recv_one(jsk, skb);
348 }
349 spin_unlock_bh(&priv->j1939_socks_lock);
350}
351
352static int j1939_sk_init(struct sock *sk)
353{
354 struct j1939_sock *jsk = j1939_sk(sk);
355
356 /* Ensure that "sk" is first member in "struct j1939_sock", so that we
357 * can skip it during memset().
358 */
359 BUILD_BUG_ON(offsetof(struct j1939_sock, sk) != 0);
360 memset((void *)jsk + sizeof(jsk->sk), 0x0,
361 sizeof(*jsk) - sizeof(jsk->sk));
362
363 INIT_LIST_HEAD(&jsk->list);
364 init_waitqueue_head(&jsk->waitq);
365 jsk->sk.sk_priority = j1939_to_sk_priority(6);
366 jsk->sk.sk_reuse = 1; /* per default */
367 jsk->addr.sa = J1939_NO_ADDR;
368 jsk->addr.da = J1939_NO_ADDR;
369 jsk->addr.pgn = J1939_NO_PGN;
370 jsk->pgn_rx_filter = J1939_NO_PGN;
371 atomic_set(&jsk->skb_pending, 0);
372 spin_lock_init(&jsk->sk_session_queue_lock);
373 INIT_LIST_HEAD(&jsk->sk_session_queue);
374
375 return 0;
376}
377
378static int j1939_sk_sanity_check(struct sockaddr_can *addr, int len)
379{
380 if (!addr)
381 return -EDESTADDRREQ;
382 if (len < J1939_MIN_NAMELEN)
383 return -EINVAL;
384 if (addr->can_family != AF_CAN)
385 return -EINVAL;
386 if (!addr->can_ifindex)
387 return -ENODEV;
388 if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn) &&
389 !j1939_pgn_is_clean_pdu(addr->can_addr.j1939.pgn))
390 return -EINVAL;
391
392 return 0;
393}
394
395static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
396{
397 struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
398 struct j1939_sock *jsk = j1939_sk(sock->sk);
399 struct j1939_priv *priv = jsk->priv;
400 struct sock *sk = sock->sk;
401 struct net *net = sock_net(sk);
402 int ret = 0;
403
404 ret = j1939_sk_sanity_check(addr, len);
405 if (ret)
406 return ret;
407
408 lock_sock(sock->sk);
409
410 /* Already bound to an interface? */
411 if (jsk->state & J1939_SOCK_BOUND) {
412 /* A re-bind() to a different interface is not
413 * supported.
414 */
415 if (jsk->ifindex != addr->can_ifindex) {
416 ret = -EINVAL;
417 goto out_release_sock;
418 }
419
420 /* drop old references */
421 j1939_jsk_del(priv, jsk);
422 j1939_local_ecu_put(priv, jsk->addr.src_name, jsk->addr.sa);
423 } else {
424 struct net_device *ndev;
425
426 ndev = dev_get_by_index(net, addr->can_ifindex);
427 if (!ndev) {
428 ret = -ENODEV;
429 goto out_release_sock;
430 }
431
432 if (ndev->type != ARPHRD_CAN) {
433 dev_put(ndev);
434 ret = -ENODEV;
435 goto out_release_sock;
436 }
437
438 priv = j1939_netdev_start(ndev);
439 dev_put(ndev);
440 if (IS_ERR(priv)) {
441 ret = PTR_ERR(priv);
442 goto out_release_sock;
443 }
444
445 jsk->ifindex = addr->can_ifindex;
446 }
447
448 /* set default transmit pgn */
449 if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn))
450 jsk->pgn_rx_filter = addr->can_addr.j1939.pgn;
451 jsk->addr.src_name = addr->can_addr.j1939.name;
452 jsk->addr.sa = addr->can_addr.j1939.addr;
453
454 /* get new references */
455 ret = j1939_local_ecu_get(priv, jsk->addr.src_name, jsk->addr.sa);
456 if (ret) {
457 j1939_netdev_stop(priv);
458 goto out_release_sock;
459 }
460
461 j1939_jsk_add(priv, jsk);
462
463 out_release_sock: /* fall through */
464 release_sock(sock->sk);
465
466 return ret;
467}
468
469static int j1939_sk_connect(struct socket *sock, struct sockaddr *uaddr,
470 int len, int flags)
471{
472 struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
473 struct j1939_sock *jsk = j1939_sk(sock->sk);
474 int ret = 0;
475
476 ret = j1939_sk_sanity_check(addr, len);
477 if (ret)
478 return ret;
479
480 lock_sock(sock->sk);
481
482 /* bind() before connect() is mandatory */
483 if (!(jsk->state & J1939_SOCK_BOUND)) {
484 ret = -EINVAL;
485 goto out_release_sock;
486 }
487
488 /* A connect() to a different interface is not supported. */
489 if (jsk->ifindex != addr->can_ifindex) {
490 ret = -EINVAL;
491 goto out_release_sock;
492 }
493
494 if (!addr->can_addr.j1939.name &&
495 addr->can_addr.j1939.addr == J1939_NO_ADDR &&
496 !sock_flag(&jsk->sk, SOCK_BROADCAST)) {
497 /* broadcast, but SO_BROADCAST not set */
498 ret = -EACCES;
499 goto out_release_sock;
500 }
501
502 jsk->addr.dst_name = addr->can_addr.j1939.name;
503 jsk->addr.da = addr->can_addr.j1939.addr;
504
505 if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn))
506 jsk->addr.pgn = addr->can_addr.j1939.pgn;
507
508 jsk->state |= J1939_SOCK_CONNECTED;
509
510 out_release_sock: /* fall through */
511 release_sock(sock->sk);
512
513 return ret;
514}
515
516static void j1939_sk_sock2sockaddr_can(struct sockaddr_can *addr,
517 const struct j1939_sock *jsk, int peer)
518{
519 addr->can_family = AF_CAN;
520 addr->can_ifindex = jsk->ifindex;
521 addr->can_addr.j1939.pgn = jsk->addr.pgn;
522 if (peer) {
523 addr->can_addr.j1939.name = jsk->addr.dst_name;
524 addr->can_addr.j1939.addr = jsk->addr.da;
525 } else {
526 addr->can_addr.j1939.name = jsk->addr.src_name;
527 addr->can_addr.j1939.addr = jsk->addr.sa;
528 }
529}
530
531static int j1939_sk_getname(struct socket *sock, struct sockaddr *uaddr,
532 int peer)
533{
534 struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
535 struct sock *sk = sock->sk;
536 struct j1939_sock *jsk = j1939_sk(sk);
537 int ret = 0;
538
539 lock_sock(sk);
540
541 if (peer && !(jsk->state & J1939_SOCK_CONNECTED)) {
542 ret = -EADDRNOTAVAIL;
543 goto failure;
544 }
545
546 j1939_sk_sock2sockaddr_can(addr, jsk, peer);
547 ret = J1939_MIN_NAMELEN;
548
549 failure:
550 release_sock(sk);
551
552 return ret;
553}
554
555static int j1939_sk_release(struct socket *sock)
556{
557 struct sock *sk = sock->sk;
558 struct j1939_sock *jsk;
559
560 if (!sk)
561 return 0;
562
563 jsk = j1939_sk(sk);
564 lock_sock(sk);
565
566 if (jsk->state & J1939_SOCK_BOUND) {
567 struct j1939_priv *priv = jsk->priv;
568
569 if (wait_event_interruptible(jsk->waitq,
570 !j1939_sock_pending_get(&jsk->sk))) {
571 j1939_cancel_active_session(priv, sk);
572 j1939_sk_queue_drop_all(priv, jsk, ESHUTDOWN);
573 }
574
575 j1939_jsk_del(priv, jsk);
576
577 j1939_local_ecu_put(priv, jsk->addr.src_name,
578 jsk->addr.sa);
579
580 j1939_netdev_stop(priv);
581 }
582
583 sock_orphan(sk);
584 sock->sk = NULL;
585
586 release_sock(sk);
587 sock_put(sk);
588
589 return 0;
590}
591
592static int j1939_sk_setsockopt_flag(struct j1939_sock *jsk, char __user *optval,
593 unsigned int optlen, int flag)
594{
595 int tmp;
596
597 if (optlen != sizeof(tmp))
598 return -EINVAL;
599 if (copy_from_user(&tmp, optval, optlen))
600 return -EFAULT;
601 lock_sock(&jsk->sk);
602 if (tmp)
603 jsk->state |= flag;
604 else
605 jsk->state &= ~flag;
606 release_sock(&jsk->sk);
607 return tmp;
608}
609
610static int j1939_sk_setsockopt(struct socket *sock, int level, int optname,
611 char __user *optval, unsigned int optlen)
612{
613 struct sock *sk = sock->sk;
614 struct j1939_sock *jsk = j1939_sk(sk);
615 int tmp, count = 0, ret = 0;
616 struct j1939_filter *filters = NULL, *ofilters;
617
618 if (level != SOL_CAN_J1939)
619 return -EINVAL;
620
621 switch (optname) {
622 case SO_J1939_FILTER:
623 if (optval) {
624 struct j1939_filter *f;
625 int c;
626
627 if (optlen % sizeof(*filters) != 0)
628 return -EINVAL;
629
630 if (optlen > J1939_FILTER_MAX *
631 sizeof(struct j1939_filter))
632 return -EINVAL;
633
634 count = optlen / sizeof(*filters);
635 filters = memdup_user(optval, optlen);
636 if (IS_ERR(filters))
637 return PTR_ERR(filters);
638
639 for (f = filters, c = count; c; f++, c--) {
640 f->name &= f->name_mask;
641 f->pgn &= f->pgn_mask;
642 f->addr &= f->addr_mask;
643 }
644 }
645
646 lock_sock(&jsk->sk);
647 ofilters = jsk->filters;
648 jsk->filters = filters;
649 jsk->nfilters = count;
650 release_sock(&jsk->sk);
651 kfree(ofilters);
652 return 0;
653 case SO_J1939_PROMISC:
654 return j1939_sk_setsockopt_flag(jsk, optval, optlen,
655 J1939_SOCK_PROMISC);
656 case SO_J1939_ERRQUEUE:
657 ret = j1939_sk_setsockopt_flag(jsk, optval, optlen,
658 J1939_SOCK_ERRQUEUE);
659 if (ret < 0)
660 return ret;
661
662 if (!(jsk->state & J1939_SOCK_ERRQUEUE))
663 skb_queue_purge(&sk->sk_error_queue);
664 return ret;
665 case SO_J1939_SEND_PRIO:
666 if (optlen != sizeof(tmp))
667 return -EINVAL;
668 if (copy_from_user(&tmp, optval, optlen))
669 return -EFAULT;
670 if (tmp < 0 || tmp > 7)
671 return -EDOM;
672 if (tmp < 2 && !capable(CAP_NET_ADMIN))
673 return -EPERM;
674 lock_sock(&jsk->sk);
675 jsk->sk.sk_priority = j1939_to_sk_priority(tmp);
676 release_sock(&jsk->sk);
677 return 0;
678 default:
679 return -ENOPROTOOPT;
680 }
681}
682
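Not part of the patch: a sketch of how an application might drive the options handled above, here SO_J1939_FILTER and SO_J1939_SEND_PRIO (struct j1939_filter and the option names come from the new UAPI header; the PGN, mask and priority values are placeholders).

#include <sys/socket.h>
#include <linux/can/j1939.h>

static int j1939_set_options(int sock)
{
	/* accept only PGN 0x12300, from any source address or NAME */
	struct j1939_filter filt = {
		.pgn = 0x12300,
		.pgn_mask = 0x3ffff,	/* match all 18 PGN bits */
	};
	int prio = 3;	/* J1939 default is 6; 0 and 1 need CAP_NET_ADMIN */

	if (setsockopt(sock, SOL_CAN_J1939, SO_J1939_FILTER,
		       &filt, sizeof(filt)) < 0)
		return -1;

	return setsockopt(sock, SOL_CAN_J1939, SO_J1939_SEND_PRIO,
			  &prio, sizeof(prio));
}

Passing an array of struct j1939_filter (up to J1939_FILTER_MAX entries) installs several filters at once; the handler above requires optlen to be a multiple of the filter size.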
683static int j1939_sk_getsockopt(struct socket *sock, int level, int optname,
684 char __user *optval, int __user *optlen)
685{
686 struct sock *sk = sock->sk;
687 struct j1939_sock *jsk = j1939_sk(sk);
688 int ret, ulen;
689 /* set defaults for using 'int' properties */
690 int tmp = 0;
691 int len = sizeof(tmp);
692 void *val = &tmp;
693
694 if (level != SOL_CAN_J1939)
695 return -EINVAL;
696 if (get_user(ulen, optlen))
697 return -EFAULT;
698 if (ulen < 0)
699 return -EINVAL;
700
701 lock_sock(&jsk->sk);
702 switch (optname) {
703 case SO_J1939_PROMISC:
704 tmp = (jsk->state & J1939_SOCK_PROMISC) ? 1 : 0;
705 break;
706 case SO_J1939_ERRQUEUE:
707 tmp = (jsk->state & J1939_SOCK_ERRQUEUE) ? 1 : 0;
708 break;
709 case SO_J1939_SEND_PRIO:
710 tmp = j1939_prio(jsk->sk.sk_priority);
711 break;
712 default:
713 ret = -ENOPROTOOPT;
714 goto no_copy;
715 }
716
 717	/* copy to user, based on 'len' & 'val';
 718	 * most sockopts are 'int' properties, which leave 'len' & 'val'
 719	 * unchanged and instead modify 'tmp'
 720	 */
721 if (len > ulen)
722 ret = -EFAULT;
723 else if (put_user(len, optlen))
724 ret = -EFAULT;
725 else if (copy_to_user(optval, val, len))
726 ret = -EFAULT;
727 else
728 ret = 0;
729 no_copy:
730 release_sock(&jsk->sk);
731 return ret;
732}
733
734static int j1939_sk_recvmsg(struct socket *sock, struct msghdr *msg,
735 size_t size, int flags)
736{
737 struct sock *sk = sock->sk;
738 struct sk_buff *skb;
739 struct j1939_sk_buff_cb *skcb;
740 int ret = 0;
741
742 if (flags & ~(MSG_DONTWAIT | MSG_ERRQUEUE))
743 return -EINVAL;
744
745 if (flags & MSG_ERRQUEUE)
746 return sock_recv_errqueue(sock->sk, msg, size, SOL_CAN_J1939,
747 SCM_J1939_ERRQUEUE);
748
749 skb = skb_recv_datagram(sk, flags, 0, &ret);
750 if (!skb)
751 return ret;
752
753 if (size < skb->len)
754 msg->msg_flags |= MSG_TRUNC;
755 else
756 size = skb->len;
757
758 ret = memcpy_to_msg(msg, skb->data, size);
759 if (ret < 0) {
760 skb_free_datagram(sk, skb);
761 return ret;
762 }
763
764 skcb = j1939_skb_to_cb(skb);
765 if (j1939_address_is_valid(skcb->addr.da))
766 put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_DEST_ADDR,
767 sizeof(skcb->addr.da), &skcb->addr.da);
768
769 if (skcb->addr.dst_name)
770 put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_DEST_NAME,
771 sizeof(skcb->addr.dst_name), &skcb->addr.dst_name);
772
773 put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_PRIO,
774 sizeof(skcb->priority), &skcb->priority);
775
776 if (msg->msg_name) {
777 struct sockaddr_can *paddr = msg->msg_name;
778
779 msg->msg_namelen = J1939_MIN_NAMELEN;
780 memset(msg->msg_name, 0, msg->msg_namelen);
781 paddr->can_family = AF_CAN;
782 paddr->can_ifindex = skb->skb_iif;
783 paddr->can_addr.j1939.name = skcb->addr.src_name;
784 paddr->can_addr.j1939.addr = skcb->addr.sa;
785 paddr->can_addr.j1939.pgn = skcb->addr.pgn;
786 }
787
788 sock_recv_ts_and_drops(msg, sk, skb);
789 msg->msg_flags |= skcb->msg_flags;
790 skb_free_datagram(sk, skb);
791
792 return size;
793}
794
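Not part of the patch: the user-space counterpart of the recvmsg() path above, showing how the SCM_J1939_* ancillary data could be picked up. The cmsg types come from the new UAPI header; buffer sizes and error handling are trimmed to the essentials.

#include <stdint.h>
#include <stdio.h>
#include <sys/uio.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/j1939.h>

static int j1939_recv_one(int sock)
{
	uint8_t data[1785];	/* fits a full TP transfer */
	char cbuf[CMSG_SPACE(sizeof(uint64_t)) * 3];
	struct sockaddr_can src;
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_name = &src, .msg_namelen = sizeof(src),
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	ssize_t len;

	len = recvmsg(sock, &msg, 0);
	if (len < 0)
		return -1;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level != SOL_CAN_J1939)
			continue;
		if (cmsg->cmsg_type == SCM_J1939_DEST_ADDR)
			printf("dst addr 0x%02x\n", *CMSG_DATA(cmsg));
		else if (cmsg->cmsg_type == SCM_J1939_PRIO)
			printf("priority %d\n", *CMSG_DATA(cmsg));
	}

	printf("%zd bytes, PGN 0x%05x from 0x%02x\n", len,
	       src.can_addr.j1939.pgn, src.can_addr.j1939.addr);
	return 0;
}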
795static struct sk_buff *j1939_sk_alloc_skb(struct net_device *ndev,
796 struct sock *sk,
797 struct msghdr *msg, size_t size,
798 int *errcode)
799{
800 struct j1939_sock *jsk = j1939_sk(sk);
801 struct j1939_sk_buff_cb *skcb;
802 struct sk_buff *skb;
803 int ret;
804
805 skb = sock_alloc_send_skb(sk,
806 size +
807 sizeof(struct can_frame) -
808 sizeof(((struct can_frame *)NULL)->data) +
809 sizeof(struct can_skb_priv),
810 msg->msg_flags & MSG_DONTWAIT, &ret);
811 if (!skb)
812 goto failure;
813
814 can_skb_reserve(skb);
815 can_skb_prv(skb)->ifindex = ndev->ifindex;
816 can_skb_prv(skb)->skbcnt = 0;
817 skb_reserve(skb, offsetof(struct can_frame, data));
818
819 ret = memcpy_from_msg(skb_put(skb, size), msg, size);
820 if (ret < 0)
821 goto free_skb;
822
823 skb->dev = ndev;
824
825 skcb = j1939_skb_to_cb(skb);
826 memset(skcb, 0, sizeof(*skcb));
827 skcb->addr = jsk->addr;
828 skcb->priority = j1939_prio(sk->sk_priority);
829
830 if (msg->msg_name) {
831 struct sockaddr_can *addr = msg->msg_name;
832
833 if (addr->can_addr.j1939.name ||
834 addr->can_addr.j1939.addr != J1939_NO_ADDR) {
835 skcb->addr.dst_name = addr->can_addr.j1939.name;
836 skcb->addr.da = addr->can_addr.j1939.addr;
837 }
838 if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn))
839 skcb->addr.pgn = addr->can_addr.j1939.pgn;
840 }
841
842 *errcode = ret;
843 return skb;
844
845free_skb:
846 kfree_skb(skb);
847failure:
848 *errcode = ret;
849 return NULL;
850}
851
852static size_t j1939_sk_opt_stats_get_size(void)
853{
854 return
855 nla_total_size(sizeof(u32)) + /* J1939_NLA_BYTES_ACKED */
856 0;
857}
858
859static struct sk_buff *
860j1939_sk_get_timestamping_opt_stats(struct j1939_session *session)
861{
862 struct sk_buff *stats;
863 u32 size;
864
865 stats = alloc_skb(j1939_sk_opt_stats_get_size(), GFP_ATOMIC);
866 if (!stats)
867 return NULL;
868
869 if (session->skcb.addr.type == J1939_SIMPLE)
870 size = session->total_message_size;
871 else
872 size = min(session->pkt.tx_acked * 7,
873 session->total_message_size);
874
875 nla_put_u32(stats, J1939_NLA_BYTES_ACKED, size);
876
877 return stats;
878}
879
880void j1939_sk_errqueue(struct j1939_session *session,
881 enum j1939_sk_errqueue_type type)
882{
883 struct j1939_priv *priv = session->priv;
884 struct sock *sk = session->sk;
885 struct j1939_sock *jsk;
886 struct sock_exterr_skb *serr;
887 struct sk_buff *skb;
888 char *state = "UNK";
889 int err;
890
891 /* currently we have no sk for the RX session */
892 if (!sk)
893 return;
894
895 jsk = j1939_sk(sk);
896
897 if (!(jsk->state & J1939_SOCK_ERRQUEUE))
898 return;
899
900 skb = j1939_sk_get_timestamping_opt_stats(session);
901 if (!skb)
902 return;
903
904 skb->tstamp = ktime_get_real();
905
906 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
907
908 serr = SKB_EXT_ERR(skb);
909 memset(serr, 0, sizeof(*serr));
910 switch (type) {
911 case J1939_ERRQUEUE_ACK:
912 if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK))
913 return;
914
915 serr->ee.ee_errno = ENOMSG;
916 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
917 serr->ee.ee_info = SCM_TSTAMP_ACK;
918 state = "ACK";
919 break;
920 case J1939_ERRQUEUE_SCHED:
921 if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_SCHED))
922 return;
923
924 serr->ee.ee_errno = ENOMSG;
925 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
926 serr->ee.ee_info = SCM_TSTAMP_SCHED;
927 state = "SCH";
928 break;
929 case J1939_ERRQUEUE_ABORT:
930 serr->ee.ee_errno = session->err;
931 serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
932 serr->ee.ee_info = J1939_EE_INFO_TX_ABORT;
933 state = "ABT";
934 break;
935 default:
936 netdev_err(priv->ndev, "Unknown errqueue type %i\n", type);
937 }
938
939 serr->opt_stats = true;
940 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
941 serr->ee.ee_data = session->tskey;
942
943 netdev_dbg(session->priv->ndev, "%s: 0x%p tskey: %i, state: %s\n",
944 __func__, session, session->tskey, state);
945 err = sock_queue_err_skb(sk, skb);
946
947 if (err)
948 kfree_skb(skb);
 949}
950
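Not part of the patch: j1939_sk_errqueue() above drops ACK and SCHED events unless the corresponding SO_TIMESTAMPING flags are set, and drops everything unless SO_J1939_ERRQUEUE is enabled; abort reports only need the latter. A sender that wants these TX notifications would configure something like the following sketch (constants from linux/net_tstamp.h and the new J1939 UAPI); the reports are then read with recvmsg(..., MSG_ERRQUEUE), which the recvmsg() handler above routes through sock_recv_errqueue().

#include <sys/socket.h>
#include <linux/can/j1939.h>
#include <linux/net_tstamp.h>

static int j1939_enable_tx_reports(int sock)
{
	int on = 1;
	int flags = SOF_TIMESTAMPING_OPT_ID |	/* ee_data carries the tskey */
		    SOF_TIMESTAMPING_TX_ACK |
		    SOF_TIMESTAMPING_TX_SCHED;

	/* route session ACK/SCHED/abort events to the socket error queue */
	if (setsockopt(sock, SOL_CAN_J1939, SO_J1939_ERRQUEUE,
		       &on, sizeof(on)) < 0)
		return -1;

	return setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}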
951void j1939_sk_send_loop_abort(struct sock *sk, int err)
952{
953 sk->sk_err = err;
954
955 sk->sk_error_report(sk);
956}
957
958static int j1939_sk_send_loop(struct j1939_priv *priv, struct sock *sk,
959 struct msghdr *msg, size_t size)
960
961{
962 struct j1939_sock *jsk = j1939_sk(sk);
963 struct j1939_session *session = j1939_sk_get_incomplete_session(jsk);
964 struct sk_buff *skb;
965 size_t segment_size, todo_size;
966 int ret = 0;
967
968 if (session &&
969 session->total_message_size != session->total_queued_size + size) {
970 j1939_session_put(session);
971 return -EIO;
972 }
973
974 todo_size = size;
975
976 while (todo_size) {
977 struct j1939_sk_buff_cb *skcb;
978
979 segment_size = min_t(size_t, J1939_MAX_TP_PACKET_SIZE,
980 todo_size);
981
982 /* Allocate skb for one segment */
983 skb = j1939_sk_alloc_skb(priv->ndev, sk, msg, segment_size,
984 &ret);
985 if (ret)
986 break;
987
988 skcb = j1939_skb_to_cb(skb);
989
990 if (!session) {
 991			/* at this point the size should be the full
 992			 * size of the session
 993			 */
994 skcb->offset = 0;
995 session = j1939_tp_send(priv, skb, size);
996 if (IS_ERR(session)) {
997 ret = PTR_ERR(session);
998 goto kfree_skb;
999 }
1000 if (j1939_sk_queue_session(session)) {
1001				/* try to activate the session if we are
1002				 * first in the queue
1003				 */
1004 if (!j1939_session_activate(session)) {
1005 j1939_tp_schedule_txtimer(session, 0);
1006 } else {
1007 ret = -EBUSY;
1008 session->err = ret;
1009 j1939_sk_queue_drop_all(priv, jsk,
1010 EBUSY);
1011 break;
1012 }
1013 }
1014 } else {
1015 skcb->offset = session->total_queued_size;
1016 j1939_session_skb_queue(session, skb);
1017 }
1018
1019 todo_size -= segment_size;
1020 session->total_queued_size += segment_size;
1021 }
1022
1023 switch (ret) {
1024 case 0: /* OK */
1025 if (todo_size)
1026 netdev_warn(priv->ndev,
1027 "no error found and not completely queued?! %zu\n",
1028 todo_size);
1029 ret = size;
1030 break;
1031 case -ERESTARTSYS:
1032 ret = -EINTR;
1033 /* fall through */
1034 case -EAGAIN: /* OK */
1035 if (todo_size != size)
1036 ret = size - todo_size;
1037 break;
1038 default: /* ERROR */
1039 break;
1040 }
1041
1042 if (session)
1043 j1939_session_put(session);
1044
1045 return ret;
1046
1047 kfree_skb:
1048 kfree_skb(skb);
1049 return ret;
1050}
1051
1052static int j1939_sk_sendmsg(struct socket *sock, struct msghdr *msg,
1053 size_t size)
1054{
1055 struct sock *sk = sock->sk;
1056 struct j1939_sock *jsk = j1939_sk(sk);
1057 struct j1939_priv *priv = jsk->priv;
1058 int ifindex;
1059 int ret;
1060
1061 /* various socket state tests */
1062 if (!(jsk->state & J1939_SOCK_BOUND))
1063 return -EBADFD;
1064
1065 ifindex = jsk->ifindex;
1066
1067 if (!jsk->addr.src_name && jsk->addr.sa == J1939_NO_ADDR)
1068 /* no source address assigned yet */
1069 return -EBADFD;
1070
1071 /* deal with provided destination address info */
1072 if (msg->msg_name) {
1073 struct sockaddr_can *addr = msg->msg_name;
1074
1075 if (msg->msg_namelen < J1939_MIN_NAMELEN)
1076 return -EINVAL;
1077
1078 if (addr->can_family != AF_CAN)
1079 return -EINVAL;
1080
1081 if (addr->can_ifindex && addr->can_ifindex != ifindex)
1082 return -EBADFD;
1083
1084 if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn) &&
1085 !j1939_pgn_is_clean_pdu(addr->can_addr.j1939.pgn))
1086 return -EINVAL;
1087
1088 if (!addr->can_addr.j1939.name &&
1089 addr->can_addr.j1939.addr == J1939_NO_ADDR &&
1090 !sock_flag(sk, SOCK_BROADCAST))
1091 /* broadcast, but SO_BROADCAST not set */
1092 return -EACCES;
1093 } else {
1094 if (!jsk->addr.dst_name && jsk->addr.da == J1939_NO_ADDR &&
1095 !sock_flag(sk, SOCK_BROADCAST))
1096 /* broadcast, but SO_BROADCAST not set */
1097 return -EACCES;
1098 }
1099
1100 ret = j1939_sk_send_loop(priv, sk, msg, size);
1101
1102 return ret;
1103}
1104
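Not part of the patch: a minimal sendto() sketch for the send path above, supplying the destination per call instead of via connect(). Destination address and PGN are placeholders; payloads larger than 8 bytes are handed to the (E)TP code in transport.c below.

#include <sys/types.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/j1939.h>

static ssize_t j1939_send_to(int sock, const void *buf, size_t len)
{
	struct sockaddr_can dst = { 0 };

	dst.can_family = AF_CAN;
	/* can_ifindex may stay 0: the bound interface is used */
	dst.can_addr.j1939.name = J1939_NO_NAME;
	dst.can_addr.j1939.addr = 0x30;		/* unicast destination */
	dst.can_addr.j1939.pgn = 0x12300;	/* must be a "clean" PGN */

	return sendto(sock, buf, len, 0,
		      (struct sockaddr *)&dst, sizeof(dst));
}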
1105void j1939_sk_netdev_event_netdown(struct j1939_priv *priv)
1106{
1107 struct j1939_sock *jsk;
1108 int error_code = ENETDOWN;
1109
1110 spin_lock_bh(&priv->j1939_socks_lock);
1111 list_for_each_entry(jsk, &priv->j1939_socks, list) {
1112 jsk->sk.sk_err = error_code;
1113 if (!sock_flag(&jsk->sk, SOCK_DEAD))
1114 jsk->sk.sk_error_report(&jsk->sk);
1115
1116 j1939_sk_queue_drop_all(priv, jsk, error_code);
1117 }
1118 spin_unlock_bh(&priv->j1939_socks_lock);
1119}
1120
1121static int j1939_sk_no_ioctlcmd(struct socket *sock, unsigned int cmd,
1122 unsigned long arg)
1123{
1124 /* no ioctls for socket layer -> hand it down to NIC layer */
1125 return -ENOIOCTLCMD;
1126}
1127
1128static const struct proto_ops j1939_ops = {
1129 .family = PF_CAN,
1130 .release = j1939_sk_release,
1131 .bind = j1939_sk_bind,
1132 .connect = j1939_sk_connect,
1133 .socketpair = sock_no_socketpair,
1134 .accept = sock_no_accept,
1135 .getname = j1939_sk_getname,
1136 .poll = datagram_poll,
1137 .ioctl = j1939_sk_no_ioctlcmd,
1138 .listen = sock_no_listen,
1139 .shutdown = sock_no_shutdown,
1140 .setsockopt = j1939_sk_setsockopt,
1141 .getsockopt = j1939_sk_getsockopt,
1142 .sendmsg = j1939_sk_sendmsg,
1143 .recvmsg = j1939_sk_recvmsg,
1144 .mmap = sock_no_mmap,
1145 .sendpage = sock_no_sendpage,
1146};
1147
1148static struct proto j1939_proto __read_mostly = {
1149 .name = "CAN_J1939",
1150 .owner = THIS_MODULE,
1151 .obj_size = sizeof(struct j1939_sock),
1152 .init = j1939_sk_init,
1153};
1154
1155const struct can_proto j1939_can_proto = {
1156 .type = SOCK_DGRAM,
1157 .protocol = CAN_J1939,
1158 .ops = &j1939_ops,
1159 .prot = &j1939_proto,
1160};
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
new file mode 100644
index 000000000000..fe000ea757ea
--- /dev/null
+++ b/net/can/j1939/transport.c
@@ -0,0 +1,2027 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (c) 2010-2011 EIA Electronics,
3// Kurt Van Dijck <kurt.van.dijck@eia.be>
4// Copyright (c) 2018 Protonic,
5// Robin van der Gracht <robin@protonic.nl>
6// Copyright (c) 2017-2019 Pengutronix,
7// Marc Kleine-Budde <kernel@pengutronix.de>
8// Copyright (c) 2017-2019 Pengutronix,
9// Oleksij Rempel <kernel@pengutronix.de>
10
11#include <linux/can/skb.h>
12
13#include "j1939-priv.h"
14
15#define J1939_XTP_TX_RETRY_LIMIT 100
16
17#define J1939_ETP_PGN_CTL 0xc800
18#define J1939_ETP_PGN_DAT 0xc700
19#define J1939_TP_PGN_CTL 0xec00
20#define J1939_TP_PGN_DAT 0xeb00
21
22#define J1939_TP_CMD_RTS 0x10
23#define J1939_TP_CMD_CTS 0x11
24#define J1939_TP_CMD_EOMA 0x13
25#define J1939_TP_CMD_BAM 0x20
26#define J1939_TP_CMD_ABORT 0xff
27
28#define J1939_ETP_CMD_RTS 0x14
29#define J1939_ETP_CMD_CTS 0x15
30#define J1939_ETP_CMD_DPO 0x16
31#define J1939_ETP_CMD_EOMA 0x17
32#define J1939_ETP_CMD_ABORT 0xff
33
34enum j1939_xtp_abort {
35 J1939_XTP_NO_ABORT = 0,
36 J1939_XTP_ABORT_BUSY = 1,
37 /* Already in one or more connection managed sessions and
38 * cannot support another.
39 *
40 * EALREADY:
41 * Operation already in progress
42 */
43
44 J1939_XTP_ABORT_RESOURCE = 2,
45 /* System resources were needed for another task so this
46 * connection managed session was terminated.
47 *
48 * EMSGSIZE:
49 * The socket type requires that message be sent atomically,
50 * and the size of the message to be sent made this
51 * impossible.
52 */
53
54 J1939_XTP_ABORT_TIMEOUT = 3,
55 /* A timeout occurred and this is the connection abort to
56 * close the session.
57 *
58 * EHOSTUNREACH:
59 * The destination host cannot be reached (probably because
60 * the host is down or a remote router cannot reach it).
61 */
62
63 J1939_XTP_ABORT_GENERIC = 4,
64 /* CTS messages received when data transfer is in progress
65 *
66 * EBADMSG:
67 * Not a data message
68 */
69
70 J1939_XTP_ABORT_FAULT = 5,
71 /* Maximal retransmit request limit reached
72 *
73 * ENOTRECOVERABLE:
74 * State not recoverable
75 */
76
77 J1939_XTP_ABORT_UNEXPECTED_DATA = 6,
78 /* Unexpected data transfer packet
79 *
80 * ENOTCONN:
81 * Transport endpoint is not connected
82 */
83
84 J1939_XTP_ABORT_BAD_SEQ = 7,
85 /* Bad sequence number (and software is not able to recover)
86 *
87 * EILSEQ:
88 * Illegal byte sequence
89 */
90
91 J1939_XTP_ABORT_DUP_SEQ = 8,
92 /* Duplicate sequence number (and software is not able to
93 * recover)
94 */
95
96 J1939_XTP_ABORT_EDPO_UNEXPECTED = 9,
97 /* Unexpected EDPO packet (ETP) or Message size > 1785 bytes
98 * (TP)
99 */
100
101 J1939_XTP_ABORT_BAD_EDPO_PGN = 10,
102 /* Unexpected EDPO PGN (PGN in EDPO is bad) */
103
104 J1939_XTP_ABORT_EDPO_OUTOF_CTS = 11,
105 /* EDPO number of packets is greater than CTS */
106
107 J1939_XTP_ABORT_BAD_EDPO_OFFSET = 12,
108 /* Bad EDPO offset */
109
110 J1939_XTP_ABORT_OTHER_DEPRECATED = 13,
111 /* Deprecated. Use 250 instead (Any other reason) */
112
113 J1939_XTP_ABORT_ECTS_UNXPECTED_PGN = 14,
114 /* Unexpected ECTS PGN (PGN in ECTS is bad) */
115
116 J1939_XTP_ABORT_ECTS_TOO_BIG = 15,
117 /* ECTS requested packets exceeds message size */
118
119 J1939_XTP_ABORT_OTHER = 250,
120 /* Any other reason (if a Connection Abort reason is
121 * identified that is not listed in the table use code 250)
122 */
123};
124
125static unsigned int j1939_tp_block = 255;
126static unsigned int j1939_tp_packet_delay;
127static unsigned int j1939_tp_padding = 1;
128
129/* helpers */
130static const char *j1939_xtp_abort_to_str(enum j1939_xtp_abort abort)
131{
132 switch (abort) {
133 case J1939_XTP_ABORT_BUSY:
134 return "Already in one or more connection managed sessions and cannot support another.";
135 case J1939_XTP_ABORT_RESOURCE:
136 return "System resources were needed for another task so this connection managed session was terminated.";
137 case J1939_XTP_ABORT_TIMEOUT:
138 return "A timeout occurred and this is the connection abort to close the session.";
139 case J1939_XTP_ABORT_GENERIC:
140 return "CTS messages received when data transfer is in progress";
141 case J1939_XTP_ABORT_FAULT:
142 return "Maximal retransmit request limit reached";
143 case J1939_XTP_ABORT_UNEXPECTED_DATA:
144 return "Unexpected data transfer packet";
145 case J1939_XTP_ABORT_BAD_SEQ:
146 return "Bad sequence number (and software is not able to recover)";
147 case J1939_XTP_ABORT_DUP_SEQ:
148 return "Duplicate sequence number (and software is not able to recover)";
149 case J1939_XTP_ABORT_EDPO_UNEXPECTED:
150 return "Unexpected EDPO packet (ETP) or Message size > 1785 bytes (TP)";
151 case J1939_XTP_ABORT_BAD_EDPO_PGN:
152 return "Unexpected EDPO PGN (PGN in EDPO is bad)";
153 case J1939_XTP_ABORT_EDPO_OUTOF_CTS:
154 return "EDPO number of packets is greater than CTS";
155 case J1939_XTP_ABORT_BAD_EDPO_OFFSET:
156 return "Bad EDPO offset";
157 case J1939_XTP_ABORT_OTHER_DEPRECATED:
158 return "Deprecated. Use 250 instead (Any other reason)";
159 case J1939_XTP_ABORT_ECTS_UNXPECTED_PGN:
160 return "Unexpected ECTS PGN (PGN in ECTS is bad)";
161 case J1939_XTP_ABORT_ECTS_TOO_BIG:
162 return "ECTS requested packets exceeds message size";
163 case J1939_XTP_ABORT_OTHER:
164 return "Any other reason (if a Connection Abort reason is identified that is not listed in the table use code 250)";
165 default:
166 return "<unknown>";
167 }
168}
169
170static int j1939_xtp_abort_to_errno(struct j1939_priv *priv,
171 enum j1939_xtp_abort abort)
172{
173 int err;
174
175 switch (abort) {
176 case J1939_XTP_NO_ABORT:
177 WARN_ON_ONCE(abort == J1939_XTP_NO_ABORT);
178 err = 0;
179 break;
180 case J1939_XTP_ABORT_BUSY:
181 err = EALREADY;
182 break;
183 case J1939_XTP_ABORT_RESOURCE:
184 err = EMSGSIZE;
185 break;
186 case J1939_XTP_ABORT_TIMEOUT:
187 err = EHOSTUNREACH;
188 break;
189 case J1939_XTP_ABORT_GENERIC:
190 err = EBADMSG;
191 break;
192 case J1939_XTP_ABORT_FAULT:
193 err = ENOTRECOVERABLE;
194 break;
195 case J1939_XTP_ABORT_UNEXPECTED_DATA:
196 err = ENOTCONN;
197 break;
198 case J1939_XTP_ABORT_BAD_SEQ:
199 err = EILSEQ;
200 break;
201 case J1939_XTP_ABORT_DUP_SEQ:
202 err = EPROTO;
203 break;
204 case J1939_XTP_ABORT_EDPO_UNEXPECTED:
205 err = EPROTO;
206 break;
207 case J1939_XTP_ABORT_BAD_EDPO_PGN:
208 err = EPROTO;
209 break;
210 case J1939_XTP_ABORT_EDPO_OUTOF_CTS:
211 err = EPROTO;
212 break;
213 case J1939_XTP_ABORT_BAD_EDPO_OFFSET:
214 err = EPROTO;
215 break;
216 case J1939_XTP_ABORT_OTHER_DEPRECATED:
217 err = EPROTO;
218 break;
219 case J1939_XTP_ABORT_ECTS_UNXPECTED_PGN:
220 err = EPROTO;
221 break;
222 case J1939_XTP_ABORT_ECTS_TOO_BIG:
223 err = EPROTO;
224 break;
225 case J1939_XTP_ABORT_OTHER:
226 err = EPROTO;
227 break;
228 default:
 229		netdev_warn(priv->ndev, "Unknown abort code %i\n", abort);
230 err = EPROTO;
231 }
232
233 return err;
234}
235
236static inline void j1939_session_list_lock(struct j1939_priv *priv)
237{
238 spin_lock_bh(&priv->active_session_list_lock);
239}
240
241static inline void j1939_session_list_unlock(struct j1939_priv *priv)
242{
243 spin_unlock_bh(&priv->active_session_list_lock);
244}
245
246void j1939_session_get(struct j1939_session *session)
247{
248 kref_get(&session->kref);
249}
250
251/* session completion functions */
252static void __j1939_session_drop(struct j1939_session *session)
253{
254 if (!session->transmission)
255 return;
256
257 j1939_sock_pending_del(session->sk);
258}
259
260static void j1939_session_destroy(struct j1939_session *session)
261{
262 if (session->err)
263 j1939_sk_errqueue(session, J1939_ERRQUEUE_ABORT);
264 else
265 j1939_sk_errqueue(session, J1939_ERRQUEUE_ACK);
266
267 netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
268
269 skb_queue_purge(&session->skb_queue);
270 __j1939_session_drop(session);
271 j1939_priv_put(session->priv);
272 kfree(session);
273}
274
275static void __j1939_session_release(struct kref *kref)
276{
277 struct j1939_session *session = container_of(kref, struct j1939_session,
278 kref);
279
280 j1939_session_destroy(session);
281}
282
283void j1939_session_put(struct j1939_session *session)
284{
285 kref_put(&session->kref, __j1939_session_release);
286}
287
288static void j1939_session_txtimer_cancel(struct j1939_session *session)
289{
290 if (hrtimer_cancel(&session->txtimer))
291 j1939_session_put(session);
292}
293
294static void j1939_session_rxtimer_cancel(struct j1939_session *session)
295{
296 if (hrtimer_cancel(&session->rxtimer))
297 j1939_session_put(session);
298}
299
300void j1939_session_timers_cancel(struct j1939_session *session)
301{
302 j1939_session_txtimer_cancel(session);
303 j1939_session_rxtimer_cancel(session);
304}
305
306static inline bool j1939_cb_is_broadcast(const struct j1939_sk_buff_cb *skcb)
307{
308 return (!skcb->addr.dst_name && (skcb->addr.da == 0xff));
309}
310
311static void j1939_session_skb_drop_old(struct j1939_session *session)
312{
313 struct sk_buff *do_skb;
314 struct j1939_sk_buff_cb *do_skcb;
315 unsigned int offset_start;
316 unsigned long flags;
317
318 if (skb_queue_len(&session->skb_queue) < 2)
319 return;
320
321 offset_start = session->pkt.tx_acked * 7;
322
323 spin_lock_irqsave(&session->skb_queue.lock, flags);
324 do_skb = skb_peek(&session->skb_queue);
325 do_skcb = j1939_skb_to_cb(do_skb);
326
327 if ((do_skcb->offset + do_skb->len) < offset_start) {
328 __skb_unlink(do_skb, &session->skb_queue);
329 kfree_skb(do_skb);
330 }
331 spin_unlock_irqrestore(&session->skb_queue.lock, flags);
332}
333
334void j1939_session_skb_queue(struct j1939_session *session,
335 struct sk_buff *skb)
336{
337 struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
338 struct j1939_priv *priv = session->priv;
339
340 j1939_ac_fixup(priv, skb);
341
342 if (j1939_address_is_unicast(skcb->addr.da) &&
343 priv->ents[skcb->addr.da].nusers)
344 skcb->flags |= J1939_ECU_LOCAL_DST;
345
346 skcb->flags |= J1939_ECU_LOCAL_SRC;
347
348 skb_queue_tail(&session->skb_queue, skb);
349}
350
351static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
352{
353 struct j1939_priv *priv = session->priv;
354 struct sk_buff *skb = NULL;
355 struct sk_buff *do_skb;
356 struct j1939_sk_buff_cb *do_skcb;
357 unsigned int offset_start;
358 unsigned long flags;
359
360 offset_start = session->pkt.dpo * 7;
361
362 spin_lock_irqsave(&session->skb_queue.lock, flags);
363 skb_queue_walk(&session->skb_queue, do_skb) {
364 do_skcb = j1939_skb_to_cb(do_skb);
365
366 if (offset_start >= do_skcb->offset &&
367 offset_start < (do_skcb->offset + do_skb->len)) {
368 skb = do_skb;
369 }
370 }
371 spin_unlock_irqrestore(&session->skb_queue.lock, flags);
372
373 if (!skb)
374 netdev_dbg(priv->ndev, "%s: 0x%p: no skb found for start: %i, queue size: %i\n",
375 __func__, session, offset_start,
376 skb_queue_len(&session->skb_queue));
377
378 return skb;
379}
380
381/* see if we are receiver
382 * returns 0 for broadcasts, although we will receive them
383 */
384static inline int j1939_tp_im_receiver(const struct j1939_sk_buff_cb *skcb)
385{
386 return skcb->flags & J1939_ECU_LOCAL_DST;
387}
388
389/* see if we are sender */
390static inline int j1939_tp_im_transmitter(const struct j1939_sk_buff_cb *skcb)
391{
392 return skcb->flags & J1939_ECU_LOCAL_SRC;
393}
394
395/* see if we are involved as either receiver or transmitter */
396static int j1939_tp_im_involved(const struct j1939_sk_buff_cb *skcb, bool swap)
397{
398 if (swap)
399 return j1939_tp_im_receiver(skcb);
400 else
401 return j1939_tp_im_transmitter(skcb);
402}
403
404static int j1939_tp_im_involved_anydir(struct j1939_sk_buff_cb *skcb)
405{
406 return skcb->flags & (J1939_ECU_LOCAL_SRC | J1939_ECU_LOCAL_DST);
407}
408
409/* extract pgn from flow-ctl message */
410static inline pgn_t j1939_xtp_ctl_to_pgn(const u8 *dat)
411{
412 pgn_t pgn;
413
414 pgn = (dat[7] << 16) | (dat[6] << 8) | (dat[5] << 0);
415 if (j1939_pgn_is_pdu1(pgn))
416 pgn &= 0xffff00;
417 return pgn;
418}
419
420static inline unsigned int j1939_tp_ctl_to_size(const u8 *dat)
421{
422 return (dat[2] << 8) + (dat[1] << 0);
423}
424
425static inline unsigned int j1939_etp_ctl_to_packet(const u8 *dat)
426{
427 return (dat[4] << 16) | (dat[3] << 8) | (dat[2] << 0);
428}
429
430static inline unsigned int j1939_etp_ctl_to_size(const u8 *dat)
431{
432 return (dat[4] << 24) | (dat[3] << 16) |
433 (dat[2] << 8) | (dat[1] << 0);
434}
435
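Not part of the patch: a small self-contained worked example of the decoding done by the helpers above, for a TP.CM_RTS frame announcing a 100 byte / 15 packet transfer of PGN 0x0ef00. The byte values are illustrative; the PDU1 masking mirrors what j1939_xtp_ctl_to_pgn() does via j1939_pgn_is_pdu1().

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* cmd, size lo/hi, total pkts, max pkts per CTS, PGN lo/mid/hi */
	const uint8_t dat[8] = { 0x10, 0x64, 0x00, 0x0f, 0x0f, 0x00, 0xef, 0x00 };
	unsigned int size = (dat[2] << 8) + dat[1];		/* j1939_tp_ctl_to_size() */
	uint32_t pgn = (dat[7] << 16) | (dat[6] << 8) | dat[5];	/* j1939_xtp_ctl_to_pgn() */

	if (((pgn >> 8) & 0xff) < 0xf0)	/* PDU1: mask out the destination byte */
		pgn &= 0xffff00;

	assert(dat[0] == 0x10);			/* J1939_TP_CMD_RTS */
	assert(size == 100);
	assert((size + 6) / 7 == dat[3]);	/* 15 packets of up to 7 bytes */
	assert(pgn == 0xef00);
	return 0;
}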
436/* find existing session:
437 * reverse: swap cb's src & dst
438 * there is no problem with matching broadcasts, since
439 * broadcasts (no dst, no da) would never call this
440 * with reverse == true
441 */
442static bool j1939_session_match(struct j1939_addr *se_addr,
443 struct j1939_addr *sk_addr, bool reverse)
444{
445 if (se_addr->type != sk_addr->type)
446 return false;
447
448 if (reverse) {
449 if (se_addr->src_name) {
450 if (se_addr->src_name != sk_addr->dst_name)
451 return false;
452 } else if (se_addr->sa != sk_addr->da) {
453 return false;
454 }
455
456 if (se_addr->dst_name) {
457 if (se_addr->dst_name != sk_addr->src_name)
458 return false;
459 } else if (se_addr->da != sk_addr->sa) {
460 return false;
461 }
462 } else {
463 if (se_addr->src_name) {
464 if (se_addr->src_name != sk_addr->src_name)
465 return false;
466 } else if (se_addr->sa != sk_addr->sa) {
467 return false;
468 }
469
470 if (se_addr->dst_name) {
471 if (se_addr->dst_name != sk_addr->dst_name)
472 return false;
473 } else if (se_addr->da != sk_addr->da) {
474 return false;
475 }
476 }
477
478 return true;
479}
480
481static struct
482j1939_session *j1939_session_get_by_addr_locked(struct j1939_priv *priv,
483 struct list_head *root,
484 struct j1939_addr *addr,
485 bool reverse, bool transmitter)
486{
487 struct j1939_session *session;
488
489 lockdep_assert_held(&priv->active_session_list_lock);
490
491 list_for_each_entry(session, root, active_session_list_entry) {
492 j1939_session_get(session);
493 if (j1939_session_match(&session->skcb.addr, addr, reverse) &&
494 session->transmission == transmitter)
495 return session;
496 j1939_session_put(session);
497 }
498
499 return NULL;
500}
501
502static struct
503j1939_session *j1939_session_get_simple(struct j1939_priv *priv,
504 struct sk_buff *skb)
505{
506 struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
507 struct j1939_session *session;
508
509 lockdep_assert_held(&priv->active_session_list_lock);
510
511 list_for_each_entry(session, &priv->active_session_list,
512 active_session_list_entry) {
513 j1939_session_get(session);
514 if (session->skcb.addr.type == J1939_SIMPLE &&
515 session->tskey == skcb->tskey && session->sk == skb->sk)
516 return session;
517 j1939_session_put(session);
518 }
519
520 return NULL;
521}
522
523static struct
524j1939_session *j1939_session_get_by_addr(struct j1939_priv *priv,
525 struct j1939_addr *addr,
526 bool reverse, bool transmitter)
527{
528 struct j1939_session *session;
529
530 j1939_session_list_lock(priv);
531 session = j1939_session_get_by_addr_locked(priv,
532 &priv->active_session_list,
533 addr, reverse, transmitter);
534 j1939_session_list_unlock(priv);
535
536 return session;
537}
538
539static void j1939_skbcb_swap(struct j1939_sk_buff_cb *skcb)
540{
541 u8 tmp = 0;
542
543 swap(skcb->addr.dst_name, skcb->addr.src_name);
544 swap(skcb->addr.da, skcb->addr.sa);
545
546 /* swap SRC and DST flags, leave other untouched */
547 if (skcb->flags & J1939_ECU_LOCAL_SRC)
548 tmp |= J1939_ECU_LOCAL_DST;
549 if (skcb->flags & J1939_ECU_LOCAL_DST)
550 tmp |= J1939_ECU_LOCAL_SRC;
551 skcb->flags &= ~(J1939_ECU_LOCAL_SRC | J1939_ECU_LOCAL_DST);
552 skcb->flags |= tmp;
553}
554
555static struct
556sk_buff *j1939_tp_tx_dat_new(struct j1939_priv *priv,
557 const struct j1939_sk_buff_cb *re_skcb,
558 bool ctl,
559 bool swap_src_dst)
560{
561 struct sk_buff *skb;
562 struct j1939_sk_buff_cb *skcb;
563
564 skb = alloc_skb(sizeof(struct can_frame) + sizeof(struct can_skb_priv),
565 GFP_ATOMIC);
566 if (unlikely(!skb))
567 return ERR_PTR(-ENOMEM);
568
569 skb->dev = priv->ndev;
570 can_skb_reserve(skb);
571 can_skb_prv(skb)->ifindex = priv->ndev->ifindex;
572 /* reserve CAN header */
573 skb_reserve(skb, offsetof(struct can_frame, data));
574
575 memcpy(skb->cb, re_skcb, sizeof(skb->cb));
576 skcb = j1939_skb_to_cb(skb);
577 if (swap_src_dst)
578 j1939_skbcb_swap(skcb);
579
580 if (ctl) {
581 if (skcb->addr.type == J1939_ETP)
582 skcb->addr.pgn = J1939_ETP_PGN_CTL;
583 else
584 skcb->addr.pgn = J1939_TP_PGN_CTL;
585 } else {
586 if (skcb->addr.type == J1939_ETP)
587 skcb->addr.pgn = J1939_ETP_PGN_DAT;
588 else
589 skcb->addr.pgn = J1939_TP_PGN_DAT;
590 }
591
592 return skb;
593}
594
595/* TP transmit packet functions */
596static int j1939_tp_tx_dat(struct j1939_session *session,
597 const u8 *dat, int len)
598{
599 struct j1939_priv *priv = session->priv;
600 struct sk_buff *skb;
601
602 skb = j1939_tp_tx_dat_new(priv, &session->skcb,
603 false, false);
604 if (IS_ERR(skb))
605 return PTR_ERR(skb);
606
607 skb_put_data(skb, dat, len);
608 if (j1939_tp_padding && len < 8)
609 memset(skb_put(skb, 8 - len), 0xff, 8 - len);
610
611 return j1939_send_one(priv, skb);
612}
613
614static int j1939_xtp_do_tx_ctl(struct j1939_priv *priv,
615 const struct j1939_sk_buff_cb *re_skcb,
616 bool swap_src_dst, pgn_t pgn, const u8 *dat)
617{
618 struct sk_buff *skb;
619 u8 *skdat;
620
621 if (!j1939_tp_im_involved(re_skcb, swap_src_dst))
622 return 0;
623
624 skb = j1939_tp_tx_dat_new(priv, re_skcb, true, swap_src_dst);
625 if (IS_ERR(skb))
626 return PTR_ERR(skb);
627
628 skdat = skb_put(skb, 8);
629 memcpy(skdat, dat, 5);
630 skdat[5] = (pgn >> 0);
631 skdat[6] = (pgn >> 8);
632 skdat[7] = (pgn >> 16);
633
634 return j1939_send_one(priv, skb);
635}
636
637static inline int j1939_tp_tx_ctl(struct j1939_session *session,
638 bool swap_src_dst, const u8 *dat)
639{
640 struct j1939_priv *priv = session->priv;
641
642 return j1939_xtp_do_tx_ctl(priv, &session->skcb,
643 swap_src_dst,
644 session->skcb.addr.pgn, dat);
645}
646
647static int j1939_xtp_tx_abort(struct j1939_priv *priv,
648 const struct j1939_sk_buff_cb *re_skcb,
649 bool swap_src_dst,
650 enum j1939_xtp_abort err,
651 pgn_t pgn)
652{
653 u8 dat[5];
654
655 if (!j1939_tp_im_involved(re_skcb, swap_src_dst))
656 return 0;
657
658 memset(dat, 0xff, sizeof(dat));
659 dat[0] = J1939_TP_CMD_ABORT;
660 dat[1] = err;
661 return j1939_xtp_do_tx_ctl(priv, re_skcb, swap_src_dst, pgn, dat);
662}
663
664void j1939_tp_schedule_txtimer(struct j1939_session *session, int msec)
665{
666 j1939_session_get(session);
667 hrtimer_start(&session->txtimer, ms_to_ktime(msec),
668 HRTIMER_MODE_REL_SOFT);
669}
670
671static inline void j1939_tp_set_rxtimeout(struct j1939_session *session,
672 int msec)
673{
674 j1939_session_rxtimer_cancel(session);
675 j1939_session_get(session);
676 hrtimer_start(&session->rxtimer, ms_to_ktime(msec),
677 HRTIMER_MODE_REL_SOFT);
678}
679
680static int j1939_session_tx_rts(struct j1939_session *session)
681{
682 u8 dat[8];
683 int ret;
684
685 memset(dat, 0xff, sizeof(dat));
686
687 dat[1] = (session->total_message_size >> 0);
688 dat[2] = (session->total_message_size >> 8);
689 dat[3] = session->pkt.total;
690
691 if (session->skcb.addr.type == J1939_ETP) {
692 dat[0] = J1939_ETP_CMD_RTS;
693 dat[1] = (session->total_message_size >> 0);
694 dat[2] = (session->total_message_size >> 8);
695 dat[3] = (session->total_message_size >> 16);
696 dat[4] = (session->total_message_size >> 24);
697 } else if (j1939_cb_is_broadcast(&session->skcb)) {
698 dat[0] = J1939_TP_CMD_BAM;
699 /* fake cts for broadcast */
700 session->pkt.tx = 0;
701 } else {
702 dat[0] = J1939_TP_CMD_RTS;
703 dat[4] = dat[3];
704 }
705
706 if (dat[0] == session->last_txcmd)
707 /* done already */
708 return 0;
709
710 ret = j1939_tp_tx_ctl(session, false, dat);
711 if (ret < 0)
712 return ret;
713
714 session->last_txcmd = dat[0];
715 if (dat[0] == J1939_TP_CMD_BAM)
716 j1939_tp_schedule_txtimer(session, 50);
717
718 j1939_tp_set_rxtimeout(session, 1250);
719
720 netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
721
722 return 0;
723}
724
725static int j1939_session_tx_dpo(struct j1939_session *session)
726{
727 unsigned int pkt;
728 u8 dat[8];
729 int ret;
730
731 memset(dat, 0xff, sizeof(dat));
732
733 dat[0] = J1939_ETP_CMD_DPO;
734 session->pkt.dpo = session->pkt.tx_acked;
735 pkt = session->pkt.dpo;
736 dat[1] = session->pkt.last - session->pkt.tx_acked;
737 dat[2] = (pkt >> 0);
738 dat[3] = (pkt >> 8);
739 dat[4] = (pkt >> 16);
740
741 ret = j1939_tp_tx_ctl(session, false, dat);
742 if (ret < 0)
743 return ret;
744
745 session->last_txcmd = dat[0];
746 j1939_tp_set_rxtimeout(session, 1250);
747 session->pkt.tx = session->pkt.tx_acked;
748
749 netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
750
751 return 0;
752}
753
754static int j1939_session_tx_dat(struct j1939_session *session)
755{
756 struct j1939_priv *priv = session->priv;
757 struct j1939_sk_buff_cb *skcb;
758 int offset, pkt_done, pkt_end;
759 unsigned int len, pdelay;
760 struct sk_buff *se_skb;
761 const u8 *tpdat;
762 int ret = 0;
763 u8 dat[8];
764
765 se_skb = j1939_session_skb_find(session);
766 if (!se_skb)
767 return -ENOBUFS;
768
769 skcb = j1939_skb_to_cb(se_skb);
770 tpdat = se_skb->data;
771 ret = 0;
772 pkt_done = 0;
773 if (session->skcb.addr.type != J1939_ETP &&
774 j1939_cb_is_broadcast(&session->skcb))
775 pkt_end = session->pkt.total;
776 else
777 pkt_end = session->pkt.last;
778
779 while (session->pkt.tx < pkt_end) {
780 dat[0] = session->pkt.tx - session->pkt.dpo + 1;
781 offset = (session->pkt.tx * 7) - skcb->offset;
782 len = se_skb->len - offset;
783 if (len > 7)
784 len = 7;
785
786 memcpy(&dat[1], &tpdat[offset], len);
787 ret = j1939_tp_tx_dat(session, dat, len + 1);
788 if (ret < 0) {
 789			/* ENOBUFS == CAN interface TX queue is full */
790 if (ret != -ENOBUFS)
791 netdev_alert(priv->ndev,
792 "%s: 0x%p: queue data error: %i\n",
793 __func__, session, ret);
794 break;
795 }
796
797 session->last_txcmd = 0xff;
798 pkt_done++;
799 session->pkt.tx++;
800 pdelay = j1939_cb_is_broadcast(&session->skcb) ? 50 :
801 j1939_tp_packet_delay;
802
803 if (session->pkt.tx < session->pkt.total && pdelay) {
804 j1939_tp_schedule_txtimer(session, pdelay);
805 break;
806 }
807 }
808
809 if (pkt_done)
810 j1939_tp_set_rxtimeout(session, 250);
811
812 return ret;
813}
814
815static int j1939_xtp_txnext_transmiter(struct j1939_session *session)
816{
817 struct j1939_priv *priv = session->priv;
818 int ret = 0;
819
820 if (!j1939_tp_im_transmitter(&session->skcb)) {
 821		netdev_alert(priv->ndev, "%s: 0x%p: called but we are not the transmitter!\n",
822 __func__, session);
823 return -EINVAL;
824 }
825
826 switch (session->last_cmd) {
827 case 0:
828 ret = j1939_session_tx_rts(session);
829 break;
830
831 case J1939_ETP_CMD_CTS:
832 if (session->last_txcmd != J1939_ETP_CMD_DPO) {
833 ret = j1939_session_tx_dpo(session);
834 if (ret)
835 return ret;
836 }
837
838 /* fall through */
839 case J1939_TP_CMD_CTS:
840 case 0xff: /* did some data */
841 case J1939_ETP_CMD_DPO:
842 case J1939_TP_CMD_BAM:
843 ret = j1939_session_tx_dat(session);
844
845 break;
846 default:
847 netdev_alert(priv->ndev, "%s: 0x%p: unexpected last_cmd: %x\n",
848 __func__, session, session->last_cmd);
849 }
850
851 return ret;
852}
853
854static int j1939_session_tx_cts(struct j1939_session *session)
855{
856 struct j1939_priv *priv = session->priv;
857 unsigned int pkt, len;
858 int ret;
859 u8 dat[8];
860
861 if (!j1939_sk_recv_match(priv, &session->skcb))
862 return -ENOENT;
863
864 len = session->pkt.total - session->pkt.rx;
865 len = min3(len, session->pkt.block, j1939_tp_block ?: 255);
866 memset(dat, 0xff, sizeof(dat));
867
868 if (session->skcb.addr.type == J1939_ETP) {
869 pkt = session->pkt.rx + 1;
870 dat[0] = J1939_ETP_CMD_CTS;
871 dat[1] = len;
872 dat[2] = (pkt >> 0);
873 dat[3] = (pkt >> 8);
874 dat[4] = (pkt >> 16);
875 } else {
876 dat[0] = J1939_TP_CMD_CTS;
877 dat[1] = len;
878 dat[2] = session->pkt.rx + 1;
879 }
880
881 if (dat[0] == session->last_txcmd)
882 /* done already */
883 return 0;
884
885 ret = j1939_tp_tx_ctl(session, true, dat);
886 if (ret < 0)
887 return ret;
888
889 if (len)
890 /* only mark cts done when len is set */
891 session->last_txcmd = dat[0];
892 j1939_tp_set_rxtimeout(session, 1250);
893
894 netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
895
896 return 0;
897}
898
899static int j1939_session_tx_eoma(struct j1939_session *session)
900{
901 struct j1939_priv *priv = session->priv;
902 u8 dat[8];
903 int ret;
904
905 if (!j1939_sk_recv_match(priv, &session->skcb))
906 return -ENOENT;
907
908 memset(dat, 0xff, sizeof(dat));
909
910 if (session->skcb.addr.type == J1939_ETP) {
911 dat[0] = J1939_ETP_CMD_EOMA;
912 dat[1] = session->total_message_size >> 0;
913 dat[2] = session->total_message_size >> 8;
914 dat[3] = session->total_message_size >> 16;
915 dat[4] = session->total_message_size >> 24;
916 } else {
917 dat[0] = J1939_TP_CMD_EOMA;
918 dat[1] = session->total_message_size;
919 dat[2] = session->total_message_size >> 8;
920 dat[3] = session->pkt.total;
921 }
922
923 if (dat[0] == session->last_txcmd)
924 /* done already */
925 return 0;
926
927 ret = j1939_tp_tx_ctl(session, true, dat);
928 if (ret < 0)
929 return ret;
930
931 session->last_txcmd = dat[0];
932
933 /* wait for the EOMA packet to come in */
934 j1939_tp_set_rxtimeout(session, 1250);
935
 936	netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
937
938 return 0;
939}
940
941static int j1939_xtp_txnext_receiver(struct j1939_session *session)
942{
943 struct j1939_priv *priv = session->priv;
944 int ret = 0;
945
946 if (!j1939_tp_im_receiver(&session->skcb)) {
 947		netdev_alert(priv->ndev, "%s: 0x%p: called but we are not the receiver!\n",
948 __func__, session);
949 return -EINVAL;
950 }
951
952 switch (session->last_cmd) {
953 case J1939_TP_CMD_RTS:
954 case J1939_ETP_CMD_RTS:
955 ret = j1939_session_tx_cts(session);
956 break;
957
958 case J1939_ETP_CMD_CTS:
959 case J1939_TP_CMD_CTS:
960 case 0xff: /* did some data */
961 case J1939_ETP_CMD_DPO:
962 if ((session->skcb.addr.type == J1939_TP &&
963 j1939_cb_is_broadcast(&session->skcb)))
964 break;
965
966 if (session->pkt.rx >= session->pkt.total) {
967 ret = j1939_session_tx_eoma(session);
968 } else if (session->pkt.rx >= session->pkt.last) {
969 session->last_txcmd = 0;
970 ret = j1939_session_tx_cts(session);
971 }
972 break;
973 default:
974 netdev_alert(priv->ndev, "%s: 0x%p: unexpected last_cmd: %x\n",
975 __func__, session, session->last_cmd);
976 }
977
978 return ret;
979}
980
981static int j1939_simple_txnext(struct j1939_session *session)
982{
983 struct j1939_priv *priv = session->priv;
984 struct sk_buff *se_skb = j1939_session_skb_find(session);
985 struct sk_buff *skb;
986 int ret;
987
988 if (!se_skb)
989 return 0;
990
991 skb = skb_clone(se_skb, GFP_ATOMIC);
992 if (!skb)
993 return -ENOMEM;
994
995 can_skb_set_owner(skb, se_skb->sk);
996
997 j1939_tp_set_rxtimeout(session, J1939_SIMPLE_ECHO_TIMEOUT_MS);
998
999 ret = j1939_send_one(priv, skb);
1000 if (ret)
1001 return ret;
1002
1003 j1939_sk_errqueue(session, J1939_ERRQUEUE_SCHED);
1004 j1939_sk_queue_activate_next(session);
1005
1006 return 0;
1007}
1008
1009static bool j1939_session_deactivate_locked(struct j1939_session *session)
1010{
1011 bool active = false;
1012
1013 lockdep_assert_held(&session->priv->active_session_list_lock);
1014
1015 if (session->state >= J1939_SESSION_ACTIVE &&
1016 session->state < J1939_SESSION_ACTIVE_MAX) {
1017 active = true;
1018
1019 list_del_init(&session->active_session_list_entry);
1020 session->state = J1939_SESSION_DONE;
1021 j1939_session_put(session);
1022 }
1023
1024 return active;
1025}
1026
1027static bool j1939_session_deactivate(struct j1939_session *session)
1028{
1029 bool active;
1030
1031 j1939_session_list_lock(session->priv);
1032 active = j1939_session_deactivate_locked(session);
1033 j1939_session_list_unlock(session->priv);
1034
1035 return active;
1036}
1037
1038static void
1039j1939_session_deactivate_activate_next(struct j1939_session *session)
1040{
1041 if (j1939_session_deactivate(session))
1042 j1939_sk_queue_activate_next(session);
1043}
1044
1045static void j1939_session_cancel(struct j1939_session *session,
1046 enum j1939_xtp_abort err)
1047{
1048 struct j1939_priv *priv = session->priv;
1049
1050 WARN_ON_ONCE(!err);
1051
1052 session->err = j1939_xtp_abort_to_errno(priv, err);
1053 /* do not send aborts on incoming broadcasts */
1054 if (!j1939_cb_is_broadcast(&session->skcb)) {
1055 session->state = J1939_SESSION_WAITING_ABORT;
1056 j1939_xtp_tx_abort(priv, &session->skcb,
1057 !session->transmission,
1058 err, session->skcb.addr.pgn);
1059 }
1060
1061 if (session->sk)
1062 j1939_sk_send_loop_abort(session->sk, session->err);
1063}
1064
1065static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer)
1066{
1067 struct j1939_session *session =
1068 container_of(hrtimer, struct j1939_session, txtimer);
1069 struct j1939_priv *priv = session->priv;
1070 int ret = 0;
1071
1072 if (session->skcb.addr.type == J1939_SIMPLE) {
1073 ret = j1939_simple_txnext(session);
1074 } else {
1075 if (session->transmission)
1076 ret = j1939_xtp_txnext_transmiter(session);
1077 else
1078 ret = j1939_xtp_txnext_receiver(session);
1079 }
1080
1081 switch (ret) {
1082 case -ENOBUFS:
1083		/* Retry limit is currently arbitrarily chosen */
1084 if (session->tx_retry < J1939_XTP_TX_RETRY_LIMIT) {
1085 session->tx_retry++;
1086 j1939_tp_schedule_txtimer(session,
1087 10 + prandom_u32_max(16));
1088 } else {
1089 netdev_alert(priv->ndev, "%s: 0x%p: tx retry count reached\n",
1090 __func__, session);
1091 session->err = -ENETUNREACH;
1092 j1939_session_rxtimer_cancel(session);
1093 j1939_session_deactivate_activate_next(session);
1094 }
1095 break;
1096 case -ENETDOWN:
1097 /* In this case we should get a netdev_event(), all active
1098 * sessions will be cleared by
1099 * j1939_cancel_all_active_sessions(). So handle this as an
1100 * error, but let j1939_cancel_all_active_sessions() do the
1101 * cleanup including propagation of the error to user space.
1102 */
1103 break;
1104 case 0:
1105 session->tx_retry = 0;
1106 break;
1107 default:
1108 netdev_alert(priv->ndev, "%s: 0x%p: tx aborted with unknown reason: %i\n",
1109 __func__, session, ret);
1110 if (session->skcb.addr.type != J1939_SIMPLE) {
1111 j1939_tp_set_rxtimeout(session,
1112 J1939_XTP_ABORT_TIMEOUT_MS);
1113 j1939_session_cancel(session, J1939_XTP_ABORT_OTHER);
1114 } else {
1115 session->err = ret;
1116 j1939_session_rxtimer_cancel(session);
1117 j1939_session_deactivate_activate_next(session);
1118 }
1119 }
1120
1121 j1939_session_put(session);
1122
1123 return HRTIMER_NORESTART;
1124}
1125
1126static void j1939_session_completed(struct j1939_session *session)
1127{
1128 struct sk_buff *skb;
1129
1130 if (!session->transmission) {
1131 skb = j1939_session_skb_find(session);
1132 /* distribute among j1939 receivers */
1133 j1939_sk_recv(session->priv, skb);
1134 }
1135
1136 j1939_session_deactivate_activate_next(session);
1137}
1138
1139static enum hrtimer_restart j1939_tp_rxtimer(struct hrtimer *hrtimer)
1140{
1141 struct j1939_session *session = container_of(hrtimer,
1142 struct j1939_session,
1143 rxtimer);
1144 struct j1939_priv *priv = session->priv;
1145
1146 if (session->state == J1939_SESSION_WAITING_ABORT) {
1147 netdev_alert(priv->ndev, "%s: 0x%p: abort rx timeout. Force session deactivation\n",
1148 __func__, session);
1149
1150 j1939_session_deactivate_activate_next(session);
1151
1152 } else if (session->skcb.addr.type == J1939_SIMPLE) {
1153 netdev_alert(priv->ndev, "%s: 0x%p: Timeout. Failed to send simple message.\n",
1154 __func__, session);
1155
1156		/* The message is probably stuck in the CAN controller and can
1157		 * be sent as soon as the CAN bus is in a working state again.
1158		 */
1159 session->err = -ETIME;
1160 j1939_session_deactivate(session);
1161 } else {
1162 netdev_alert(priv->ndev, "%s: 0x%p: rx timeout, send abort\n",
1163 __func__, session);
1164
1165 j1939_session_list_lock(session->priv);
1166 if (session->state >= J1939_SESSION_ACTIVE &&
1167 session->state < J1939_SESSION_ACTIVE_MAX) {
1168 j1939_session_get(session);
1169 hrtimer_start(&session->rxtimer,
1170 ms_to_ktime(J1939_XTP_ABORT_TIMEOUT_MS),
1171 HRTIMER_MODE_REL_SOFT);
1172 j1939_session_cancel(session, J1939_XTP_ABORT_TIMEOUT);
1173 }
1174 j1939_session_list_unlock(session->priv);
1175 }
1176
1177 j1939_session_put(session);
1178
1179 return HRTIMER_NORESTART;
1180}
1181
1182static bool j1939_xtp_rx_cmd_bad_pgn(struct j1939_session *session,
1183 const struct sk_buff *skb)
1184{
1185 const struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
1186 pgn_t pgn = j1939_xtp_ctl_to_pgn(skb->data);
1187 struct j1939_priv *priv = session->priv;
1188 enum j1939_xtp_abort abort = J1939_XTP_NO_ABORT;
1189 u8 cmd = skb->data[0];
1190
1191 if (session->skcb.addr.pgn == pgn)
1192 return false;
1193
1194 switch (cmd) {
1195 case J1939_TP_CMD_BAM:
1196 abort = J1939_XTP_NO_ABORT;
1197 break;
1198
1199 case J1939_ETP_CMD_RTS:
1200 case J1939_TP_CMD_RTS: /* fall through */
1201 abort = J1939_XTP_ABORT_BUSY;
1202 break;
1203
1204 case J1939_ETP_CMD_CTS:
1205 case J1939_TP_CMD_CTS: /* fall through */
1206 abort = J1939_XTP_ABORT_ECTS_UNXPECTED_PGN;
1207 break;
1208
1209 case J1939_ETP_CMD_DPO:
1210 abort = J1939_XTP_ABORT_BAD_EDPO_PGN;
1211 break;
1212
1213 case J1939_ETP_CMD_EOMA:
1214 case J1939_TP_CMD_EOMA: /* fall through */
1215 abort = J1939_XTP_ABORT_OTHER;
1216 break;
1217
1218 case J1939_ETP_CMD_ABORT: /* && J1939_TP_CMD_ABORT */
1219 abort = J1939_XTP_NO_ABORT;
1220 break;
1221
1222 default:
1223 WARN_ON_ONCE(1);
1224 break;
1225 }
1226
1227 netdev_warn(priv->ndev, "%s: 0x%p: CMD 0x%02x with PGN 0x%05x for running session with different PGN 0x%05x.\n",
1228 __func__, session, cmd, pgn, session->skcb.addr.pgn);
1229 if (abort != J1939_XTP_NO_ABORT)
1230 j1939_xtp_tx_abort(priv, skcb, true, abort, pgn);
1231
1232 return true;
1233}
1234
1235static void j1939_xtp_rx_abort_one(struct j1939_priv *priv, struct sk_buff *skb,
1236 bool reverse, bool transmitter)
1237{
1238 struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
1239 struct j1939_session *session;
1240 u8 abort = skb->data[1];
1241
1242 session = j1939_session_get_by_addr(priv, &skcb->addr, reverse,
1243 transmitter);
1244 if (!session)
1245 return;
1246
1247 if (j1939_xtp_rx_cmd_bad_pgn(session, skb))
1248 goto abort_put;
1249
1250 netdev_info(priv->ndev, "%s: 0x%p: 0x%05x: (%u) %s\n", __func__,
1251 session, j1939_xtp_ctl_to_pgn(skb->data), abort,
1252 j1939_xtp_abort_to_str(abort));
1253
1254 j1939_session_timers_cancel(session);
1255 session->err = j1939_xtp_abort_to_errno(priv, abort);
1256 if (session->sk)
1257 j1939_sk_send_loop_abort(session->sk, session->err);
1258 j1939_session_deactivate_activate_next(session);
1259
1260abort_put:
1261 j1939_session_put(session);
1262}
1263
1264/* abort packets may come in 2 directions */
1265static void
1266j1939_xtp_rx_abort(struct j1939_priv *priv, struct sk_buff *skb,
1267 bool transmitter)
1268{
1269 j1939_xtp_rx_abort_one(priv, skb, false, transmitter);
1270 j1939_xtp_rx_abort_one(priv, skb, true, transmitter);
1271}
1272
1273static void
1274j1939_xtp_rx_eoma_one(struct j1939_session *session, struct sk_buff *skb)
1275{
1276 if (j1939_xtp_rx_cmd_bad_pgn(session, skb))
1277 return;
1278
1279 netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
1280
1281 session->pkt.tx_acked = session->pkt.total;
1282 j1939_session_timers_cancel(session);
1283 /* transmitted without problems */
1284 j1939_session_completed(session);
1285}
1286
1287static void
1288j1939_xtp_rx_eoma(struct j1939_priv *priv, struct sk_buff *skb,
1289 bool transmitter)
1290{
1291 struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
1292 struct j1939_session *session;
1293
1294 session = j1939_session_get_by_addr(priv, &skcb->addr, true,
1295 transmitter);
1296 if (!session)
1297 return;
1298
1299 j1939_xtp_rx_eoma_one(session, skb);
1300 j1939_session_put(session);
1301}
1302
1303static void
1304j1939_xtp_rx_cts_one(struct j1939_session *session, struct sk_buff *skb)
1305{
1306 enum j1939_xtp_abort err = J1939_XTP_ABORT_FAULT;
1307 unsigned int pkt;
1308 const u8 *dat;
1309
1310 dat = skb->data;
1311
1312 if (j1939_xtp_rx_cmd_bad_pgn(session, skb))
1313 return;
1314
1315 netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
1316
1317 if (session->last_cmd == dat[0]) {
1318 err = J1939_XTP_ABORT_DUP_SEQ;
1319 goto out_session_cancel;
1320 }
1321
1322 if (session->skcb.addr.type == J1939_ETP)
1323 pkt = j1939_etp_ctl_to_packet(dat);
1324 else
1325 pkt = dat[2];
1326
1327 if (!pkt)
1328 goto out_session_cancel;
1329 else if (dat[1] > session->pkt.block /* 0xff for etp */)
1330 goto out_session_cancel;
1331
1332 /* set packet counters only when not CTS(0) */
1333 session->pkt.tx_acked = pkt - 1;
1334 j1939_session_skb_drop_old(session);
1335 session->pkt.last = session->pkt.tx_acked + dat[1];
1336 if (session->pkt.last > session->pkt.total)
1337 /* safety measure */
1338 session->pkt.last = session->pkt.total;
1339 /* TODO: do not set tx here, do it in txtimer */
1340 session->pkt.tx = session->pkt.tx_acked;
1341
1342 session->last_cmd = dat[0];
1343 if (dat[1]) {
1344 j1939_tp_set_rxtimeout(session, 1250);
1345 if (session->transmission) {
1346 if (session->pkt.tx_acked)
1347 j1939_sk_errqueue(session,
1348 J1939_ERRQUEUE_SCHED);
1349 j1939_session_txtimer_cancel(session);
1350 j1939_tp_schedule_txtimer(session, 0);
1351 }
1352 } else {
1353 /* CTS(0) */
1354 j1939_tp_set_rxtimeout(session, 550);
1355 }
1356 return;
1357
1358 out_session_cancel:
1359 j1939_session_timers_cancel(session);
1360 j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS);
1361 j1939_session_cancel(session, err);
1362}
1363
1364static void
1365j1939_xtp_rx_cts(struct j1939_priv *priv, struct sk_buff *skb, bool transmitter)
1366{
1367 struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
1368 struct j1939_session *session;
1369
1370 session = j1939_session_get_by_addr(priv, &skcb->addr, true,
1371 transmitter);
1372 if (!session)
1373 return;
1374 j1939_xtp_rx_cts_one(session, skb);
1375 j1939_session_put(session);
1376}
1377
1378static struct j1939_session *j1939_session_new(struct j1939_priv *priv,
1379 struct sk_buff *skb, size_t size)
1380{
1381 struct j1939_session *session;
1382 struct j1939_sk_buff_cb *skcb;
1383
1384 session = kzalloc(sizeof(*session), gfp_any());
1385 if (!session)
1386 return NULL;
1387
1388 INIT_LIST_HEAD(&session->active_session_list_entry);
1389 INIT_LIST_HEAD(&session->sk_session_queue_entry);
1390 kref_init(&session->kref);
1391
1392 j1939_priv_get(priv);
1393 session->priv = priv;
1394 session->total_message_size = size;
1395 session->state = J1939_SESSION_NEW;
1396
1397 skb_queue_head_init(&session->skb_queue);
1398 skb_queue_tail(&session->skb_queue, skb);
1399
1400 skcb = j1939_skb_to_cb(skb);
1401 memcpy(&session->skcb, skcb, sizeof(session->skcb));
1402
1403 hrtimer_init(&session->txtimer, CLOCK_MONOTONIC,
1404 HRTIMER_MODE_REL_SOFT);
1405 session->txtimer.function = j1939_tp_txtimer;
1406 hrtimer_init(&session->rxtimer, CLOCK_MONOTONIC,
1407 HRTIMER_MODE_REL_SOFT);
1408 session->rxtimer.function = j1939_tp_rxtimer;
1409
1410 netdev_dbg(priv->ndev, "%s: 0x%p: sa: %02x, da: %02x\n",
1411 __func__, session, skcb->addr.sa, skcb->addr.da);
1412
1413 return session;
1414}
1415
1416static struct
1417j1939_session *j1939_session_fresh_new(struct j1939_priv *priv,
1418 int size,
1419 const struct j1939_sk_buff_cb *rel_skcb)
1420{
1421 struct sk_buff *skb;
1422 struct j1939_sk_buff_cb *skcb;
1423 struct j1939_session *session;
1424
1425 skb = alloc_skb(size + sizeof(struct can_skb_priv), GFP_ATOMIC);
1426 if (unlikely(!skb))
1427 return NULL;
1428
1429 skb->dev = priv->ndev;
1430 can_skb_reserve(skb);
1431 can_skb_prv(skb)->ifindex = priv->ndev->ifindex;
1432 skcb = j1939_skb_to_cb(skb);
1433 memcpy(skcb, rel_skcb, sizeof(*skcb));
1434
1435 session = j1939_session_new(priv, skb, skb->len);
1436 if (!session) {
1437 kfree_skb(skb);
1438 return NULL;
1439 }
1440
1441 /* alloc data area */
1442 skb_put(skb, size);
1443	/* skb is refcounted in j1939_session_new() */
1444 return session;
1445}
1446
1447int j1939_session_activate(struct j1939_session *session)
1448{
1449 struct j1939_priv *priv = session->priv;
1450 struct j1939_session *active = NULL;
1451 int ret = 0;
1452
1453 j1939_session_list_lock(priv);
1454 if (session->skcb.addr.type != J1939_SIMPLE)
1455 active = j1939_session_get_by_addr_locked(priv,
1456 &priv->active_session_list,
1457 &session->skcb.addr, false,
1458 session->transmission);
1459 if (active) {
1460 j1939_session_put(active);
1461 ret = -EAGAIN;
1462 } else {
1463 WARN_ON_ONCE(session->state != J1939_SESSION_NEW);
1464 list_add_tail(&session->active_session_list_entry,
1465 &priv->active_session_list);
1466 j1939_session_get(session);
1467 session->state = J1939_SESSION_ACTIVE;
1468
1469 netdev_dbg(session->priv->ndev, "%s: 0x%p\n",
1470 __func__, session);
1471 }
1472 j1939_session_list_unlock(priv);
1473
1474 return ret;
1475}
1476
1477static struct
1478j1939_session *j1939_xtp_rx_rts_session_new(struct j1939_priv *priv,
1479 struct sk_buff *skb)
1480{
1481 enum j1939_xtp_abort abort = J1939_XTP_NO_ABORT;
1482 struct j1939_sk_buff_cb skcb = *j1939_skb_to_cb(skb);
1483 struct j1939_session *session;
1484 const u8 *dat;
1485 pgn_t pgn;
1486 int len;
1487
1488 netdev_dbg(priv->ndev, "%s\n", __func__);
1489
1490 dat = skb->data;
1491 pgn = j1939_xtp_ctl_to_pgn(dat);
1492 skcb.addr.pgn = pgn;
1493
1494 if (!j1939_sk_recv_match(priv, &skcb))
1495 return NULL;
1496
1497 if (skcb.addr.type == J1939_ETP) {
1498 len = j1939_etp_ctl_to_size(dat);
1499 if (len > J1939_MAX_ETP_PACKET_SIZE)
1500 abort = J1939_XTP_ABORT_FAULT;
1501 else if (len > priv->tp_max_packet_size)
1502 abort = J1939_XTP_ABORT_RESOURCE;
1503 else if (len <= J1939_MAX_TP_PACKET_SIZE)
1504 abort = J1939_XTP_ABORT_FAULT;
1505 } else {
1506 len = j1939_tp_ctl_to_size(dat);
1507 if (len > J1939_MAX_TP_PACKET_SIZE)
1508 abort = J1939_XTP_ABORT_FAULT;
1509 else if (len > priv->tp_max_packet_size)
1510 abort = J1939_XTP_ABORT_RESOURCE;
1511 }
1512
1513 if (abort != J1939_XTP_NO_ABORT) {
1514 j1939_xtp_tx_abort(priv, &skcb, true, abort, pgn);
1515 return NULL;
1516 }
1517
1518 session = j1939_session_fresh_new(priv, len, &skcb);
1519 if (!session) {
1520 j1939_xtp_tx_abort(priv, &skcb, true,
1521 J1939_XTP_ABORT_RESOURCE, pgn);
1522 return NULL;
1523 }
1524
1525 /* initialize the control buffer: plain copy */
1526 session->pkt.total = (len + 6) / 7;
1527 session->pkt.block = 0xff;
1528 if (skcb.addr.type != J1939_ETP) {
1529 if (dat[3] != session->pkt.total)
1530 netdev_alert(priv->ndev, "%s: 0x%p: strange total, %u != %u\n",
1531 __func__, session, session->pkt.total,
1532 dat[3]);
1533 session->pkt.total = dat[3];
1534 session->pkt.block = min(dat[3], dat[4]);
1535 }
1536
1537 session->pkt.rx = 0;
1538 session->pkt.tx = 0;
1539
1540 WARN_ON_ONCE(j1939_session_activate(session));
1541
1542 return session;
1543}
1544
1545static int j1939_xtp_rx_rts_session_active(struct j1939_session *session,
1546 struct sk_buff *skb)
1547{
1548 struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
1549 struct j1939_priv *priv = session->priv;
1550
1551 if (!session->transmission) {
1552 if (j1939_xtp_rx_cmd_bad_pgn(session, skb))
1553 return -EBUSY;
1554
1555 /* RTS on active session */
1556 j1939_session_timers_cancel(session);
1557 j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS);
1558 j1939_session_cancel(session, J1939_XTP_ABORT_BUSY);
1559 }
1560
1561 if (session->last_cmd != 0) {
1562 /* we received a second rts on the same connection */
1563 netdev_alert(priv->ndev, "%s: 0x%p: connection exists (%02x %02x). last cmd: %x\n",
1564 __func__, session, skcb->addr.sa, skcb->addr.da,
1565 session->last_cmd);
1566
1567 j1939_session_timers_cancel(session);
1568 j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS);
1569 j1939_session_cancel(session, J1939_XTP_ABORT_BUSY);
1570
1571 return -EBUSY;
1572 }
1573
1574 if (session->skcb.addr.sa != skcb->addr.sa ||
1575 session->skcb.addr.da != skcb->addr.da)
1576 netdev_warn(priv->ndev, "%s: 0x%p: session->skcb.addr.sa=0x%02x skcb->addr.sa=0x%02x session->skcb.addr.da=0x%02x skcb->addr.da=0x%02x\n",
1577 __func__, session,
1578 session->skcb.addr.sa, skcb->addr.sa,
1579 session->skcb.addr.da, skcb->addr.da);
1580	/* Make sure 'sa' and 'da' are correct!
1581	 * They may not be filled in yet for outgoing
1582	 * skbs, since they never went through address claiming.
1583 */
1584 session->skcb.addr.sa = skcb->addr.sa;
1585 session->skcb.addr.da = skcb->addr.da;
1586
1587 netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
1588
1589 return 0;
1590}
1591
1592static void j1939_xtp_rx_rts(struct j1939_priv *priv, struct sk_buff *skb,
1593 bool transmitter)
1594{
1595 struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
1596 struct j1939_session *session;
1597 u8 cmd = skb->data[0];
1598
1599 session = j1939_session_get_by_addr(priv, &skcb->addr, false,
1600 transmitter);
1601
1602 if (!session) {
1603 if (transmitter) {
1604 /* If we're the transmitter and this function is called,
1605 * we received our own RTS. A session has already been
1606 * created.
1607 *
1608			 * For some reason, however, it might have been destroyed
1609 * already. So don't create a new one here (using
1610 * "j1939_xtp_rx_rts_session_new()") as this will be a
1611 * receiver session.
1612 *
1613 * The reasons the session is already destroyed might
1614 * be:
1615			 *  - user space closed the socket and the session was
1616 * aborted
1617 * - session was aborted due to external abort message
1618 */
1619 return;
1620 }
1621 session = j1939_xtp_rx_rts_session_new(priv, skb);
1622 if (!session)
1623 return;
1624 } else {
1625 if (j1939_xtp_rx_rts_session_active(session, skb)) {
1626 j1939_session_put(session);
1627 return;
1628 }
1629 }
1630 session->last_cmd = cmd;
1631
1632 j1939_tp_set_rxtimeout(session, 1250);
1633
1634 if (cmd != J1939_TP_CMD_BAM && !session->transmission) {
1635 j1939_session_txtimer_cancel(session);
1636 j1939_tp_schedule_txtimer(session, 0);
1637 }
1638
1639 j1939_session_put(session);
1640}
1641
1642static void j1939_xtp_rx_dpo_one(struct j1939_session *session,
1643 struct sk_buff *skb)
1644{
1645 const u8 *dat = skb->data;
1646
1647 if (j1939_xtp_rx_cmd_bad_pgn(session, skb))
1648 return;
1649
1650 netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
1651
1652 /* transmitted without problems */
1653 session->pkt.dpo = j1939_etp_ctl_to_packet(skb->data);
1654 session->last_cmd = dat[0];
1655 j1939_tp_set_rxtimeout(session, 750);
1656}
1657
1658static void j1939_xtp_rx_dpo(struct j1939_priv *priv, struct sk_buff *skb,
1659 bool transmitter)
1660{
1661 struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
1662 struct j1939_session *session;
1663
1664 session = j1939_session_get_by_addr(priv, &skcb->addr, false,
1665 transmitter);
1666 if (!session) {
1667 netdev_info(priv->ndev,
1668 "%s: no connection found\n", __func__);
1669 return;
1670 }
1671
1672 j1939_xtp_rx_dpo_one(session, skb);
1673 j1939_session_put(session);
1674}
1675
1676static void j1939_xtp_rx_dat_one(struct j1939_session *session,
1677 struct sk_buff *skb)
1678{
1679 struct j1939_priv *priv = session->priv;
1680 struct j1939_sk_buff_cb *skcb;
1681 struct sk_buff *se_skb;
1682 const u8 *dat;
1683 u8 *tpdat;
1684 int offset;
1685 int nbytes;
1686 bool final = false;
1687 bool do_cts_eoma = false;
1688 int packet;
1689
1690 skcb = j1939_skb_to_cb(skb);
1691 dat = skb->data;
1692 if (skb->len <= 1)
1693 /* makes no sense */
1694 goto out_session_cancel;
1695
1696 switch (session->last_cmd) {
1697 case 0xff:
1698 break;
1699 case J1939_ETP_CMD_DPO:
1700 if (skcb->addr.type == J1939_ETP)
1701 break;
1702 /* fall through */
1703 case J1939_TP_CMD_BAM: /* fall through */
1704 case J1939_TP_CMD_CTS: /* fall through */
1705 if (skcb->addr.type != J1939_ETP)
1706 break;
1707 /* fall through */
1708 default:
1709 netdev_info(priv->ndev, "%s: 0x%p: last %02x\n", __func__,
1710 session, session->last_cmd);
1711 goto out_session_cancel;
1712 }
1713
1714 packet = (dat[0] - 1 + session->pkt.dpo);
1715 if (packet > session->pkt.total ||
1716 (session->pkt.rx + 1) > session->pkt.total) {
1717 netdev_info(priv->ndev, "%s: 0x%p: should have been completed\n",
1718 __func__, session);
1719 goto out_session_cancel;
1720 }
1721 se_skb = j1939_session_skb_find(session);
1722 if (!se_skb) {
1723 netdev_warn(priv->ndev, "%s: 0x%p: no skb found\n", __func__,
1724 session);
1725 goto out_session_cancel;
1726 }
1727
1728 skcb = j1939_skb_to_cb(se_skb);
1729 offset = packet * 7 - skcb->offset;
1730 nbytes = se_skb->len - offset;
1731 if (nbytes > 7)
1732 nbytes = 7;
1733 if (nbytes <= 0 || (nbytes + 1) > skb->len) {
1734 netdev_info(priv->ndev, "%s: 0x%p: nbytes %i, len %i\n",
1735 __func__, session, nbytes, skb->len);
1736 goto out_session_cancel;
1737 }
1738
1739 tpdat = se_skb->data;
1740 memcpy(&tpdat[offset], &dat[1], nbytes);
1741 if (packet == session->pkt.rx)
1742 session->pkt.rx++;
1743
1744 if (skcb->addr.type != J1939_ETP &&
1745 j1939_cb_is_broadcast(&session->skcb)) {
1746 if (session->pkt.rx >= session->pkt.total)
1747 final = true;
1748 } else {
1749 /* never final, an EOMA must follow */
1750 if (session->pkt.rx >= session->pkt.last)
1751 do_cts_eoma = true;
1752 }
1753
1754 if (final) {
1755 j1939_session_completed(session);
1756 } else if (do_cts_eoma) {
1757 j1939_tp_set_rxtimeout(session, 1250);
1758 if (!session->transmission)
1759 j1939_tp_schedule_txtimer(session, 0);
1760 } else {
1761 j1939_tp_set_rxtimeout(session, 250);
1762 }
1763 session->last_cmd = 0xff;
1764 j1939_session_put(session);
1765
1766 return;
1767
1768 out_session_cancel:
1769 j1939_session_timers_cancel(session);
1770 j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS);
1771 j1939_session_cancel(session, J1939_XTP_ABORT_FAULT);
1772 j1939_session_put(session);
1773}
1774
1775static void j1939_xtp_rx_dat(struct j1939_priv *priv, struct sk_buff *skb)
1776{
1777 struct j1939_sk_buff_cb *skcb;
1778 struct j1939_session *session;
1779
1780 skcb = j1939_skb_to_cb(skb);
1781
1782 if (j1939_tp_im_transmitter(skcb)) {
1783 session = j1939_session_get_by_addr(priv, &skcb->addr, false,
1784 true);
1785 if (!session)
1786 netdev_info(priv->ndev, "%s: no tx connection found\n",
1787 __func__);
1788 else
1789 j1939_xtp_rx_dat_one(session, skb);
1790 }
1791
1792 if (j1939_tp_im_receiver(skcb)) {
1793 session = j1939_session_get_by_addr(priv, &skcb->addr, false,
1794 false);
1795 if (!session)
1796 netdev_info(priv->ndev, "%s: no rx connection found\n",
1797 __func__);
1798 else
1799 j1939_xtp_rx_dat_one(session, skb);
1800 }
1801}
1802
1803/* j1939 main intf */
1804struct j1939_session *j1939_tp_send(struct j1939_priv *priv,
1805 struct sk_buff *skb, size_t size)
1806{
1807 struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
1808 struct j1939_session *session;
1809 int ret;
1810
1811 if (skcb->addr.pgn == J1939_TP_PGN_DAT ||
1812 skcb->addr.pgn == J1939_TP_PGN_CTL ||
1813 skcb->addr.pgn == J1939_ETP_PGN_DAT ||
1814 skcb->addr.pgn == J1939_ETP_PGN_CTL)
1815 /* avoid conflict */
1816 return ERR_PTR(-EDOM);
1817
1818 if (size > priv->tp_max_packet_size)
1819 return ERR_PTR(-EMSGSIZE);
1820
1821 if (size <= 8)
1822 skcb->addr.type = J1939_SIMPLE;
1823 else if (size > J1939_MAX_TP_PACKET_SIZE)
1824 skcb->addr.type = J1939_ETP;
1825 else
1826 skcb->addr.type = J1939_TP;
1827
1828 if (skcb->addr.type == J1939_ETP &&
1829 j1939_cb_is_broadcast(skcb))
1830 return ERR_PTR(-EDESTADDRREQ);
1831
1832 /* fill in addresses from names */
1833 ret = j1939_ac_fixup(priv, skb);
1834 if (unlikely(ret))
1835 return ERR_PTR(ret);
1836
1837	/* fix DST flags; they may be used soon */
1838 if (j1939_address_is_unicast(skcb->addr.da) &&
1839 priv->ents[skcb->addr.da].nusers)
1840 skcb->flags |= J1939_ECU_LOCAL_DST;
1841
1842 /* src is always local, I'm sending ... */
1843 skcb->flags |= J1939_ECU_LOCAL_SRC;
1844
1845 /* prepare new session */
1846 session = j1939_session_new(priv, skb, size);
1847 if (!session)
1848 return ERR_PTR(-ENOMEM);
1849
1850	/* skb is refcounted in j1939_session_new() */
1851 session->sk = skb->sk;
1852 session->transmission = true;
1853 session->pkt.total = (size + 6) / 7;
1854 session->pkt.block = skcb->addr.type == J1939_ETP ? 255 :
1855 min(j1939_tp_block ?: 255, session->pkt.total);
1856
1857 if (j1939_cb_is_broadcast(&session->skcb))
1858 /* set the end-packet for broadcast */
1859 session->pkt.last = session->pkt.total;
1860
1861 skcb->tskey = session->sk->sk_tskey++;
1862 session->tskey = skcb->tskey;
1863
1864 return session;
1865}
1866
1867static void j1939_tp_cmd_recv(struct j1939_priv *priv, struct sk_buff *skb)
1868{
1869 struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
1870 int extd = J1939_TP;
1871 u8 cmd = skb->data[0];
1872
1873 switch (cmd) {
1874 case J1939_ETP_CMD_RTS:
1875 extd = J1939_ETP;
1876 /* fall through */
1877 case J1939_TP_CMD_BAM: /* fall through */
1878 case J1939_TP_CMD_RTS: /* fall through */
1879 if (skcb->addr.type != extd)
1880 return;
1881
1882 if (cmd == J1939_TP_CMD_RTS && j1939_cb_is_broadcast(skcb)) {
1883 netdev_alert(priv->ndev, "%s: rts without destination (%02x)\n",
1884 __func__, skcb->addr.sa);
1885 return;
1886 }
1887
1888 if (j1939_tp_im_transmitter(skcb))
1889 j1939_xtp_rx_rts(priv, skb, true);
1890
1891 if (j1939_tp_im_receiver(skcb))
1892 j1939_xtp_rx_rts(priv, skb, false);
1893
1894 break;
1895
1896 case J1939_ETP_CMD_CTS:
1897 extd = J1939_ETP;
1898 /* fall through */
1899 case J1939_TP_CMD_CTS:
1900 if (skcb->addr.type != extd)
1901 return;
1902
1903 if (j1939_tp_im_transmitter(skcb))
1904 j1939_xtp_rx_cts(priv, skb, false);
1905
1906 if (j1939_tp_im_receiver(skcb))
1907 j1939_xtp_rx_cts(priv, skb, true);
1908
1909 break;
1910
1911 case J1939_ETP_CMD_DPO:
1912 if (skcb->addr.type != J1939_ETP)
1913 return;
1914
1915 if (j1939_tp_im_transmitter(skcb))
1916 j1939_xtp_rx_dpo(priv, skb, true);
1917
1918 if (j1939_tp_im_receiver(skcb))
1919 j1939_xtp_rx_dpo(priv, skb, false);
1920
1921 break;
1922
1923 case J1939_ETP_CMD_EOMA:
1924 extd = J1939_ETP;
1925 /* fall through */
1926 case J1939_TP_CMD_EOMA:
1927 if (skcb->addr.type != extd)
1928 return;
1929
1930 if (j1939_tp_im_transmitter(skcb))
1931 j1939_xtp_rx_eoma(priv, skb, false);
1932
1933 if (j1939_tp_im_receiver(skcb))
1934 j1939_xtp_rx_eoma(priv, skb, true);
1935
1936 break;
1937
1938 case J1939_ETP_CMD_ABORT: /* && J1939_TP_CMD_ABORT */
1939 if (j1939_tp_im_transmitter(skcb))
1940 j1939_xtp_rx_abort(priv, skb, true);
1941
1942 if (j1939_tp_im_receiver(skcb))
1943 j1939_xtp_rx_abort(priv, skb, false);
1944
1945 break;
1946 default:
1947 return;
1948 }
1949}
1950
1951int j1939_tp_recv(struct j1939_priv *priv, struct sk_buff *skb)
1952{
1953 struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
1954
1955 if (!j1939_tp_im_involved_anydir(skcb))
1956 return 0;
1957
1958 switch (skcb->addr.pgn) {
1959 case J1939_ETP_PGN_DAT:
1960 skcb->addr.type = J1939_ETP;
1961 /* fall through */
1962 case J1939_TP_PGN_DAT:
1963 j1939_xtp_rx_dat(priv, skb);
1964 break;
1965
1966 case J1939_ETP_PGN_CTL:
1967 skcb->addr.type = J1939_ETP;
1968 /* fall through */
1969 case J1939_TP_PGN_CTL:
1970 if (skb->len < 8)
1971 return 0; /* Don't care. Nothing to extract here */
1972
1973 j1939_tp_cmd_recv(priv, skb);
1974 break;
1975 default:
1976 return 0; /* no problem */
1977 }
1978 return 1; /* "I processed the message" */
1979}
1980
1981void j1939_simple_recv(struct j1939_priv *priv, struct sk_buff *skb)
1982{
1983 struct j1939_session *session;
1984
1985 if (!skb->sk)
1986 return;
1987
1988 j1939_session_list_lock(priv);
1989 session = j1939_session_get_simple(priv, skb);
1990 j1939_session_list_unlock(priv);
1991 if (!session) {
1992 netdev_warn(priv->ndev,
1993 "%s: Received already invalidated message\n",
1994 __func__);
1995 return;
1996 }
1997
1998 j1939_session_timers_cancel(session);
1999 j1939_session_deactivate(session);
2000 j1939_session_put(session);
2001}
2002
2003int j1939_cancel_active_session(struct j1939_priv *priv, struct sock *sk)
2004{
2005 struct j1939_session *session, *saved;
2006
2007 netdev_dbg(priv->ndev, "%s, sk: %p\n", __func__, sk);
2008 j1939_session_list_lock(priv);
2009 list_for_each_entry_safe(session, saved,
2010 &priv->active_session_list,
2011 active_session_list_entry) {
2012 if (!sk || sk == session->sk) {
2013 j1939_session_timers_cancel(session);
2014 session->err = ESHUTDOWN;
2015 j1939_session_deactivate_locked(session);
2016 }
2017 }
2018 j1939_session_list_unlock(priv);
2019 return NOTIFY_DONE;
2020}
2021
2022void j1939_tp_init(struct j1939_priv *priv)
2023{
2024 spin_lock_init(&priv->active_session_list_lock);
2025 INIT_LIST_HEAD(&priv->active_session_list);
2026 priv->tp_max_packet_size = J1939_MAX_ETP_PACKET_SIZE;
2027}
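
The transport code above implements the TP/ETP RTS/CTS and BAM state machines that back CAN_J1939 sockets. As a purely illustrative userspace sketch (not part of this patch set), the fragment below shows how a payload larger than 8 bytes ends up in j1939_tp_send(): the socket layer hands the skb to the TP engine, which picks J1939_TP or J1939_ETP from the size and drives the session timers listed above. The interface name, source/destination addresses, PGN and function name are arbitrary assumptions chosen for the example.

#include <net/if.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#include <linux/can.h>
#include <linux/can/j1939.h>

int j1939_send_100_bytes(const char *ifname)
{
	struct sockaddr_can addr;
	unsigned char buf[100] = { 0 };	/* > 8 bytes -> transport session */
	int sock, ret;

	sock = socket(PF_CAN, SOCK_DGRAM, CAN_J1939);
	if (sock < 0)
		return -1;

	/* bind to the CAN interface with an assumed local address 0x20 */
	memset(&addr, 0, sizeof(addr));
	addr.can_family = AF_CAN;
	addr.can_ifindex = if_nametoindex(ifname);
	addr.can_addr.j1939.name = J1939_NO_NAME;
	addr.can_addr.j1939.addr = 0x20;
	addr.can_addr.j1939.pgn = J1939_NO_PGN;
	ret = bind(sock, (struct sockaddr *)&addr, sizeof(addr));
	if (ret < 0)
		goto out;

	/* assumed peer: destination address 0x30, PGN 0x12300 */
	addr.can_addr.j1939.addr = 0x30;
	addr.can_addr.j1939.pgn = 0x12300;
	ret = sendto(sock, buf, sizeof(buf), 0,
		     (struct sockaddr *)&addr, sizeof(addr));

out:
	close(sock);
	return ret < 0 ? -1 : 0;
}

Since the destination is unicast and the payload fits in J1939_MAX_TP_PACKET_SIZE, this send would be segmented by the RTS/CTS path rather than BAM.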
diff --git a/net/can/proc.c b/net/can/proc.c
index edb822c31902..e6881bfc3ed1 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -45,6 +45,7 @@
45#include <linux/list.h> 45#include <linux/list.h>
46#include <linux/rcupdate.h> 46#include <linux/rcupdate.h>
47#include <linux/if_arp.h> 47#include <linux/if_arp.h>
48#include <linux/can/can-ml.h>
48#include <linux/can/core.h> 49#include <linux/can/core.h>
49 50
50#include "af_can.h" 51#include "af_can.h"
@@ -78,21 +79,21 @@ static const char rx_list_name[][8] = {
78 79
79static void can_init_stats(struct net *net) 80static void can_init_stats(struct net *net)
80{ 81{
81 struct s_stats *can_stats = net->can.can_stats; 82 struct can_pkg_stats *pkg_stats = net->can.pkg_stats;
82 struct s_pstats *can_pstats = net->can.can_pstats; 83 struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats;
83 /* 84 /*
84 * This memset function is called from a timer context (when 85 * This memset function is called from a timer context (when
85 * can_stattimer is active which is the default) OR in a process 86 * can_stattimer is active which is the default) OR in a process
86 * context (reading the proc_fs when can_stattimer is disabled). 87 * context (reading the proc_fs when can_stattimer is disabled).
87 */ 88 */
88 memset(can_stats, 0, sizeof(struct s_stats)); 89 memset(pkg_stats, 0, sizeof(struct can_pkg_stats));
89 can_stats->jiffies_init = jiffies; 90 pkg_stats->jiffies_init = jiffies;
90 91
91 can_pstats->stats_reset++; 92 rcv_lists_stats->stats_reset++;
92 93
93 if (user_reset) { 94 if (user_reset) {
94 user_reset = 0; 95 user_reset = 0;
95 can_pstats->user_reset++; 96 rcv_lists_stats->user_reset++;
96 } 97 }
97} 98}
98 99
@@ -118,8 +119,8 @@ static unsigned long calc_rate(unsigned long oldjif, unsigned long newjif,
118 119
119void can_stat_update(struct timer_list *t) 120void can_stat_update(struct timer_list *t)
120{ 121{
121 struct net *net = from_timer(net, t, can.can_stattimer); 122 struct net *net = from_timer(net, t, can.stattimer);
122 struct s_stats *can_stats = net->can.can_stats; 123 struct can_pkg_stats *pkg_stats = net->can.pkg_stats;
123 unsigned long j = jiffies; /* snapshot */ 124 unsigned long j = jiffies; /* snapshot */
124 125
125 /* restart counting in timer context on user request */ 126 /* restart counting in timer context on user request */
@@ -127,57 +128,57 @@ void can_stat_update(struct timer_list *t)
127 can_init_stats(net); 128 can_init_stats(net);
128 129
129 /* restart counting on jiffies overflow */ 130 /* restart counting on jiffies overflow */
130 if (j < can_stats->jiffies_init) 131 if (j < pkg_stats->jiffies_init)
131 can_init_stats(net); 132 can_init_stats(net);
132 133
133 /* prevent overflow in calc_rate() */ 134 /* prevent overflow in calc_rate() */
134 if (can_stats->rx_frames > (ULONG_MAX / HZ)) 135 if (pkg_stats->rx_frames > (ULONG_MAX / HZ))
135 can_init_stats(net); 136 can_init_stats(net);
136 137
137 /* prevent overflow in calc_rate() */ 138 /* prevent overflow in calc_rate() */
138 if (can_stats->tx_frames > (ULONG_MAX / HZ)) 139 if (pkg_stats->tx_frames > (ULONG_MAX / HZ))
139 can_init_stats(net); 140 can_init_stats(net);
140 141
141 /* matches overflow - very improbable */ 142 /* matches overflow - very improbable */
142 if (can_stats->matches > (ULONG_MAX / 100)) 143 if (pkg_stats->matches > (ULONG_MAX / 100))
143 can_init_stats(net); 144 can_init_stats(net);
144 145
145 /* calc total values */ 146 /* calc total values */
146 if (can_stats->rx_frames) 147 if (pkg_stats->rx_frames)
147 can_stats->total_rx_match_ratio = (can_stats->matches * 100) / 148 pkg_stats->total_rx_match_ratio = (pkg_stats->matches * 100) /
148 can_stats->rx_frames; 149 pkg_stats->rx_frames;
149 150
150 can_stats->total_tx_rate = calc_rate(can_stats->jiffies_init, j, 151 pkg_stats->total_tx_rate = calc_rate(pkg_stats->jiffies_init, j,
151 can_stats->tx_frames); 152 pkg_stats->tx_frames);
152 can_stats->total_rx_rate = calc_rate(can_stats->jiffies_init, j, 153 pkg_stats->total_rx_rate = calc_rate(pkg_stats->jiffies_init, j,
153 can_stats->rx_frames); 154 pkg_stats->rx_frames);
154 155
155 /* calc current values */ 156 /* calc current values */
156 if (can_stats->rx_frames_delta) 157 if (pkg_stats->rx_frames_delta)
157 can_stats->current_rx_match_ratio = 158 pkg_stats->current_rx_match_ratio =
158 (can_stats->matches_delta * 100) / 159 (pkg_stats->matches_delta * 100) /
159 can_stats->rx_frames_delta; 160 pkg_stats->rx_frames_delta;
160 161
161 can_stats->current_tx_rate = calc_rate(0, HZ, can_stats->tx_frames_delta); 162 pkg_stats->current_tx_rate = calc_rate(0, HZ, pkg_stats->tx_frames_delta);
162 can_stats->current_rx_rate = calc_rate(0, HZ, can_stats->rx_frames_delta); 163 pkg_stats->current_rx_rate = calc_rate(0, HZ, pkg_stats->rx_frames_delta);
163 164
164 /* check / update maximum values */ 165 /* check / update maximum values */
165 if (can_stats->max_tx_rate < can_stats->current_tx_rate) 166 if (pkg_stats->max_tx_rate < pkg_stats->current_tx_rate)
166 can_stats->max_tx_rate = can_stats->current_tx_rate; 167 pkg_stats->max_tx_rate = pkg_stats->current_tx_rate;
167 168
168 if (can_stats->max_rx_rate < can_stats->current_rx_rate) 169 if (pkg_stats->max_rx_rate < pkg_stats->current_rx_rate)
169 can_stats->max_rx_rate = can_stats->current_rx_rate; 170 pkg_stats->max_rx_rate = pkg_stats->current_rx_rate;
170 171
171 if (can_stats->max_rx_match_ratio < can_stats->current_rx_match_ratio) 172 if (pkg_stats->max_rx_match_ratio < pkg_stats->current_rx_match_ratio)
172 can_stats->max_rx_match_ratio = can_stats->current_rx_match_ratio; 173 pkg_stats->max_rx_match_ratio = pkg_stats->current_rx_match_ratio;
173 174
174 /* clear values for 'current rate' calculation */ 175 /* clear values for 'current rate' calculation */
175 can_stats->tx_frames_delta = 0; 176 pkg_stats->tx_frames_delta = 0;
176 can_stats->rx_frames_delta = 0; 177 pkg_stats->rx_frames_delta = 0;
177 can_stats->matches_delta = 0; 178 pkg_stats->matches_delta = 0;
178 179
179 /* restart timer (one second) */ 180 /* restart timer (one second) */
180 mod_timer(&net->can.can_stattimer, round_jiffies(jiffies + HZ)); 181 mod_timer(&net->can.stattimer, round_jiffies(jiffies + HZ));
181} 182}
182 183
183/* 184/*
@@ -212,60 +213,60 @@ static void can_print_recv_banner(struct seq_file *m)
212static int can_stats_proc_show(struct seq_file *m, void *v) 213static int can_stats_proc_show(struct seq_file *m, void *v)
213{ 214{
214 struct net *net = m->private; 215 struct net *net = m->private;
215 struct s_stats *can_stats = net->can.can_stats; 216 struct can_pkg_stats *pkg_stats = net->can.pkg_stats;
216 struct s_pstats *can_pstats = net->can.can_pstats; 217 struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats;
217 218
218 seq_putc(m, '\n'); 219 seq_putc(m, '\n');
219 seq_printf(m, " %8ld transmitted frames (TXF)\n", can_stats->tx_frames); 220 seq_printf(m, " %8ld transmitted frames (TXF)\n", pkg_stats->tx_frames);
220 seq_printf(m, " %8ld received frames (RXF)\n", can_stats->rx_frames); 221 seq_printf(m, " %8ld received frames (RXF)\n", pkg_stats->rx_frames);
221 seq_printf(m, " %8ld matched frames (RXMF)\n", can_stats->matches); 222 seq_printf(m, " %8ld matched frames (RXMF)\n", pkg_stats->matches);
222 223
223 seq_putc(m, '\n'); 224 seq_putc(m, '\n');
224 225
225 if (net->can.can_stattimer.function == can_stat_update) { 226 if (net->can.stattimer.function == can_stat_update) {
226 seq_printf(m, " %8ld %% total match ratio (RXMR)\n", 227 seq_printf(m, " %8ld %% total match ratio (RXMR)\n",
227 can_stats->total_rx_match_ratio); 228 pkg_stats->total_rx_match_ratio);
228 229
229 seq_printf(m, " %8ld frames/s total tx rate (TXR)\n", 230 seq_printf(m, " %8ld frames/s total tx rate (TXR)\n",
230 can_stats->total_tx_rate); 231 pkg_stats->total_tx_rate);
231 seq_printf(m, " %8ld frames/s total rx rate (RXR)\n", 232 seq_printf(m, " %8ld frames/s total rx rate (RXR)\n",
232 can_stats->total_rx_rate); 233 pkg_stats->total_rx_rate);
233 234
234 seq_putc(m, '\n'); 235 seq_putc(m, '\n');
235 236
236 seq_printf(m, " %8ld %% current match ratio (CRXMR)\n", 237 seq_printf(m, " %8ld %% current match ratio (CRXMR)\n",
237 can_stats->current_rx_match_ratio); 238 pkg_stats->current_rx_match_ratio);
238 239
239 seq_printf(m, " %8ld frames/s current tx rate (CTXR)\n", 240 seq_printf(m, " %8ld frames/s current tx rate (CTXR)\n",
240 can_stats->current_tx_rate); 241 pkg_stats->current_tx_rate);
241 seq_printf(m, " %8ld frames/s current rx rate (CRXR)\n", 242 seq_printf(m, " %8ld frames/s current rx rate (CRXR)\n",
242 can_stats->current_rx_rate); 243 pkg_stats->current_rx_rate);
243 244
244 seq_putc(m, '\n'); 245 seq_putc(m, '\n');
245 246
246 seq_printf(m, " %8ld %% max match ratio (MRXMR)\n", 247 seq_printf(m, " %8ld %% max match ratio (MRXMR)\n",
247 can_stats->max_rx_match_ratio); 248 pkg_stats->max_rx_match_ratio);
248 249
249 seq_printf(m, " %8ld frames/s max tx rate (MTXR)\n", 250 seq_printf(m, " %8ld frames/s max tx rate (MTXR)\n",
250 can_stats->max_tx_rate); 251 pkg_stats->max_tx_rate);
251 seq_printf(m, " %8ld frames/s max rx rate (MRXR)\n", 252 seq_printf(m, " %8ld frames/s max rx rate (MRXR)\n",
252 can_stats->max_rx_rate); 253 pkg_stats->max_rx_rate);
253 254
254 seq_putc(m, '\n'); 255 seq_putc(m, '\n');
255 } 256 }
256 257
257 seq_printf(m, " %8ld current receive list entries (CRCV)\n", 258 seq_printf(m, " %8ld current receive list entries (CRCV)\n",
258 can_pstats->rcv_entries); 259 rcv_lists_stats->rcv_entries);
259 seq_printf(m, " %8ld maximum receive list entries (MRCV)\n", 260 seq_printf(m, " %8ld maximum receive list entries (MRCV)\n",
260 can_pstats->rcv_entries_max); 261 rcv_lists_stats->rcv_entries_max);
261 262
262 if (can_pstats->stats_reset) 263 if (rcv_lists_stats->stats_reset)
263 seq_printf(m, "\n %8ld statistic resets (STR)\n", 264 seq_printf(m, "\n %8ld statistic resets (STR)\n",
264 can_pstats->stats_reset); 265 rcv_lists_stats->stats_reset);
265 266
266 if (can_pstats->user_reset) 267 if (rcv_lists_stats->user_reset)
267 seq_printf(m, " %8ld user statistic resets (USTR)\n", 268 seq_printf(m, " %8ld user statistic resets (USTR)\n",
268 can_pstats->user_reset); 269 rcv_lists_stats->user_reset);
269 270
270 seq_putc(m, '\n'); 271 seq_putc(m, '\n');
271 return 0; 272 return 0;
@@ -274,20 +275,20 @@ static int can_stats_proc_show(struct seq_file *m, void *v)
274static int can_reset_stats_proc_show(struct seq_file *m, void *v) 275static int can_reset_stats_proc_show(struct seq_file *m, void *v)
275{ 276{
276 struct net *net = m->private; 277 struct net *net = m->private;
277 struct s_pstats *can_pstats = net->can.can_pstats; 278 struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats;
278 struct s_stats *can_stats = net->can.can_stats; 279 struct can_pkg_stats *pkg_stats = net->can.pkg_stats;
279 280
280 user_reset = 1; 281 user_reset = 1;
281 282
282 if (net->can.can_stattimer.function == can_stat_update) { 283 if (net->can.stattimer.function == can_stat_update) {
283 seq_printf(m, "Scheduled statistic reset #%ld.\n", 284 seq_printf(m, "Scheduled statistic reset #%ld.\n",
284 can_pstats->stats_reset + 1); 285 rcv_lists_stats->stats_reset + 1);
285 } else { 286 } else {
286 if (can_stats->jiffies_init != jiffies) 287 if (pkg_stats->jiffies_init != jiffies)
287 can_init_stats(net); 288 can_init_stats(net);
288 289
289 seq_printf(m, "Performed statistic reset #%ld.\n", 290 seq_printf(m, "Performed statistic reset #%ld.\n",
290 can_pstats->stats_reset); 291 rcv_lists_stats->stats_reset);
291 } 292 }
292 return 0; 293 return 0;
293} 294}
@@ -300,11 +301,11 @@ static int can_version_proc_show(struct seq_file *m, void *v)
300 301
301static inline void can_rcvlist_proc_show_one(struct seq_file *m, int idx, 302static inline void can_rcvlist_proc_show_one(struct seq_file *m, int idx,
302 struct net_device *dev, 303 struct net_device *dev,
303 struct can_dev_rcv_lists *d) 304 struct can_dev_rcv_lists *dev_rcv_lists)
304{ 305{
305 if (!hlist_empty(&d->rx[idx])) { 306 if (!hlist_empty(&dev_rcv_lists->rx[idx])) {
306 can_print_recv_banner(m); 307 can_print_recv_banner(m);
307 can_print_rcvlist(m, &d->rx[idx], dev); 308 can_print_rcvlist(m, &dev_rcv_lists->rx[idx], dev);
308 } else 309 } else
309 seq_printf(m, " (%s: no entry)\n", DNAME(dev)); 310 seq_printf(m, " (%s: no entry)\n", DNAME(dev));
310 311
@@ -315,7 +316,7 @@ static int can_rcvlist_proc_show(struct seq_file *m, void *v)
315 /* double cast to prevent GCC warning */ 316 /* double cast to prevent GCC warning */
316 int idx = (int)(long)PDE_DATA(m->file->f_inode); 317 int idx = (int)(long)PDE_DATA(m->file->f_inode);
317 struct net_device *dev; 318 struct net_device *dev;
318 struct can_dev_rcv_lists *d; 319 struct can_dev_rcv_lists *dev_rcv_lists;
319 struct net *net = m->private; 320 struct net *net = m->private;
320 321
321 seq_printf(m, "\nreceive list '%s':\n", rx_list_name[idx]); 322 seq_printf(m, "\nreceive list '%s':\n", rx_list_name[idx]);
@@ -323,8 +324,8 @@ static int can_rcvlist_proc_show(struct seq_file *m, void *v)
323 rcu_read_lock(); 324 rcu_read_lock();
324 325
325 /* receive list for 'all' CAN devices (dev == NULL) */ 326 /* receive list for 'all' CAN devices (dev == NULL) */
326 d = net->can.can_rx_alldev_list; 327 dev_rcv_lists = net->can.rx_alldev_list;
327 can_rcvlist_proc_show_one(m, idx, NULL, d); 328 can_rcvlist_proc_show_one(m, idx, NULL, dev_rcv_lists);
328 329
329 /* receive list for registered CAN devices */ 330 /* receive list for registered CAN devices */
330 for_each_netdev_rcu(net, dev) { 331 for_each_netdev_rcu(net, dev) {
@@ -366,7 +367,7 @@ static inline void can_rcvlist_proc_show_array(struct seq_file *m,
366static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v) 367static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v)
367{ 368{
368 struct net_device *dev; 369 struct net_device *dev;
369 struct can_dev_rcv_lists *d; 370 struct can_dev_rcv_lists *dev_rcv_lists;
370 struct net *net = m->private; 371 struct net *net = m->private;
371 372
372 /* RX_SFF */ 373 /* RX_SFF */
@@ -375,15 +376,16 @@ static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v)
375 rcu_read_lock(); 376 rcu_read_lock();
376 377
377 /* sff receive list for 'all' CAN devices (dev == NULL) */ 378 /* sff receive list for 'all' CAN devices (dev == NULL) */
378 d = net->can.can_rx_alldev_list; 379 dev_rcv_lists = net->can.rx_alldev_list;
379 can_rcvlist_proc_show_array(m, NULL, d->rx_sff, ARRAY_SIZE(d->rx_sff)); 380 can_rcvlist_proc_show_array(m, NULL, dev_rcv_lists->rx_sff,
381 ARRAY_SIZE(dev_rcv_lists->rx_sff));
380 382
381 /* sff receive list for registered CAN devices */ 383 /* sff receive list for registered CAN devices */
382 for_each_netdev_rcu(net, dev) { 384 for_each_netdev_rcu(net, dev) {
383 if (dev->type == ARPHRD_CAN && dev->ml_priv) { 385 if (dev->type == ARPHRD_CAN && dev->ml_priv) {
384 d = dev->ml_priv; 386 dev_rcv_lists = dev->ml_priv;
385 can_rcvlist_proc_show_array(m, dev, d->rx_sff, 387 can_rcvlist_proc_show_array(m, dev, dev_rcv_lists->rx_sff,
386 ARRAY_SIZE(d->rx_sff)); 388 ARRAY_SIZE(dev_rcv_lists->rx_sff));
387 } 389 }
388 } 390 }
389 391
@@ -396,7 +398,7 @@ static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v)
396static int can_rcvlist_eff_proc_show(struct seq_file *m, void *v) 398static int can_rcvlist_eff_proc_show(struct seq_file *m, void *v)
397{ 399{
398 struct net_device *dev; 400 struct net_device *dev;
399 struct can_dev_rcv_lists *d; 401 struct can_dev_rcv_lists *dev_rcv_lists;
400 struct net *net = m->private; 402 struct net *net = m->private;
401 403
402 /* RX_EFF */ 404 /* RX_EFF */
@@ -405,15 +407,16 @@ static int can_rcvlist_eff_proc_show(struct seq_file *m, void *v)
405 rcu_read_lock(); 407 rcu_read_lock();
406 408
407 /* eff receive list for 'all' CAN devices (dev == NULL) */ 409 /* eff receive list for 'all' CAN devices (dev == NULL) */
408 d = net->can.can_rx_alldev_list; 410 dev_rcv_lists = net->can.rx_alldev_list;
409 can_rcvlist_proc_show_array(m, NULL, d->rx_eff, ARRAY_SIZE(d->rx_eff)); 411 can_rcvlist_proc_show_array(m, NULL, dev_rcv_lists->rx_eff,
412 ARRAY_SIZE(dev_rcv_lists->rx_eff));
410 413
411 /* eff receive list for registered CAN devices */ 414 /* eff receive list for registered CAN devices */
412 for_each_netdev_rcu(net, dev) { 415 for_each_netdev_rcu(net, dev) {
413 if (dev->type == ARPHRD_CAN && dev->ml_priv) { 416 if (dev->type == ARPHRD_CAN && dev->ml_priv) {
414 d = dev->ml_priv; 417 dev_rcv_lists = dev->ml_priv;
415 can_rcvlist_proc_show_array(m, dev, d->rx_eff, 418 can_rcvlist_proc_show_array(m, dev, dev_rcv_lists->rx_eff,
416 ARRAY_SIZE(d->rx_eff)); 419 ARRAY_SIZE(dev_rcv_lists->rx_eff));
417 } 420 }
418 } 421 }
419 422
diff --git a/net/can/raw.c b/net/can/raw.c
index fdbc36140e9b..59c039d73c6d 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -396,7 +396,7 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
396 int err = 0; 396 int err = 0;
397 int notify_enetdown = 0; 397 int notify_enetdown = 0;
398 398
399 if (len < sizeof(*addr)) 399 if (len < CAN_REQUIRED_SIZE(*addr, can_ifindex))
400 return -EINVAL; 400 return -EINVAL;
401 if (addr->can_family != AF_CAN) 401 if (addr->can_family != AF_CAN)
402 return -EINVAL; 402 return -EINVAL;
@@ -733,7 +733,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
733 if (msg->msg_name) { 733 if (msg->msg_name) {
734 DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name); 734 DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
735 735
736 if (msg->msg_namelen < sizeof(*addr)) 736 if (msg->msg_namelen < CAN_REQUIRED_SIZE(*addr, can_ifindex))
737 return -EINVAL; 737 return -EINVAL;
738 738
739 if (addr->can_family != AF_CAN) 739 if (addr->can_family != AF_CAN)