diff options
| author | David S. Miller <davem@davemloft.net> | 2019-08-15 15:43:22 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2019-08-15 15:43:22 -0400 |
| commit | 8714652fcd327df170e241394d5c83c38a2f0e27 (patch) | |
| tree | 96cb74f1944d3f32c36ec13efcd9dba47f675682 /net | |
| parent | 8d73f8f23e6b869b726cb01dd4747f56dc88660a (diff) | |
| parent | 3ca3c4aad2efa2931b663acc4ece7a38b31071d1 (diff) | |
Merge tag 'linux-can-next-for-5.4-20190814' of git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next
Marc Kleine-Budde says:
====================
pull-request: can-next 2019-08-14
this is a pull request for net-next/master consisting of 41 patches.
The first two patches are for the kvaser_pciefd driver: Christer Beskow
removes unnecessary code in the kvaser_pciefd_pwm_stop() function,
YueHaibing removes the unused including of <linux/version.h>.
In the next patch YueHaibing also removes the unused including of
<linux/version.h> in the f81601 driver.
In the ti_hecc driver the next 6 patches are by me and fix checkpatch
warnings. YueHaibing's patch removes an unused variable in the
ti_hecc_mailbox_read() function.
The next 6 patches all target the xilinx_can driver. Anssi Hannula's
patch fixes a chip start failure with an invalid bus. The patch by
Venkatesh Yadav Abbarapu skips an error message in case of a deferred
probe. The 3 patches by Appana Durga Kedareswara rao fix the RX and TX
path for CAN-FD frames. Srinivas Neeli's patch fixes the bit timing
calculations for CAN-FD.
The next 12 patches are by me and fix several checkpatch warnings in the
af_can, raw and bcm components.
Thomas Gleixner provides a patch for the bcm, which switches the timer
to HRTIMER_MODE_SOFT and removes the hrtimer_tasklet.
Then 6 more patches by me for the gw component, which fix checkpatch
warnings, followed by 2 patches by Oliver Hartkopp to add CAN-FD
support.
The vcan driver gets 3 patches by me, fixing checkpatch warnings.
And finally a patch by Andre Hartmann to fix typos in CAN's netlink
header.
====================
Diffstat (limited to 'net')
| -rw-r--r-- | net/can/af_can.c | 89 | ||||
| -rw-r--r-- | net/can/af_can.h | 5 | ||||
| -rw-r--r-- | net/can/bcm.c | 160 | ||||
| -rw-r--r-- | net/can/gw.c | 510 | ||||
| -rw-r--r-- | net/can/raw.c | 34 |
5 files changed, 436 insertions, 362 deletions
diff --git a/net/can/af_can.c b/net/can/af_can.c index 76cf83b2bd40..9a9a51847c7c 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) | 1 | // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) |
| 2 | /* | 2 | /* af_can.c - Protocol family CAN core module |
| 3 | * af_can.c - Protocol family CAN core module | ||
| 4 | * (used by different CAN protocol modules) | 3 | * (used by different CAN protocol modules) |
| 5 | * | 4 | * |
| 6 | * Copyright (c) 2002-2017 Volkswagen Group Electronic Research | 5 | * Copyright (c) 2002-2017 Volkswagen Group Electronic Research |
| @@ -84,9 +83,7 @@ static DEFINE_MUTEX(proto_tab_lock); | |||
| 84 | 83 | ||
| 85 | static atomic_t skbcounter = ATOMIC_INIT(0); | 84 | static atomic_t skbcounter = ATOMIC_INIT(0); |
| 86 | 85 | ||
| 87 | /* | 86 | /* af_can socket functions */ |
| 88 | * af_can socket functions | ||
| 89 | */ | ||
| 90 | 87 | ||
| 91 | static void can_sock_destruct(struct sock *sk) | 88 | static void can_sock_destruct(struct sock *sk) |
| 92 | { | 89 | { |
| @@ -132,14 +129,13 @@ static int can_create(struct net *net, struct socket *sock, int protocol, | |||
| 132 | 129 | ||
| 133 | err = request_module("can-proto-%d", protocol); | 130 | err = request_module("can-proto-%d", protocol); |
| 134 | 131 | ||
| 135 | /* | 132 | /* In case of error we only print a message but don't |
| 136 | * In case of error we only print a message but don't | ||
| 137 | * return the error code immediately. Below we will | 133 | * return the error code immediately. Below we will |
| 138 | * return -EPROTONOSUPPORT | 134 | * return -EPROTONOSUPPORT |
| 139 | */ | 135 | */ |
| 140 | if (err) | 136 | if (err) |
| 141 | printk_ratelimited(KERN_ERR "can: request_module " | 137 | pr_err_ratelimited("can: request_module (can-proto-%d) failed.\n", |
| 142 | "(can-proto-%d) failed.\n", protocol); | 138 | protocol); |
| 143 | 139 | ||
| 144 | cp = can_get_proto(protocol); | 140 | cp = can_get_proto(protocol); |
| 145 | } | 141 | } |
| @@ -180,9 +176,7 @@ static int can_create(struct net *net, struct socket *sock, int protocol, | |||
| 180 | return err; | 176 | return err; |
| 181 | } | 177 | } |
| 182 | 178 | ||
| 183 | /* | 179 | /* af_can tx path */ |
| 184 | * af_can tx path | ||
| 185 | */ | ||
| 186 | 180 | ||
| 187 | /** | 181 | /** |
| 188 | * can_send - transmit a CAN frame (optional with local loopback) | 182 | * can_send - transmit a CAN frame (optional with local loopback) |
| @@ -215,11 +209,11 @@ int can_send(struct sk_buff *skb, int loop) | |||
| 215 | skb->protocol = htons(ETH_P_CANFD); | 209 | skb->protocol = htons(ETH_P_CANFD); |
| 216 | if (unlikely(cfd->len > CANFD_MAX_DLEN)) | 210 | if (unlikely(cfd->len > CANFD_MAX_DLEN)) |
| 217 | goto inval_skb; | 211 | goto inval_skb; |
| 218 | } else | 212 | } else { |
| 219 | goto inval_skb; | 213 | goto inval_skb; |
| 214 | } | ||
| 220 | 215 | ||
| 221 | /* | 216 | /* Make sure the CAN frame can pass the selected CAN netdevice. |
| 222 | * Make sure the CAN frame can pass the selected CAN netdevice. | ||
| 223 | * As structs can_frame and canfd_frame are similar, we can provide | 217 | * As structs can_frame and canfd_frame are similar, we can provide |
| 224 | * CAN FD frames to legacy CAN drivers as long as the length is <= 8 | 218 | * CAN FD frames to legacy CAN drivers as long as the length is <= 8 |
| 225 | */ | 219 | */ |
| @@ -250,8 +244,7 @@ int can_send(struct sk_buff *skb, int loop) | |||
| 250 | /* indication for the CAN driver: do loopback */ | 244 | /* indication for the CAN driver: do loopback */ |
| 251 | skb->pkt_type = PACKET_LOOPBACK; | 245 | skb->pkt_type = PACKET_LOOPBACK; |
| 252 | 246 | ||
| 253 | /* | 247 | /* The reference to the originating sock may be required |
| 254 | * The reference to the originating sock may be required | ||
| 255 | * by the receiving socket to check whether the frame is | 248 | * by the receiving socket to check whether the frame is |
| 256 | * its own. Example: can_raw sockopt CAN_RAW_RECV_OWN_MSGS | 249 | * its own. Example: can_raw sockopt CAN_RAW_RECV_OWN_MSGS |
| 257 | * Therefore we have to ensure that skb->sk remains the | 250 | * Therefore we have to ensure that skb->sk remains the |
| @@ -260,8 +253,7 @@ int can_send(struct sk_buff *skb, int loop) | |||
| 260 | */ | 253 | */ |
| 261 | 254 | ||
| 262 | if (!(skb->dev->flags & IFF_ECHO)) { | 255 | if (!(skb->dev->flags & IFF_ECHO)) { |
| 263 | /* | 256 | /* If the interface is not capable to do loopback |
| 264 | * If the interface is not capable to do loopback | ||
| 265 | * itself, we do it here. | 257 | * itself, we do it here. |
| 266 | */ | 258 | */ |
| 267 | newskb = skb_clone(skb, GFP_ATOMIC); | 259 | newskb = skb_clone(skb, GFP_ATOMIC); |
| @@ -304,12 +296,10 @@ inval_skb: | |||
| 304 | } | 296 | } |
| 305 | EXPORT_SYMBOL(can_send); | 297 | EXPORT_SYMBOL(can_send); |
| 306 | 298 | ||
| 307 | /* | 299 | /* af_can rx path */ |
| 308 | * af_can rx path | ||
| 309 | */ | ||
| 310 | 300 | ||
| 311 | static struct can_dev_rcv_lists *find_dev_rcv_lists(struct net *net, | 301 | static struct can_dev_rcv_lists *find_dev_rcv_lists(struct net *net, |
| 312 | struct net_device *dev) | 302 | struct net_device *dev) |
| 313 | { | 303 | { |
| 314 | if (!dev) | 304 | if (!dev) |
| 315 | return net->can.can_rx_alldev_list; | 305 | return net->can.can_rx_alldev_list; |
| @@ -401,7 +391,6 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask, | |||
| 401 | /* extra filterlists for the subscription of a single non-RTR can_id */ | 391 | /* extra filterlists for the subscription of a single non-RTR can_id */ |
| 402 | if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS) && | 392 | if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS) && |
| 403 | !(*can_id & CAN_RTR_FLAG)) { | 393 | !(*can_id & CAN_RTR_FLAG)) { |
| 404 | |||
| 405 | if (*can_id & CAN_EFF_FLAG) { | 394 | if (*can_id & CAN_EFF_FLAG) { |
| 406 | if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS)) | 395 | if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS)) |
| 407 | return &d->rx_eff[effhash(*can_id)]; | 396 | return &d->rx_eff[effhash(*can_id)]; |
| @@ -498,9 +487,7 @@ int can_rx_register(struct net *net, struct net_device *dev, canid_t can_id, | |||
| 498 | } | 487 | } |
| 499 | EXPORT_SYMBOL(can_rx_register); | 488 | EXPORT_SYMBOL(can_rx_register); |
| 500 | 489 | ||
| 501 | /* | 490 | /* can_rx_delete_receiver - rcu callback for single receiver entry removal */ |
| 502 | * can_rx_delete_receiver - rcu callback for single receiver entry removal | ||
| 503 | */ | ||
| 504 | static void can_rx_delete_receiver(struct rcu_head *rp) | 491 | static void can_rx_delete_receiver(struct rcu_head *rp) |
| 505 | { | 492 | { |
| 506 | struct receiver *r = container_of(rp, struct receiver, rcu); | 493 | struct receiver *r = container_of(rp, struct receiver, rcu); |
| @@ -541,16 +528,14 @@ void can_rx_unregister(struct net *net, struct net_device *dev, canid_t can_id, | |||
| 541 | 528 | ||
| 542 | d = find_dev_rcv_lists(net, dev); | 529 | d = find_dev_rcv_lists(net, dev); |
| 543 | if (!d) { | 530 | if (!d) { |
| 544 | pr_err("BUG: receive list not found for " | 531 | pr_err("BUG: receive list not found for dev %s, id %03X, mask %03X\n", |
| 545 | "dev %s, id %03X, mask %03X\n", | ||
| 546 | DNAME(dev), can_id, mask); | 532 | DNAME(dev), can_id, mask); |
| 547 | goto out; | 533 | goto out; |
| 548 | } | 534 | } |
| 549 | 535 | ||
| 550 | rl = find_rcv_list(&can_id, &mask, d); | 536 | rl = find_rcv_list(&can_id, &mask, d); |
| 551 | 537 | ||
| 552 | /* | 538 | /* Search the receiver list for the item to delete. This should |
| 553 | * Search the receiver list for the item to delete. This should | ||
| 554 | * exist, since no receiver may be unregistered that hasn't | 539 | * exist, since no receiver may be unregistered that hasn't |
| 555 | * been registered before. | 540 | * been registered before. |
| 556 | */ | 541 | */ |
| @@ -561,14 +546,13 @@ void can_rx_unregister(struct net *net, struct net_device *dev, canid_t can_id, | |||
| 561 | break; | 546 | break; |
| 562 | } | 547 | } |
| 563 | 548 | ||
| 564 | /* | 549 | /* Check for bugs in CAN protocol implementations using af_can.c: |
| 565 | * Check for bugs in CAN protocol implementations using af_can.c: | ||
| 566 | * 'r' will be NULL if no matching list item was found for removal. | 550 | * 'r' will be NULL if no matching list item was found for removal. |
| 567 | */ | 551 | */ |
| 568 | 552 | ||
| 569 | if (!r) { | 553 | if (!r) { |
| 570 | WARN(1, "BUG: receive list entry not found for dev %s, " | 554 | WARN(1, "BUG: receive list entry not found for dev %s, id %03X, mask %03X\n", |
| 571 | "id %03X, mask %03X\n", DNAME(dev), can_id, mask); | 555 | DNAME(dev), can_id, mask); |
| 572 | goto out; | 556 | goto out; |
| 573 | } | 557 | } |
| 574 | 558 | ||
| @@ -721,7 +705,7 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev, | |||
| 721 | } | 705 | } |
| 722 | 706 | ||
| 723 | static int canfd_rcv(struct sk_buff *skb, struct net_device *dev, | 707 | static int canfd_rcv(struct sk_buff *skb, struct net_device *dev, |
| 724 | struct packet_type *pt, struct net_device *orig_dev) | 708 | struct packet_type *pt, struct net_device *orig_dev) |
| 725 | { | 709 | { |
| 726 | struct canfd_frame *cfd = (struct canfd_frame *)skb->data; | 710 | struct canfd_frame *cfd = (struct canfd_frame *)skb->data; |
| 727 | 711 | ||
| @@ -737,9 +721,7 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev, | |||
| 737 | return NET_RX_SUCCESS; | 721 | return NET_RX_SUCCESS; |
| 738 | } | 722 | } |
| 739 | 723 | ||
| 740 | /* | 724 | /* af_can protocol functions */ |
| 741 | * af_can protocol functions | ||
| 742 | */ | ||
| 743 | 725 | ||
| 744 | /** | 726 | /** |
| 745 | * can_proto_register - register CAN transport protocol | 727 | * can_proto_register - register CAN transport protocol |
| @@ -770,8 +752,9 @@ int can_proto_register(const struct can_proto *cp) | |||
| 770 | if (rcu_access_pointer(proto_tab[proto])) { | 752 | if (rcu_access_pointer(proto_tab[proto])) { |
| 771 | pr_err("can: protocol %d already registered\n", proto); | 753 | pr_err("can: protocol %d already registered\n", proto); |
| 772 | err = -EBUSY; | 754 | err = -EBUSY; |
| 773 | } else | 755 | } else { |
| 774 | RCU_INIT_POINTER(proto_tab[proto], cp); | 756 | RCU_INIT_POINTER(proto_tab[proto], cp); |
| 757 | } | ||
| 775 | 758 | ||
| 776 | mutex_unlock(&proto_tab_lock); | 759 | mutex_unlock(&proto_tab_lock); |
| 777 | 760 | ||
| @@ -801,9 +784,7 @@ void can_proto_unregister(const struct can_proto *cp) | |||
| 801 | } | 784 | } |
| 802 | EXPORT_SYMBOL(can_proto_unregister); | 785 | EXPORT_SYMBOL(can_proto_unregister); |
| 803 | 786 | ||
| 804 | /* | 787 | /* af_can notifier to create/remove CAN netdevice specific structs */ |
| 805 | * af_can notifier to create/remove CAN netdevice specific structs | ||
| 806 | */ | ||
| 807 | static int can_notifier(struct notifier_block *nb, unsigned long msg, | 788 | static int can_notifier(struct notifier_block *nb, unsigned long msg, |
| 808 | void *ptr) | 789 | void *ptr) |
| 809 | { | 790 | { |
| @@ -814,7 +795,6 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg, | |||
| 814 | return NOTIFY_DONE; | 795 | return NOTIFY_DONE; |
| 815 | 796 | ||
| 816 | switch (msg) { | 797 | switch (msg) { |
| 817 | |||
| 818 | case NETDEV_REGISTER: | 798 | case NETDEV_REGISTER: |
| 819 | 799 | ||
| 820 | /* create new dev_rcv_lists for this device */ | 800 | /* create new dev_rcv_lists for this device */ |
| @@ -831,15 +811,16 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg, | |||
| 831 | 811 | ||
| 832 | d = dev->ml_priv; | 812 | d = dev->ml_priv; |
| 833 | if (d) { | 813 | if (d) { |
| 834 | if (d->entries) | 814 | if (d->entries) { |
| 835 | d->remove_on_zero_entries = 1; | 815 | d->remove_on_zero_entries = 1; |
| 836 | else { | 816 | } else { |
| 837 | kfree(d); | 817 | kfree(d); |
| 838 | dev->ml_priv = NULL; | 818 | dev->ml_priv = NULL; |
| 839 | } | 819 | } |
| 840 | } else | 820 | } else { |
| 841 | pr_err("can: notifier: receive list not found for dev " | 821 | pr_err("can: notifier: receive list not found for dev %s\n", |
| 842 | "%s\n", dev->name); | 822 | dev->name); |
| 823 | } | ||
| 843 | 824 | ||
| 844 | spin_unlock(&dev_net(dev)->can.can_rcvlists_lock); | 825 | spin_unlock(&dev_net(dev)->can.can_rcvlists_lock); |
| 845 | 826 | ||
| @@ -853,13 +834,13 @@ static int can_pernet_init(struct net *net) | |||
| 853 | { | 834 | { |
| 854 | spin_lock_init(&net->can.can_rcvlists_lock); | 835 | spin_lock_init(&net->can.can_rcvlists_lock); |
| 855 | net->can.can_rx_alldev_list = | 836 | net->can.can_rx_alldev_list = |
| 856 | kzalloc(sizeof(struct can_dev_rcv_lists), GFP_KERNEL); | 837 | kzalloc(sizeof(*net->can.can_rx_alldev_list), GFP_KERNEL); |
| 857 | if (!net->can.can_rx_alldev_list) | 838 | if (!net->can.can_rx_alldev_list) |
| 858 | goto out; | 839 | goto out; |
| 859 | net->can.can_stats = kzalloc(sizeof(struct s_stats), GFP_KERNEL); | 840 | net->can.can_stats = kzalloc(sizeof(*net->can.can_stats), GFP_KERNEL); |
| 860 | if (!net->can.can_stats) | 841 | if (!net->can.can_stats) |
| 861 | goto out_free_alldev_list; | 842 | goto out_free_alldev_list; |
| 862 | net->can.can_pstats = kzalloc(sizeof(struct s_pstats), GFP_KERNEL); | 843 | net->can.can_pstats = kzalloc(sizeof(*net->can.can_pstats), GFP_KERNEL); |
| 863 | if (!net->can.can_pstats) | 844 | if (!net->can.can_pstats) |
| 864 | goto out_free_can_stats; | 845 | goto out_free_can_stats; |
| 865 | 846 | ||
| @@ -913,9 +894,7 @@ static void can_pernet_exit(struct net *net) | |||
| 913 | kfree(net->can.can_pstats); | 894 | kfree(net->can.can_pstats); |
| 914 | } | 895 | } |
| 915 | 896 | ||
| 916 | /* | 897 | /* af_can module init/exit functions */ |
| 917 | * af_can module init/exit functions | ||
| 918 | */ | ||
| 919 | 898 | ||
| 920 | static struct packet_type can_packet __read_mostly = { | 899 | static struct packet_type can_packet __read_mostly = { |
| 921 | .type = cpu_to_be16(ETH_P_CAN), | 900 | .type = cpu_to_be16(ETH_P_CAN), |
diff --git a/net/can/af_can.h b/net/can/af_can.h index ef21f7c6bc80..9cdb79083623 100644 --- a/net/can/af_can.h +++ b/net/can/af_can.h | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | /* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ | 1 | /* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ |
| 2 | /* | 2 | /* Copyright (c) 2002-2007 Volkswagen Group Electronic Research |
| 3 | * Copyright (c) 2002-2007 Volkswagen Group Electronic Research | ||
| 4 | * All rights reserved. | 3 | * All rights reserved. |
| 5 | * | 4 | * |
| 6 | * Redistribution and use in source and binary forms, with or without | 5 | * Redistribution and use in source and binary forms, with or without |
| @@ -54,7 +53,7 @@ struct receiver { | |||
| 54 | canid_t can_id; | 53 | canid_t can_id; |
| 55 | canid_t mask; | 54 | canid_t mask; |
| 56 | unsigned long matches; | 55 | unsigned long matches; |
| 57 | void (*func)(struct sk_buff *, void *); | 56 | void (*func)(struct sk_buff *skb, void *data); |
| 58 | void *data; | 57 | void *data; |
| 59 | char *ident; | 58 | char *ident; |
| 60 | struct sock *sk; | 59 | struct sock *sk; |
diff --git a/net/can/bcm.c b/net/can/bcm.c index bf1d0bbecec8..28fd1a1c8487 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c | |||
| @@ -106,7 +106,6 @@ struct bcm_op { | |||
| 106 | unsigned long frames_abs, frames_filtered; | 106 | unsigned long frames_abs, frames_filtered; |
| 107 | struct bcm_timeval ival1, ival2; | 107 | struct bcm_timeval ival1, ival2; |
| 108 | struct hrtimer timer, thrtimer; | 108 | struct hrtimer timer, thrtimer; |
| 109 | struct tasklet_struct tsklet, thrtsklet; | ||
| 110 | ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg; | 109 | ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg; |
| 111 | int rx_ifindex; | 110 | int rx_ifindex; |
| 112 | int cfsiz; | 111 | int cfsiz; |
| @@ -371,25 +370,34 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head, | |||
| 371 | } | 370 | } |
| 372 | } | 371 | } |
| 373 | 372 | ||
| 374 | static void bcm_tx_start_timer(struct bcm_op *op) | 373 | static bool bcm_tx_set_expiry(struct bcm_op *op, struct hrtimer *hrt) |
| 375 | { | 374 | { |
| 375 | ktime_t ival; | ||
| 376 | |||
| 376 | if (op->kt_ival1 && op->count) | 377 | if (op->kt_ival1 && op->count) |
| 377 | hrtimer_start(&op->timer, | 378 | ival = op->kt_ival1; |
| 378 | ktime_add(ktime_get(), op->kt_ival1), | ||
| 379 | HRTIMER_MODE_ABS); | ||
| 380 | else if (op->kt_ival2) | 379 | else if (op->kt_ival2) |
| 381 | hrtimer_start(&op->timer, | 380 | ival = op->kt_ival2; |
| 382 | ktime_add(ktime_get(), op->kt_ival2), | 381 | else |
| 383 | HRTIMER_MODE_ABS); | 382 | return false; |
| 383 | |||
| 384 | hrtimer_set_expires(hrt, ktime_add(ktime_get(), ival)); | ||
| 385 | return true; | ||
| 384 | } | 386 | } |
| 385 | 387 | ||
| 386 | static void bcm_tx_timeout_tsklet(unsigned long data) | 388 | static void bcm_tx_start_timer(struct bcm_op *op) |
| 387 | { | 389 | { |
| 388 | struct bcm_op *op = (struct bcm_op *)data; | 390 | if (bcm_tx_set_expiry(op, &op->timer)) |
| 391 | hrtimer_start_expires(&op->timer, HRTIMER_MODE_ABS_SOFT); | ||
| 392 | } | ||
| 393 | |||
| 394 | /* bcm_tx_timeout_handler - performs cyclic CAN frame transmissions */ | ||
| 395 | static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer) | ||
| 396 | { | ||
| 397 | struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer); | ||
| 389 | struct bcm_msg_head msg_head; | 398 | struct bcm_msg_head msg_head; |
| 390 | 399 | ||
| 391 | if (op->kt_ival1 && (op->count > 0)) { | 400 | if (op->kt_ival1 && (op->count > 0)) { |
| 392 | |||
| 393 | op->count--; | 401 | op->count--; |
| 394 | if (!op->count && (op->flags & TX_COUNTEVT)) { | 402 | if (!op->count && (op->flags & TX_COUNTEVT)) { |
| 395 | 403 | ||
| @@ -406,22 +414,12 @@ static void bcm_tx_timeout_tsklet(unsigned long data) | |||
| 406 | } | 414 | } |
| 407 | bcm_can_tx(op); | 415 | bcm_can_tx(op); |
| 408 | 416 | ||
| 409 | } else if (op->kt_ival2) | 417 | } else if (op->kt_ival2) { |
| 410 | bcm_can_tx(op); | 418 | bcm_can_tx(op); |
| 419 | } | ||
| 411 | 420 | ||
| 412 | bcm_tx_start_timer(op); | 421 | return bcm_tx_set_expiry(op, &op->timer) ? |
| 413 | } | 422 | HRTIMER_RESTART : HRTIMER_NORESTART; |
| 414 | |||
| 415 | /* | ||
| 416 | * bcm_tx_timeout_handler - performs cyclic CAN frame transmissions | ||
| 417 | */ | ||
| 418 | static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer) | ||
| 419 | { | ||
| 420 | struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer); | ||
| 421 | |||
| 422 | tasklet_schedule(&op->tsklet); | ||
| 423 | |||
| 424 | return HRTIMER_NORESTART; | ||
| 425 | } | 423 | } |
| 426 | 424 | ||
| 427 | /* | 425 | /* |
| @@ -487,7 +485,7 @@ static void bcm_rx_update_and_send(struct bcm_op *op, | |||
| 487 | /* do not send the saved data - only start throttle timer */ | 485 | /* do not send the saved data - only start throttle timer */ |
| 488 | hrtimer_start(&op->thrtimer, | 486 | hrtimer_start(&op->thrtimer, |
| 489 | ktime_add(op->kt_lastmsg, op->kt_ival2), | 487 | ktime_add(op->kt_lastmsg, op->kt_ival2), |
| 490 | HRTIMER_MODE_ABS); | 488 | HRTIMER_MODE_ABS_SOFT); |
| 491 | return; | 489 | return; |
| 492 | } | 490 | } |
| 493 | 491 | ||
| @@ -546,14 +544,21 @@ static void bcm_rx_starttimer(struct bcm_op *op) | |||
| 546 | return; | 544 | return; |
| 547 | 545 | ||
| 548 | if (op->kt_ival1) | 546 | if (op->kt_ival1) |
| 549 | hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL); | 547 | hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL_SOFT); |
| 550 | } | 548 | } |
| 551 | 549 | ||
| 552 | static void bcm_rx_timeout_tsklet(unsigned long data) | 550 | /* bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out */ |
| 551 | static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer) | ||
| 553 | { | 552 | { |
| 554 | struct bcm_op *op = (struct bcm_op *)data; | 553 | struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer); |
| 555 | struct bcm_msg_head msg_head; | 554 | struct bcm_msg_head msg_head; |
| 556 | 555 | ||
| 556 | /* if user wants to be informed, when cyclic CAN-Messages come back */ | ||
| 557 | if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) { | ||
| 558 | /* clear received CAN frames to indicate 'nothing received' */ | ||
| 559 | memset(op->last_frames, 0, op->nframes * op->cfsiz); | ||
| 560 | } | ||
| 561 | |||
| 557 | /* create notification to user */ | 562 | /* create notification to user */ |
| 558 | msg_head.opcode = RX_TIMEOUT; | 563 | msg_head.opcode = RX_TIMEOUT; |
| 559 | msg_head.flags = op->flags; | 564 | msg_head.flags = op->flags; |
| @@ -564,25 +569,6 @@ static void bcm_rx_timeout_tsklet(unsigned long data) | |||
| 564 | msg_head.nframes = 0; | 569 | msg_head.nframes = 0; |
| 565 | 570 | ||
| 566 | bcm_send_to_user(op, &msg_head, NULL, 0); | 571 | bcm_send_to_user(op, &msg_head, NULL, 0); |
| 567 | } | ||
| 568 | |||
| 569 | /* | ||
| 570 | * bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out | ||
| 571 | */ | ||
| 572 | static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer) | ||
| 573 | { | ||
| 574 | struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer); | ||
| 575 | |||
| 576 | /* schedule before NET_RX_SOFTIRQ */ | ||
| 577 | tasklet_hi_schedule(&op->tsklet); | ||
| 578 | |||
| 579 | /* no restart of the timer is done here! */ | ||
| 580 | |||
| 581 | /* if user wants to be informed, when cyclic CAN-Messages come back */ | ||
| 582 | if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) { | ||
| 583 | /* clear received CAN frames to indicate 'nothing received' */ | ||
| 584 | memset(op->last_frames, 0, op->nframes * op->cfsiz); | ||
| 585 | } | ||
| 586 | 572 | ||
| 587 | return HRTIMER_NORESTART; | 573 | return HRTIMER_NORESTART; |
| 588 | } | 574 | } |
| @@ -590,14 +576,12 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer) | |||
| 590 | /* | 576 | /* |
| 591 | * bcm_rx_do_flush - helper for bcm_rx_thr_flush | 577 | * bcm_rx_do_flush - helper for bcm_rx_thr_flush |
| 592 | */ | 578 | */ |
| 593 | static inline int bcm_rx_do_flush(struct bcm_op *op, int update, | 579 | static inline int bcm_rx_do_flush(struct bcm_op *op, unsigned int index) |
| 594 | unsigned int index) | ||
| 595 | { | 580 | { |
| 596 | struct canfd_frame *lcf = op->last_frames + op->cfsiz * index; | 581 | struct canfd_frame *lcf = op->last_frames + op->cfsiz * index; |
| 597 | 582 | ||
| 598 | if ((op->last_frames) && (lcf->flags & RX_THR)) { | 583 | if ((op->last_frames) && (lcf->flags & RX_THR)) { |
| 599 | if (update) | 584 | bcm_rx_changed(op, lcf); |
| 600 | bcm_rx_changed(op, lcf); | ||
| 601 | return 1; | 585 | return 1; |
| 602 | } | 586 | } |
| 603 | return 0; | 587 | return 0; |
| @@ -605,11 +589,8 @@ static inline int bcm_rx_do_flush(struct bcm_op *op, int update, | |||
| 605 | 589 | ||
| 606 | /* | 590 | /* |
| 607 | * bcm_rx_thr_flush - Check for throttled data and send it to the userspace | 591 | * bcm_rx_thr_flush - Check for throttled data and send it to the userspace |
| 608 | * | ||
| 609 | * update == 0 : just check if throttled data is available (any irq context) | ||
| 610 | * update == 1 : check and send throttled data to userspace (soft_irq context) | ||
| 611 | */ | 592 | */ |
| 612 | static int bcm_rx_thr_flush(struct bcm_op *op, int update) | 593 | static int bcm_rx_thr_flush(struct bcm_op *op) |
| 613 | { | 594 | { |
| 614 | int updated = 0; | 595 | int updated = 0; |
| 615 | 596 | ||
| @@ -618,24 +599,16 @@ static int bcm_rx_thr_flush(struct bcm_op *op, int update) | |||
| 618 | 599 | ||
| 619 | /* for MUX filter we start at index 1 */ | 600 | /* for MUX filter we start at index 1 */ |
| 620 | for (i = 1; i < op->nframes; i++) | 601 | for (i = 1; i < op->nframes; i++) |
| 621 | updated += bcm_rx_do_flush(op, update, i); | 602 | updated += bcm_rx_do_flush(op, i); |
| 622 | 603 | ||
| 623 | } else { | 604 | } else { |
| 624 | /* for RX_FILTER_ID and simple filter */ | 605 | /* for RX_FILTER_ID and simple filter */ |
| 625 | updated += bcm_rx_do_flush(op, update, 0); | 606 | updated += bcm_rx_do_flush(op, 0); |
| 626 | } | 607 | } |
| 627 | 608 | ||
| 628 | return updated; | 609 | return updated; |
| 629 | } | 610 | } |
| 630 | 611 | ||
| 631 | static void bcm_rx_thr_tsklet(unsigned long data) | ||
| 632 | { | ||
| 633 | struct bcm_op *op = (struct bcm_op *)data; | ||
| 634 | |||
| 635 | /* push the changed data to the userspace */ | ||
| 636 | bcm_rx_thr_flush(op, 1); | ||
| 637 | } | ||
| 638 | |||
| 639 | /* | 612 | /* |
| 640 | * bcm_rx_thr_handler - the time for blocked content updates is over now: | 613 | * bcm_rx_thr_handler - the time for blocked content updates is over now: |
| 641 | * Check for throttled data and send it to the userspace | 614 | * Check for throttled data and send it to the userspace |
| @@ -644,9 +617,7 @@ static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer) | |||
| 644 | { | 617 | { |
| 645 | struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer); | 618 | struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer); |
| 646 | 619 | ||
| 647 | tasklet_schedule(&op->thrtsklet); | 620 | if (bcm_rx_thr_flush(op)) { |
| 648 | |||
| 649 | if (bcm_rx_thr_flush(op, 0)) { | ||
| 650 | hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2); | 621 | hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2); |
| 651 | return HRTIMER_RESTART; | 622 | return HRTIMER_RESTART; |
| 652 | } else { | 623 | } else { |
| @@ -742,23 +713,8 @@ static struct bcm_op *bcm_find_op(struct list_head *ops, | |||
| 742 | 713 | ||
| 743 | static void bcm_remove_op(struct bcm_op *op) | 714 | static void bcm_remove_op(struct bcm_op *op) |
| 744 | { | 715 | { |
| 745 | if (op->tsklet.func) { | 716 | hrtimer_cancel(&op->timer); |
| 746 | while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) || | 717 | hrtimer_cancel(&op->thrtimer); |
| 747 | test_bit(TASKLET_STATE_RUN, &op->tsklet.state) || | ||
| 748 | hrtimer_active(&op->timer)) { | ||
| 749 | hrtimer_cancel(&op->timer); | ||
| 750 | tasklet_kill(&op->tsklet); | ||
| 751 | } | ||
| 752 | } | ||
| 753 | |||
| 754 | if (op->thrtsklet.func) { | ||
| 755 | while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) || | ||
| 756 | test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) || | ||
| 757 | hrtimer_active(&op->thrtimer)) { | ||
| 758 | hrtimer_cancel(&op->thrtimer); | ||
| 759 | tasklet_kill(&op->thrtsklet); | ||
| 760 | } | ||
| 761 | } | ||
| 762 | 718 | ||
| 763 | if ((op->frames) && (op->frames != &op->sframe)) | 719 | if ((op->frames) && (op->frames != &op->sframe)) |
| 764 | kfree(op->frames); | 720 | kfree(op->frames); |
| @@ -991,15 +947,13 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, | |||
| 991 | op->ifindex = ifindex; | 947 | op->ifindex = ifindex; |
| 992 | 948 | ||
| 993 | /* initialize uninitialized (kzalloc) structure */ | 949 | /* initialize uninitialized (kzalloc) structure */ |
| 994 | hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 950 | hrtimer_init(&op->timer, CLOCK_MONOTONIC, |
| 951 | HRTIMER_MODE_REL_SOFT); | ||
| 995 | op->timer.function = bcm_tx_timeout_handler; | 952 | op->timer.function = bcm_tx_timeout_handler; |
| 996 | 953 | ||
| 997 | /* initialize tasklet for tx countevent notification */ | ||
| 998 | tasklet_init(&op->tsklet, bcm_tx_timeout_tsklet, | ||
| 999 | (unsigned long) op); | ||
| 1000 | |||
| 1001 | /* currently unused in tx_ops */ | 954 | /* currently unused in tx_ops */ |
| 1002 | hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 955 | hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, |
| 956 | HRTIMER_MODE_REL_SOFT); | ||
| 1003 | 957 | ||
| 1004 | /* add this bcm_op to the list of the tx_ops */ | 958 | /* add this bcm_op to the list of the tx_ops */ |
| 1005 | list_add(&op->list, &bo->tx_ops); | 959 | list_add(&op->list, &bo->tx_ops); |
| @@ -1168,20 +1122,14 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, | |||
| 1168 | op->rx_ifindex = ifindex; | 1122 | op->rx_ifindex = ifindex; |
| 1169 | 1123 | ||
| 1170 | /* initialize uninitialized (kzalloc) structure */ | 1124 | /* initialize uninitialized (kzalloc) structure */ |
| 1171 | hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 1125 | hrtimer_init(&op->timer, CLOCK_MONOTONIC, |
| 1126 | HRTIMER_MODE_REL_SOFT); | ||
| 1172 | op->timer.function = bcm_rx_timeout_handler; | 1127 | op->timer.function = bcm_rx_timeout_handler; |
| 1173 | 1128 | ||
| 1174 | /* initialize tasklet for rx timeout notification */ | 1129 | hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, |
| 1175 | tasklet_init(&op->tsklet, bcm_rx_timeout_tsklet, | 1130 | HRTIMER_MODE_REL_SOFT); |
| 1176 | (unsigned long) op); | ||
| 1177 | |||
| 1178 | hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
| 1179 | op->thrtimer.function = bcm_rx_thr_handler; | 1131 | op->thrtimer.function = bcm_rx_thr_handler; |
| 1180 | 1132 | ||
| 1181 | /* initialize tasklet for rx throttle handling */ | ||
| 1182 | tasklet_init(&op->thrtsklet, bcm_rx_thr_tsklet, | ||
| 1183 | (unsigned long) op); | ||
| 1184 | |||
| 1185 | /* add this bcm_op to the list of the rx_ops */ | 1133 | /* add this bcm_op to the list of the rx_ops */ |
| 1186 | list_add(&op->list, &bo->rx_ops); | 1134 | list_add(&op->list, &bo->rx_ops); |
| 1187 | 1135 | ||
| @@ -1227,12 +1175,12 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, | |||
| 1227 | */ | 1175 | */ |
| 1228 | op->kt_lastmsg = 0; | 1176 | op->kt_lastmsg = 0; |
| 1229 | hrtimer_cancel(&op->thrtimer); | 1177 | hrtimer_cancel(&op->thrtimer); |
| 1230 | bcm_rx_thr_flush(op, 1); | 1178 | bcm_rx_thr_flush(op); |
| 1231 | } | 1179 | } |
| 1232 | 1180 | ||
| 1233 | if ((op->flags & STARTTIMER) && op->kt_ival1) | 1181 | if ((op->flags & STARTTIMER) && op->kt_ival1) |
| 1234 | hrtimer_start(&op->timer, op->kt_ival1, | 1182 | hrtimer_start(&op->timer, op->kt_ival1, |
| 1235 | HRTIMER_MODE_REL); | 1183 | HRTIMER_MODE_REL_SOFT); |
| 1236 | } | 1184 | } |
| 1237 | 1185 | ||
| 1238 | /* now we can register for can_ids, if we added a new bcm_op */ | 1186 | /* now we can register for can_ids, if we added a new bcm_op */ |
| @@ -1680,8 +1628,8 @@ static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, | |||
| 1680 | return size; | 1628 | return size; |
| 1681 | } | 1629 | } |
| 1682 | 1630 | ||
| 1683 | int bcm_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd, | 1631 | static int bcm_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd, |
| 1684 | unsigned long arg) | 1632 | unsigned long arg) |
| 1685 | { | 1633 | { |
| 1686 | /* no ioctls for socket layer -> hand it down to NIC layer */ | 1634 | /* no ioctls for socket layer -> hand it down to NIC layer */ |
| 1687 | return -ENOIOCTLCMD; | 1635 | return -ENOIOCTLCMD; |
diff --git a/net/can/gw.c b/net/can/gw.c index ce17f836262b..65d60c93af29 100644 --- a/net/can/gw.c +++ b/net/can/gw.c | |||
| @@ -1,8 +1,7 @@ | |||
| 1 | // SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) | 1 | // SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) |
| 2 | /* | 2 | /* gw.c - CAN frame Gateway/Router/Bridge with netlink interface |
| 3 | * gw.c - CAN frame Gateway/Router/Bridge with netlink interface | ||
| 4 | * | 3 | * |
| 5 | * Copyright (c) 2017 Volkswagen Group Electronic Research | 4 | * Copyright (c) 2019 Volkswagen Group Electronic Research |
| 6 | * All rights reserved. | 5 | * All rights reserved. |
| 7 | * | 6 | * |
| 8 | * Redistribution and use in source and binary forms, with or without | 7 | * Redistribution and use in source and binary forms, with or without |
| @@ -60,7 +59,7 @@ | |||
| 60 | #include <net/net_namespace.h> | 59 | #include <net/net_namespace.h> |
| 61 | #include <net/sock.h> | 60 | #include <net/sock.h> |
| 62 | 61 | ||
| 63 | #define CAN_GW_VERSION "20170425" | 62 | #define CAN_GW_VERSION "20190810" |
| 64 | #define CAN_GW_NAME "can-gw" | 63 | #define CAN_GW_NAME "can-gw" |
| 65 | 64 | ||
| 66 | MODULE_DESCRIPTION("PF_CAN netlink gateway"); | 65 | MODULE_DESCRIPTION("PF_CAN netlink gateway"); |
| @@ -86,10 +85,10 @@ static struct kmem_cache *cgw_cache __read_mostly; | |||
| 86 | /* structure that contains the (on-the-fly) CAN frame modifications */ | 85 | /* structure that contains the (on-the-fly) CAN frame modifications */ |
| 87 | struct cf_mod { | 86 | struct cf_mod { |
| 88 | struct { | 87 | struct { |
| 89 | struct can_frame and; | 88 | struct canfd_frame and; |
| 90 | struct can_frame or; | 89 | struct canfd_frame or; |
| 91 | struct can_frame xor; | 90 | struct canfd_frame xor; |
| 92 | struct can_frame set; | 91 | struct canfd_frame set; |
| 93 | } modframe; | 92 | } modframe; |
| 94 | struct { | 93 | struct { |
| 95 | u8 and; | 94 | u8 and; |
| @@ -97,7 +96,7 @@ struct cf_mod { | |||
| 97 | u8 xor; | 96 | u8 xor; |
| 98 | u8 set; | 97 | u8 set; |
| 99 | } modtype; | 98 | } modtype; |
| 100 | void (*modfunc[MAX_MODFUNCTIONS])(struct can_frame *cf, | 99 | void (*modfunc[MAX_MODFUNCTIONS])(struct canfd_frame *cf, |
| 101 | struct cf_mod *mod); | 100 | struct cf_mod *mod); |
| 102 | 101 | ||
| 103 | /* CAN frame checksum calculation after CAN frame modifications */ | 102 | /* CAN frame checksum calculation after CAN frame modifications */ |
| @@ -106,15 +105,15 @@ struct cf_mod { | |||
| 106 | struct cgw_csum_crc8 crc8; | 105 | struct cgw_csum_crc8 crc8; |
| 107 | } csum; | 106 | } csum; |
| 108 | struct { | 107 | struct { |
| 109 | void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor); | 108 | void (*xor)(struct canfd_frame *cf, |
| 110 | void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8); | 109 | struct cgw_csum_xor *xor); |
| 110 | void (*crc8)(struct canfd_frame *cf, | ||
| 111 | struct cgw_csum_crc8 *crc8); | ||
| 111 | } csumfunc; | 112 | } csumfunc; |
| 112 | u32 uid; | 113 | u32 uid; |
| 113 | }; | 114 | }; |
| 114 | 115 | ||
| 115 | 116 | /* So far we just support CAN -> CAN routing and frame modifications. | |
| 116 | /* | ||
| 117 | * So far we just support CAN -> CAN routing and frame modifications. | ||
| 118 | * | 117 | * |
| 119 | * The internal can_can_gw structure contains data and attributes for | 118 | * The internal can_can_gw structure contains data and attributes for |
| 120 | * a CAN -> CAN gateway job. | 119 | * a CAN -> CAN gateway job. |
| @@ -152,39 +151,88 @@ struct cgw_job { | |||
| 152 | 151 | ||
| 153 | /* modification functions that are invoked in the hot path in can_can_gw_rcv */ | 152 | /* modification functions that are invoked in the hot path in can_can_gw_rcv */ |
| 154 | 153 | ||
| 155 | #define MODFUNC(func, op) static void func(struct can_frame *cf, \ | 154 | #define MODFUNC(func, op) static void func(struct canfd_frame *cf, \ |
| 156 | struct cf_mod *mod) { op ; } | 155 | struct cf_mod *mod) { op ; } |
| 157 | 156 | ||
| 158 | MODFUNC(mod_and_id, cf->can_id &= mod->modframe.and.can_id) | 157 | MODFUNC(mod_and_id, cf->can_id &= mod->modframe.and.can_id) |
| 159 | MODFUNC(mod_and_dlc, cf->can_dlc &= mod->modframe.and.can_dlc) | 158 | MODFUNC(mod_and_len, cf->len &= mod->modframe.and.len) |
| 159 | MODFUNC(mod_and_flags, cf->flags &= mod->modframe.and.flags) | ||
| 160 | MODFUNC(mod_and_data, *(u64 *)cf->data &= *(u64 *)mod->modframe.and.data) | 160 | MODFUNC(mod_and_data, *(u64 *)cf->data &= *(u64 *)mod->modframe.and.data) |
| 161 | MODFUNC(mod_or_id, cf->can_id |= mod->modframe.or.can_id) | 161 | MODFUNC(mod_or_id, cf->can_id |= mod->modframe.or.can_id) |
| 162 | MODFUNC(mod_or_dlc, cf->can_dlc |= mod->modframe.or.can_dlc) | 162 | MODFUNC(mod_or_len, cf->len |= mod->modframe.or.len) |
| 163 | MODFUNC(mod_or_flags, cf->flags |= mod->modframe.or.flags) | ||
| 163 | MODFUNC(mod_or_data, *(u64 *)cf->data |= *(u64 *)mod->modframe.or.data) | 164 | MODFUNC(mod_or_data, *(u64 *)cf->data |= *(u64 *)mod->modframe.or.data) |
| 164 | MODFUNC(mod_xor_id, cf->can_id ^= mod->modframe.xor.can_id) | 165 | MODFUNC(mod_xor_id, cf->can_id ^= mod->modframe.xor.can_id) |
| 165 | MODFUNC(mod_xor_dlc, cf->can_dlc ^= mod->modframe.xor.can_dlc) | 166 | MODFUNC(mod_xor_len, cf->len ^= mod->modframe.xor.len) |
| 167 | MODFUNC(mod_xor_flags, cf->flags ^= mod->modframe.xor.flags) | ||
| 166 | MODFUNC(mod_xor_data, *(u64 *)cf->data ^= *(u64 *)mod->modframe.xor.data) | 168 | MODFUNC(mod_xor_data, *(u64 *)cf->data ^= *(u64 *)mod->modframe.xor.data) |
| 167 | MODFUNC(mod_set_id, cf->can_id = mod->modframe.set.can_id) | 169 | MODFUNC(mod_set_id, cf->can_id = mod->modframe.set.can_id) |
| 168 | MODFUNC(mod_set_dlc, cf->can_dlc = mod->modframe.set.can_dlc) | 170 | MODFUNC(mod_set_len, cf->len = mod->modframe.set.len) |
| 171 | MODFUNC(mod_set_flags, cf->flags = mod->modframe.set.flags) | ||
| 169 | MODFUNC(mod_set_data, *(u64 *)cf->data = *(u64 *)mod->modframe.set.data) | 172 | MODFUNC(mod_set_data, *(u64 *)cf->data = *(u64 *)mod->modframe.set.data) |
| 170 | 173 | ||
| 171 | static inline void canframecpy(struct can_frame *dst, struct can_frame *src) | 174 | static void mod_and_fddata(struct canfd_frame *cf, struct cf_mod *mod) |
| 175 | { | ||
| 176 | int i; | ||
| 177 | |||
| 178 | for (i = 0; i < CANFD_MAX_DLEN; i += 8) | ||
| 179 | *(u64 *)(cf->data + i) &= *(u64 *)(mod->modframe.and.data + i); | ||
| 180 | } | ||
| 181 | |||
| 182 | static void mod_or_fddata(struct canfd_frame *cf, struct cf_mod *mod) | ||
| 183 | { | ||
| 184 | int i; | ||
| 185 | |||
| 186 | for (i = 0; i < CANFD_MAX_DLEN; i += 8) | ||
| 187 | *(u64 *)(cf->data + i) |= *(u64 *)(mod->modframe.or.data + i); | ||
| 188 | } | ||
| 189 | |||
| 190 | static void mod_xor_fddata(struct canfd_frame *cf, struct cf_mod *mod) | ||
| 191 | { | ||
| 192 | int i; | ||
| 193 | |||
| 194 | for (i = 0; i < CANFD_MAX_DLEN; i += 8) | ||
| 195 | *(u64 *)(cf->data + i) ^= *(u64 *)(mod->modframe.xor.data + i); | ||
| 196 | } | ||
| 197 | |||
| 198 | static void mod_set_fddata(struct canfd_frame *cf, struct cf_mod *mod) | ||
| 199 | { | ||
| 200 | memcpy(cf->data, mod->modframe.set.data, CANFD_MAX_DLEN); | ||
| 201 | } | ||
| 202 | |||
| 203 | static void canframecpy(struct canfd_frame *dst, struct can_frame *src) | ||
| 172 | { | 204 | { |
| 173 | /* | 205 | /* Copy the struct members separately to ensure that no uninitialized |
| 174 | * Copy the struct members separately to ensure that no uninitialized | ||
| 175 | * data are copied in the 3 bytes hole of the struct. This is needed | 206 | * data are copied in the 3 bytes hole of the struct. This is needed |
| 176 | * to make easy compares of the data in the struct cf_mod. | 207 | * to make easy compares of the data in the struct cf_mod. |
| 177 | */ | 208 | */ |
| 178 | 209 | ||
| 179 | dst->can_id = src->can_id; | 210 | dst->can_id = src->can_id; |
| 180 | dst->can_dlc = src->can_dlc; | 211 | dst->len = src->can_dlc; |
| 181 | *(u64 *)dst->data = *(u64 *)src->data; | 212 | *(u64 *)dst->data = *(u64 *)src->data; |
| 182 | } | 213 | } |
| 183 | 214 | ||
| 184 | static int cgw_chk_csum_parms(s8 fr, s8 to, s8 re) | 215 | static void canfdframecpy(struct canfd_frame *dst, struct canfd_frame *src) |
| 185 | { | 216 | { |
| 186 | /* | 217 | /* Copy the struct members separately to ensure that no uninitialized |
| 187 | * absolute dlc values 0 .. 7 => 0 .. 7, e.g. data [0] | 218 | * data are copied in the 2 bytes hole of the struct. This is needed |
| 219 | * to make easy compares of the data in the struct cf_mod. | ||
| 220 | */ | ||
| 221 | |||
| 222 | dst->can_id = src->can_id; | ||
| 223 | dst->flags = src->flags; | ||
| 224 | dst->len = src->len; | ||
| 225 | memcpy(dst->data, src->data, CANFD_MAX_DLEN); | ||
| 226 | } | ||
| 227 | |||
| 228 | static int cgw_chk_csum_parms(s8 fr, s8 to, s8 re, struct rtcanmsg *r) | ||
| 229 | { | ||
| 230 | s8 dlen = CAN_MAX_DLEN; | ||
| 231 | |||
| 232 | if (r->flags & CGW_FLAGS_CAN_FD) | ||
| 233 | dlen = CANFD_MAX_DLEN; | ||
| 234 | |||
| 235 | /* absolute dlc values 0 .. 7 => 0 .. 7, e.g. data [0] | ||
| 188 | * relative to received dlc -1 .. -8 : | 236 | * relative to received dlc -1 .. -8 : |
| 189 | * e.g. for received dlc = 8 | 237 | * e.g. for received dlc = 8 |
| 190 | * -1 => index = 7 (data[7]) | 238 | * -1 => index = 7 (data[7]) |
| @@ -192,27 +240,27 @@ static int cgw_chk_csum_parms(s8 fr, s8 to, s8 re) | |||
| 192 | * -8 => index = 0 (data[0]) | 240 | * -8 => index = 0 (data[0]) |
| 193 | */ | 241 | */ |
| 194 | 242 | ||
| 195 | if (fr > -9 && fr < 8 && | 243 | if (fr >= -dlen && fr < dlen && |
| 196 | to > -9 && to < 8 && | 244 | to >= -dlen && to < dlen && |
| 197 | re > -9 && re < 8) | 245 | re >= -dlen && re < dlen) |
| 198 | return 0; | 246 | return 0; |
| 199 | else | 247 | else |
| 200 | return -EINVAL; | 248 | return -EINVAL; |
| 201 | } | 249 | } |
| 202 | 250 | ||
| 203 | static inline int calc_idx(int idx, int rx_dlc) | 251 | static inline int calc_idx(int idx, int rx_len) |
| 204 | { | 252 | { |
| 205 | if (idx < 0) | 253 | if (idx < 0) |
| 206 | return rx_dlc + idx; | 254 | return rx_len + idx; |
| 207 | else | 255 | else |
| 208 | return idx; | 256 | return idx; |
| 209 | } | 257 | } |
| 210 | 258 | ||
| 211 | static void cgw_csum_xor_rel(struct can_frame *cf, struct cgw_csum_xor *xor) | 259 | static void cgw_csum_xor_rel(struct canfd_frame *cf, struct cgw_csum_xor *xor) |
| 212 | { | 260 | { |
| 213 | int from = calc_idx(xor->from_idx, cf->can_dlc); | 261 | int from = calc_idx(xor->from_idx, cf->len); |
| 214 | int to = calc_idx(xor->to_idx, cf->can_dlc); | 262 | int to = calc_idx(xor->to_idx, cf->len); |
| 215 | int res = calc_idx(xor->result_idx, cf->can_dlc); | 263 | int res = calc_idx(xor->result_idx, cf->len); |
| 216 | u8 val = xor->init_xor_val; | 264 | u8 val = xor->init_xor_val; |
| 217 | int i; | 265 | int i; |
| 218 | 266 | ||
| @@ -230,7 +278,7 @@ static void cgw_csum_xor_rel(struct can_frame *cf, struct cgw_csum_xor *xor) | |||
| 230 | cf->data[res] = val; | 278 | cf->data[res] = val; |
| 231 | } | 279 | } |
| 232 | 280 | ||
| 233 | static void cgw_csum_xor_pos(struct can_frame *cf, struct cgw_csum_xor *xor) | 281 | static void cgw_csum_xor_pos(struct canfd_frame *cf, struct cgw_csum_xor *xor) |
| 234 | { | 282 | { |
| 235 | u8 val = xor->init_xor_val; | 283 | u8 val = xor->init_xor_val; |
| 236 | int i; | 284 | int i; |
| @@ -241,7 +289,7 @@ static void cgw_csum_xor_pos(struct can_frame *cf, struct cgw_csum_xor *xor) | |||
| 241 | cf->data[xor->result_idx] = val; | 289 | cf->data[xor->result_idx] = val; |
| 242 | } | 290 | } |
| 243 | 291 | ||
| 244 | static void cgw_csum_xor_neg(struct can_frame *cf, struct cgw_csum_xor *xor) | 292 | static void cgw_csum_xor_neg(struct canfd_frame *cf, struct cgw_csum_xor *xor) |
| 245 | { | 293 | { |
| 246 | u8 val = xor->init_xor_val; | 294 | u8 val = xor->init_xor_val; |
| 247 | int i; | 295 | int i; |
| @@ -252,11 +300,12 @@ static void cgw_csum_xor_neg(struct can_frame *cf, struct cgw_csum_xor *xor) | |||
| 252 | cf->data[xor->result_idx] = val; | 300 | cf->data[xor->result_idx] = val; |
| 253 | } | 301 | } |
| 254 | 302 | ||
| 255 | static void cgw_csum_crc8_rel(struct can_frame *cf, struct cgw_csum_crc8 *crc8) | 303 | static void cgw_csum_crc8_rel(struct canfd_frame *cf, |
| 304 | struct cgw_csum_crc8 *crc8) | ||
| 256 | { | 305 | { |
| 257 | int from = calc_idx(crc8->from_idx, cf->can_dlc); | 306 | int from = calc_idx(crc8->from_idx, cf->len); |
| 258 | int to = calc_idx(crc8->to_idx, cf->can_dlc); | 307 | int to = calc_idx(crc8->to_idx, cf->len); |
| 259 | int res = calc_idx(crc8->result_idx, cf->can_dlc); | 308 | int res = calc_idx(crc8->result_idx, cf->len); |
| 260 | u8 crc = crc8->init_crc_val; | 309 | u8 crc = crc8->init_crc_val; |
| 261 | int i; | 310 | int i; |
| 262 | 311 | ||
| @@ -265,96 +314,102 @@ static void cgw_csum_crc8_rel(struct can_frame *cf, struct cgw_csum_crc8 *crc8) | |||
| 265 | 314 | ||
| 266 | if (from <= to) { | 315 | if (from <= to) { |
| 267 | for (i = crc8->from_idx; i <= crc8->to_idx; i++) | 316 | for (i = crc8->from_idx; i <= crc8->to_idx; i++) |
| 268 | crc = crc8->crctab[crc^cf->data[i]]; | 317 | crc = crc8->crctab[crc ^ cf->data[i]]; |
| 269 | } else { | 318 | } else { |
| 270 | for (i = crc8->from_idx; i >= crc8->to_idx; i--) | 319 | for (i = crc8->from_idx; i >= crc8->to_idx; i--) |
| 271 | crc = crc8->crctab[crc^cf->data[i]]; | 320 | crc = crc8->crctab[crc ^ cf->data[i]]; |
| 272 | } | 321 | } |
| 273 | 322 | ||
| 274 | switch (crc8->profile) { | 323 | switch (crc8->profile) { |
| 275 | |||
| 276 | case CGW_CRC8PRF_1U8: | 324 | case CGW_CRC8PRF_1U8: |
| 277 | crc = crc8->crctab[crc^crc8->profile_data[0]]; | 325 | crc = crc8->crctab[crc ^ crc8->profile_data[0]]; |
| 278 | break; | 326 | break; |
| 279 | 327 | ||
| 280 | case CGW_CRC8PRF_16U8: | 328 | case CGW_CRC8PRF_16U8: |
| 281 | crc = crc8->crctab[crc^crc8->profile_data[cf->data[1] & 0xF]]; | 329 | crc = crc8->crctab[crc ^ crc8->profile_data[cf->data[1] & 0xF]]; |
| 282 | break; | 330 | break; |
| 283 | 331 | ||
| 284 | case CGW_CRC8PRF_SFFID_XOR: | 332 | case CGW_CRC8PRF_SFFID_XOR: |
| 285 | crc = crc8->crctab[crc^(cf->can_id & 0xFF)^ | 333 | crc = crc8->crctab[crc ^ (cf->can_id & 0xFF) ^ |
| 286 | (cf->can_id >> 8 & 0xFF)]; | 334 | (cf->can_id >> 8 & 0xFF)]; |
| 287 | break; | 335 | break; |
| 288 | |||
| 289 | } | 336 | } |
| 290 | 337 | ||
| 291 | cf->data[crc8->result_idx] = crc^crc8->final_xor_val; | 338 | cf->data[crc8->result_idx] = crc ^ crc8->final_xor_val; |
| 292 | } | 339 | } |
| 293 | 340 | ||
| 294 | static void cgw_csum_crc8_pos(struct can_frame *cf, struct cgw_csum_crc8 *crc8) | 341 | static void cgw_csum_crc8_pos(struct canfd_frame *cf, |
| 342 | struct cgw_csum_crc8 *crc8) | ||
| 295 | { | 343 | { |
| 296 | u8 crc = crc8->init_crc_val; | 344 | u8 crc = crc8->init_crc_val; |
| 297 | int i; | 345 | int i; |
| 298 | 346 | ||
| 299 | for (i = crc8->from_idx; i <= crc8->to_idx; i++) | 347 | for (i = crc8->from_idx; i <= crc8->to_idx; i++) |
| 300 | crc = crc8->crctab[crc^cf->data[i]]; | 348 | crc = crc8->crctab[crc ^ cf->data[i]]; |
| 301 | 349 | ||
| 302 | switch (crc8->profile) { | 350 | switch (crc8->profile) { |
| 303 | |||
| 304 | case CGW_CRC8PRF_1U8: | 351 | case CGW_CRC8PRF_1U8: |
| 305 | crc = crc8->crctab[crc^crc8->profile_data[0]]; | 352 | crc = crc8->crctab[crc ^ crc8->profile_data[0]]; |
| 306 | break; | 353 | break; |
| 307 | 354 | ||
| 308 | case CGW_CRC8PRF_16U8: | 355 | case CGW_CRC8PRF_16U8: |
| 309 | crc = crc8->crctab[crc^crc8->profile_data[cf->data[1] & 0xF]]; | 356 | crc = crc8->crctab[crc ^ crc8->profile_data[cf->data[1] & 0xF]]; |
| 310 | break; | 357 | break; |
| 311 | 358 | ||
| 312 | case CGW_CRC8PRF_SFFID_XOR: | 359 | case CGW_CRC8PRF_SFFID_XOR: |
| 313 | crc = crc8->crctab[crc^(cf->can_id & 0xFF)^ | 360 | crc = crc8->crctab[crc ^ (cf->can_id & 0xFF) ^ |
| 314 | (cf->can_id >> 8 & 0xFF)]; | 361 | (cf->can_id >> 8 & 0xFF)]; |
| 315 | break; | 362 | break; |
| 316 | } | 363 | } |
| 317 | 364 | ||
| 318 | cf->data[crc8->result_idx] = crc^crc8->final_xor_val; | 365 | cf->data[crc8->result_idx] = crc ^ crc8->final_xor_val; |
| 319 | } | 366 | } |
| 320 | 367 | ||
| 321 | static void cgw_csum_crc8_neg(struct can_frame *cf, struct cgw_csum_crc8 *crc8) | 368 | static void cgw_csum_crc8_neg(struct canfd_frame *cf, |
| 369 | struct cgw_csum_crc8 *crc8) | ||
| 322 | { | 370 | { |
| 323 | u8 crc = crc8->init_crc_val; | 371 | u8 crc = crc8->init_crc_val; |
| 324 | int i; | 372 | int i; |
| 325 | 373 | ||
| 326 | for (i = crc8->from_idx; i >= crc8->to_idx; i--) | 374 | for (i = crc8->from_idx; i >= crc8->to_idx; i--) |
| 327 | crc = crc8->crctab[crc^cf->data[i]]; | 375 | crc = crc8->crctab[crc ^ cf->data[i]]; |
| 328 | 376 | ||
| 329 | switch (crc8->profile) { | 377 | switch (crc8->profile) { |
| 330 | |||
| 331 | case CGW_CRC8PRF_1U8: | 378 | case CGW_CRC8PRF_1U8: |
| 332 | crc = crc8->crctab[crc^crc8->profile_data[0]]; | 379 | crc = crc8->crctab[crc ^ crc8->profile_data[0]]; |
| 333 | break; | 380 | break; |
| 334 | 381 | ||
| 335 | case CGW_CRC8PRF_16U8: | 382 | case CGW_CRC8PRF_16U8: |
| 336 | crc = crc8->crctab[crc^crc8->profile_data[cf->data[1] & 0xF]]; | 383 | crc = crc8->crctab[crc ^ crc8->profile_data[cf->data[1] & 0xF]]; |
| 337 | break; | 384 | break; |
| 338 | 385 | ||
| 339 | case CGW_CRC8PRF_SFFID_XOR: | 386 | case CGW_CRC8PRF_SFFID_XOR: |
| 340 | crc = crc8->crctab[crc^(cf->can_id & 0xFF)^ | 387 | crc = crc8->crctab[crc ^ (cf->can_id & 0xFF) ^ |
| 341 | (cf->can_id >> 8 & 0xFF)]; | 388 | (cf->can_id >> 8 & 0xFF)]; |
| 342 | break; | 389 | break; |
| 343 | } | 390 | } |
| 344 | 391 | ||
| 345 | cf->data[crc8->result_idx] = crc^crc8->final_xor_val; | 392 | cf->data[crc8->result_idx] = crc ^ crc8->final_xor_val; |
| 346 | } | 393 | } |
| 347 | 394 | ||
| 348 | /* the receive & process & send function */ | 395 | /* the receive & process & send function */ |
| 349 | static void can_can_gw_rcv(struct sk_buff *skb, void *data) | 396 | static void can_can_gw_rcv(struct sk_buff *skb, void *data) |
| 350 | { | 397 | { |
| 351 | struct cgw_job *gwj = (struct cgw_job *)data; | 398 | struct cgw_job *gwj = (struct cgw_job *)data; |
| 352 | struct can_frame *cf; | 399 | struct canfd_frame *cf; |
| 353 | struct sk_buff *nskb; | 400 | struct sk_buff *nskb; |
| 354 | int modidx = 0; | 401 | int modidx = 0; |
| 355 | 402 | ||
| 356 | /* | 403 | /* process strictly Classic CAN or CAN FD frames */ |
| 357 | * Do not handle CAN frames routed more than 'max_hops' times. | 404 | if (gwj->flags & CGW_FLAGS_CAN_FD) { |
| 405 | if (skb->len != CANFD_MTU) | ||
| 406 | return; | ||
| 407 | } else { | ||
| 408 | if (skb->len != CAN_MTU) | ||
| 409 | return; | ||
| 410 | } | ||
| 411 | |||
| 412 | /* Do not handle CAN frames routed more than 'max_hops' times. | ||
| 358 | * In general we should never catch this delimiter which is intended | 413 | * In general we should never catch this delimiter which is intended |
| 359 | * to cover a misconfiguration protection (e.g. circular CAN routes). | 414 | * to cover a misconfiguration protection (e.g. circular CAN routes). |
| 360 | * | 415 | * |
| @@ -385,8 +440,7 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) | |||
| 385 | can_skb_prv(skb)->ifindex == gwj->dst.dev->ifindex) | 440 | can_skb_prv(skb)->ifindex == gwj->dst.dev->ifindex) |
| 386 | return; | 441 | return; |
| 387 | 442 | ||
| 388 | /* | 443 | /* clone the given skb, which has not been done in can_rcv() |
| 389 | * clone the given skb, which has not been done in can_rcv() | ||
| 390 | * | 444 | * |
| 391 | * When there is at least one modification function activated, | 445 | * When there is at least one modification function activated, |
| 392 | * we need to copy the skb as we want to modify skb->data. | 446 | * we need to copy the skb as we want to modify skb->data. |
| @@ -411,7 +465,7 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) | |||
| 411 | nskb->dev = gwj->dst.dev; | 465 | nskb->dev = gwj->dst.dev; |
| 412 | 466 | ||
| 413 | /* pointer to modifiable CAN frame */ | 467 | /* pointer to modifiable CAN frame */ |
| 414 | cf = (struct can_frame *)nskb->data; | 468 | cf = (struct canfd_frame *)nskb->data; |
| 415 | 469 | ||
| 416 | /* perform preprocessed modification functions if there are any */ | 470 | /* perform preprocessed modification functions if there are any */ |
| 417 | while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx]) | 471 | while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx]) |
| @@ -420,26 +474,22 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) | |||
| 420 | /* Has the CAN frame been modified? */ | 474 | /* Has the CAN frame been modified? */ |
| 421 | if (modidx) { | 475 | if (modidx) { |
| 422 | /* get available space for the processed CAN frame type */ | 476 | /* get available space for the processed CAN frame type */ |
| 423 | int max_len = nskb->len - offsetof(struct can_frame, data); | 477 | int max_len = nskb->len - offsetof(struct canfd_frame, data); |
| 424 | 478 | ||
| 425 | /* dlc may have changed, make sure it fits to the CAN frame */ | 479 | /* dlc may have changed, make sure it fits to the CAN frame */ |
| 426 | if (cf->can_dlc > max_len) | 480 | if (cf->len > max_len) { |
| 427 | goto out_delete; | 481 | /* delete frame due to misconfiguration */ |
| 428 | 482 | gwj->deleted_frames++; | |
| 429 | /* check for checksum updates in classic CAN length only */ | 483 | kfree_skb(nskb); |
| 430 | if (gwj->mod.csumfunc.crc8) { | 484 | return; |
| 431 | if (cf->can_dlc > 8) | ||
| 432 | goto out_delete; | ||
| 433 | |||
| 434 | (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8); | ||
| 435 | } | 485 | } |
| 436 | 486 | ||
| 437 | if (gwj->mod.csumfunc.xor) { | 487 | /* check for checksum updates */ |
| 438 | if (cf->can_dlc > 8) | 488 | if (gwj->mod.csumfunc.crc8) |
| 439 | goto out_delete; | 489 | (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8); |
| 440 | 490 | ||
| 491 | if (gwj->mod.csumfunc.xor) | ||
| 441 | (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor); | 492 | (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor); |
| 442 | } | ||
| 443 | } | 493 | } |
| 444 | 494 | ||
| 445 | /* clear the skb timestamp if not configured the other way */ | 495 | /* clear the skb timestamp if not configured the other way */ |
| @@ -451,14 +501,6 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) | |||
| 451 | gwj->dropped_frames++; | 501 | gwj->dropped_frames++; |
| 452 | else | 502 | else |
| 453 | gwj->handled_frames++; | 503 | gwj->handled_frames++; |
| 454 | |||
| 455 | return; | ||
| 456 | |||
| 457 | out_delete: | ||
| 458 | /* delete frame due to misconfiguration */ | ||
| 459 | gwj->deleted_frames++; | ||
| 460 | kfree_skb(nskb); | ||
| 461 | return; | ||
| 462 | } | 504 | } |
| 463 | 505 | ||
| 464 | static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj) | 506 | static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj) |
| @@ -484,14 +526,12 @@ static int cgw_notifier(struct notifier_block *nb, | |||
| 484 | return NOTIFY_DONE; | 526 | return NOTIFY_DONE; |
| 485 | 527 | ||
| 486 | if (msg == NETDEV_UNREGISTER) { | 528 | if (msg == NETDEV_UNREGISTER) { |
| 487 | |||
| 488 | struct cgw_job *gwj = NULL; | 529 | struct cgw_job *gwj = NULL; |
| 489 | struct hlist_node *nx; | 530 | struct hlist_node *nx; |
| 490 | 531 | ||
| 491 | ASSERT_RTNL(); | 532 | ASSERT_RTNL(); |
| 492 | 533 | ||
| 493 | hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) { | 534 | hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) { |
| 494 | |||
| 495 | if (gwj->src.dev == dev || gwj->dst.dev == dev) { | 535 | if (gwj->src.dev == dev || gwj->dst.dev == dev) { |
| 496 | hlist_del(&gwj->list); | 536 | hlist_del(&gwj->list); |
| 497 | cgw_unregister_filter(net, gwj); | 537 | cgw_unregister_filter(net, gwj); |
| @@ -506,7 +546,6 @@ static int cgw_notifier(struct notifier_block *nb, | |||
| 506 | static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type, | 546 | static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type, |
| 507 | u32 pid, u32 seq, int flags) | 547 | u32 pid, u32 seq, int flags) |
| 508 | { | 548 | { |
| 509 | struct cgw_frame_mod mb; | ||
| 510 | struct rtcanmsg *rtcan; | 549 | struct rtcanmsg *rtcan; |
| 511 | struct nlmsghdr *nlh; | 550 | struct nlmsghdr *nlh; |
| 512 | 551 | ||
| @@ -543,32 +582,66 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type, | |||
| 543 | goto cancel; | 582 | goto cancel; |
| 544 | } | 583 | } |
| 545 | 584 | ||
| 546 | if (gwj->mod.modtype.and) { | 585 | if (gwj->flags & CGW_FLAGS_CAN_FD) { |
| 547 | memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf)); | 586 | struct cgw_fdframe_mod mb; |
| 548 | mb.modtype = gwj->mod.modtype.and; | ||
| 549 | if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0) | ||
| 550 | goto cancel; | ||
| 551 | } | ||
| 552 | 587 | ||
| 553 | if (gwj->mod.modtype.or) { | 588 | if (gwj->mod.modtype.and) { |
| 554 | memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf)); | 589 | memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf)); |
| 555 | mb.modtype = gwj->mod.modtype.or; | 590 | mb.modtype = gwj->mod.modtype.and; |
| 556 | if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0) | 591 | if (nla_put(skb, CGW_FDMOD_AND, sizeof(mb), &mb) < 0) |
| 557 | goto cancel; | 592 | goto cancel; |
| 558 | } | 593 | } |
| 559 | 594 | ||
| 560 | if (gwj->mod.modtype.xor) { | 595 | if (gwj->mod.modtype.or) { |
| 561 | memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf)); | 596 | memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf)); |
| 562 | mb.modtype = gwj->mod.modtype.xor; | 597 | mb.modtype = gwj->mod.modtype.or; |
| 563 | if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0) | 598 | if (nla_put(skb, CGW_FDMOD_OR, sizeof(mb), &mb) < 0) |
| 564 | goto cancel; | 599 | goto cancel; |
| 565 | } | 600 | } |
| 566 | 601 | ||
| 567 | if (gwj->mod.modtype.set) { | 602 | if (gwj->mod.modtype.xor) { |
| 568 | memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf)); | 603 | memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf)); |
| 569 | mb.modtype = gwj->mod.modtype.set; | 604 | mb.modtype = gwj->mod.modtype.xor; |
| 570 | if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0) | 605 | if (nla_put(skb, CGW_FDMOD_XOR, sizeof(mb), &mb) < 0) |
| 571 | goto cancel; | 606 | goto cancel; |
| 607 | } | ||
| 608 | |||
| 609 | if (gwj->mod.modtype.set) { | ||
| 610 | memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf)); | ||
| 611 | mb.modtype = gwj->mod.modtype.set; | ||
| 612 | if (nla_put(skb, CGW_FDMOD_SET, sizeof(mb), &mb) < 0) | ||
| 613 | goto cancel; | ||
| 614 | } | ||
| 615 | } else { | ||
| 616 | struct cgw_frame_mod mb; | ||
| 617 | |||
| 618 | if (gwj->mod.modtype.and) { | ||
| 619 | memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf)); | ||
| 620 | mb.modtype = gwj->mod.modtype.and; | ||
| 621 | if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0) | ||
| 622 | goto cancel; | ||
| 623 | } | ||
| 624 | |||
| 625 | if (gwj->mod.modtype.or) { | ||
| 626 | memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf)); | ||
| 627 | mb.modtype = gwj->mod.modtype.or; | ||
| 628 | if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0) | ||
| 629 | goto cancel; | ||
| 630 | } | ||
| 631 | |||
| 632 | if (gwj->mod.modtype.xor) { | ||
| 633 | memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf)); | ||
| 634 | mb.modtype = gwj->mod.modtype.xor; | ||
| 635 | if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0) | ||
| 636 | goto cancel; | ||
| 637 | } | ||
| 638 | |||
| 639 | if (gwj->mod.modtype.set) { | ||
| 640 | memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf)); | ||
| 641 | mb.modtype = gwj->mod.modtype.set; | ||
| 642 | if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0) | ||
| 643 | goto cancel; | ||
| 644 | } | ||
| 572 | } | 645 | } |
| 573 | 646 | ||
| 574 | if (gwj->mod.uid) { | 647 | if (gwj->mod.uid) { |
| @@ -589,7 +662,6 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type, | |||
| 589 | } | 662 | } |
| 590 | 663 | ||
| 591 | if (gwj->gwtype == CGW_TYPE_CAN_CAN) { | 664 | if (gwj->gwtype == CGW_TYPE_CAN_CAN) { |
| 592 | |||
| 593 | if (gwj->ccgw.filter.can_id || gwj->ccgw.filter.can_mask) { | 665 | if (gwj->ccgw.filter.can_id || gwj->ccgw.filter.can_mask) { |
| 594 | if (nla_put(skb, CGW_FILTER, sizeof(struct can_filter), | 666 | if (nla_put(skb, CGW_FILTER, sizeof(struct can_filter), |
| 595 | &gwj->ccgw.filter) < 0) | 667 | &gwj->ccgw.filter) < 0) |
| @@ -624,8 +696,9 @@ static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 624 | if (idx < s_idx) | 696 | if (idx < s_idx) |
| 625 | goto cont; | 697 | goto cont; |
| 626 | 698 | ||
| 627 | if (cgw_put_job(skb, gwj, RTM_NEWROUTE, NETLINK_CB(cb->skb).portid, | 699 | if (cgw_put_job(skb, gwj, RTM_NEWROUTE, |
| 628 | cb->nlh->nlmsg_seq, NLM_F_MULTI) < 0) | 700 | NETLINK_CB(cb->skb).portid, |
| 701 | cb->nlh->nlmsg_seq, NLM_F_MULTI) < 0) | ||
| 629 | break; | 702 | break; |
| 630 | cont: | 703 | cont: |
| 631 | idx++; | 704 | idx++; |
| @@ -637,7 +710,7 @@ cont: | |||
| 637 | return skb->len; | 710 | return skb->len; |
| 638 | } | 711 | } |
| 639 | 712 | ||
| 640 | static const struct nla_policy cgw_policy[CGW_MAX+1] = { | 713 | static const struct nla_policy cgw_policy[CGW_MAX + 1] = { |
| 641 | [CGW_MOD_AND] = { .len = sizeof(struct cgw_frame_mod) }, | 714 | [CGW_MOD_AND] = { .len = sizeof(struct cgw_frame_mod) }, |
| 642 | [CGW_MOD_OR] = { .len = sizeof(struct cgw_frame_mod) }, | 715 | [CGW_MOD_OR] = { .len = sizeof(struct cgw_frame_mod) }, |
| 643 | [CGW_MOD_XOR] = { .len = sizeof(struct cgw_frame_mod) }, | 716 | [CGW_MOD_XOR] = { .len = sizeof(struct cgw_frame_mod) }, |
| @@ -649,14 +722,18 @@ static const struct nla_policy cgw_policy[CGW_MAX+1] = { | |||
| 649 | [CGW_FILTER] = { .len = sizeof(struct can_filter) }, | 722 | [CGW_FILTER] = { .len = sizeof(struct can_filter) }, |
| 650 | [CGW_LIM_HOPS] = { .type = NLA_U8 }, | 723 | [CGW_LIM_HOPS] = { .type = NLA_U8 }, |
| 651 | [CGW_MOD_UID] = { .type = NLA_U32 }, | 724 | [CGW_MOD_UID] = { .type = NLA_U32 }, |
| 725 | [CGW_FDMOD_AND] = { .len = sizeof(struct cgw_fdframe_mod) }, | ||
| 726 | [CGW_FDMOD_OR] = { .len = sizeof(struct cgw_fdframe_mod) }, | ||
| 727 | [CGW_FDMOD_XOR] = { .len = sizeof(struct cgw_fdframe_mod) }, | ||
| 728 | [CGW_FDMOD_SET] = { .len = sizeof(struct cgw_fdframe_mod) }, | ||
| 652 | }; | 729 | }; |
| 653 | 730 | ||
| 654 | /* check for common and gwtype specific attributes */ | 731 | /* check for common and gwtype specific attributes */ |
| 655 | static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod, | 732 | static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod, |
| 656 | u8 gwtype, void *gwtypeattr, u8 *limhops) | 733 | u8 gwtype, void *gwtypeattr, u8 *limhops) |
| 657 | { | 734 | { |
| 658 | struct nlattr *tb[CGW_MAX+1]; | 735 | struct nlattr *tb[CGW_MAX + 1]; |
| 659 | struct cgw_frame_mod mb; | 736 | struct rtcanmsg *r = nlmsg_data(nlh); |
| 660 | int modidx = 0; | 737 | int modidx = 0; |
| 661 | int err = 0; | 738 | int err = 0; |
| 662 | 739 | ||
| @@ -676,87 +753,166 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod, | |||
| 676 | } | 753 | } |
| 677 | 754 | ||
| 678 | /* check for AND/OR/XOR/SET modifications */ | 755 | /* check for AND/OR/XOR/SET modifications */ |
| 756 | if (r->flags & CGW_FLAGS_CAN_FD) { | ||
| 757 | struct cgw_fdframe_mod mb; | ||
| 679 | 758 | ||
| 680 | if (tb[CGW_MOD_AND]) { | 759 | if (tb[CGW_FDMOD_AND]) { |
| 681 | nla_memcpy(&mb, tb[CGW_MOD_AND], CGW_MODATTR_LEN); | 760 | nla_memcpy(&mb, tb[CGW_FDMOD_AND], CGW_FDMODATTR_LEN); |
| 682 | 761 | ||
| 683 | canframecpy(&mod->modframe.and, &mb.cf); | 762 | canfdframecpy(&mod->modframe.and, &mb.cf); |
| 684 | mod->modtype.and = mb.modtype; | 763 | mod->modtype.and = mb.modtype; |
| 685 | 764 | ||
| 686 | if (mb.modtype & CGW_MOD_ID) | 765 | if (mb.modtype & CGW_MOD_ID) |
| 687 | mod->modfunc[modidx++] = mod_and_id; | 766 | mod->modfunc[modidx++] = mod_and_id; |
| 688 | 767 | ||
| 689 | if (mb.modtype & CGW_MOD_DLC) | 768 | if (mb.modtype & CGW_MOD_LEN) |
| 690 | mod->modfunc[modidx++] = mod_and_dlc; | 769 | mod->modfunc[modidx++] = mod_and_len; |
| 691 | 770 | ||
| 692 | if (mb.modtype & CGW_MOD_DATA) | 771 | if (mb.modtype & CGW_MOD_FLAGS) |
| 693 | mod->modfunc[modidx++] = mod_and_data; | 772 | mod->modfunc[modidx++] = mod_and_flags; |
| 694 | } | 773 | |
| 774 | if (mb.modtype & CGW_MOD_DATA) | ||
| 775 | mod->modfunc[modidx++] = mod_and_fddata; | ||
| 776 | } | ||
| 695 | 777 | ||
| 696 | if (tb[CGW_MOD_OR]) { | 778 | if (tb[CGW_FDMOD_OR]) { |
| 697 | nla_memcpy(&mb, tb[CGW_MOD_OR], CGW_MODATTR_LEN); | 779 | nla_memcpy(&mb, tb[CGW_FDMOD_OR], CGW_FDMODATTR_LEN); |
| 698 | 780 | ||
| 699 | canframecpy(&mod->modframe.or, &mb.cf); | 781 | canfdframecpy(&mod->modframe.or, &mb.cf); |
| 700 | mod->modtype.or = mb.modtype; | 782 | mod->modtype.or = mb.modtype; |
| 701 | 783 | ||
| 702 | if (mb.modtype & CGW_MOD_ID) | 784 | if (mb.modtype & CGW_MOD_ID) |
| 703 | mod->modfunc[modidx++] = mod_or_id; | 785 | mod->modfunc[modidx++] = mod_or_id; |
| 704 | 786 | ||
| 705 | if (mb.modtype & CGW_MOD_DLC) | 787 | if (mb.modtype & CGW_MOD_LEN) |
| 706 | mod->modfunc[modidx++] = mod_or_dlc; | 788 | mod->modfunc[modidx++] = mod_or_len; |
| 707 | 789 | ||
| 708 | if (mb.modtype & CGW_MOD_DATA) | 790 | if (mb.modtype & CGW_MOD_FLAGS) |
| 709 | mod->modfunc[modidx++] = mod_or_data; | 791 | mod->modfunc[modidx++] = mod_or_flags; |
| 710 | } | ||
| 711 | 792 | ||
| 712 | if (tb[CGW_MOD_XOR]) { | 793 | if (mb.modtype & CGW_MOD_DATA) |
| 713 | nla_memcpy(&mb, tb[CGW_MOD_XOR], CGW_MODATTR_LEN); | 794 | mod->modfunc[modidx++] = mod_or_fddata; |
| 795 | } | ||
| 714 | 796 | ||
| 715 | canframecpy(&mod->modframe.xor, &mb.cf); | 797 | if (tb[CGW_FDMOD_XOR]) { |
| 716 | mod->modtype.xor = mb.modtype; | 798 | nla_memcpy(&mb, tb[CGW_FDMOD_XOR], CGW_FDMODATTR_LEN); |
| 717 | 799 | ||
| 718 | if (mb.modtype & CGW_MOD_ID) | 800 | canfdframecpy(&mod->modframe.xor, &mb.cf); |
| 719 | mod->modfunc[modidx++] = mod_xor_id; | 801 | mod->modtype.xor = mb.modtype; |
| 720 | 802 | ||
| 721 | if (mb.modtype & CGW_MOD_DLC) | 803 | if (mb.modtype & CGW_MOD_ID) |
| 722 | mod->modfunc[modidx++] = mod_xor_dlc; | 804 | mod->modfunc[modidx++] = mod_xor_id; |
| 723 | 805 | ||
| 724 | if (mb.modtype & CGW_MOD_DATA) | 806 | if (mb.modtype & CGW_MOD_LEN) |
| 725 | mod->modfunc[modidx++] = mod_xor_data; | 807 | mod->modfunc[modidx++] = mod_xor_len; |
| 726 | } | ||
| 727 | 808 | ||
| 728 | if (tb[CGW_MOD_SET]) { | 809 | if (mb.modtype & CGW_MOD_FLAGS) |
| 729 | nla_memcpy(&mb, tb[CGW_MOD_SET], CGW_MODATTR_LEN); | 810 | mod->modfunc[modidx++] = mod_xor_flags; |
| 811 | |||
| 812 | if (mb.modtype & CGW_MOD_DATA) | ||
| 813 | mod->modfunc[modidx++] = mod_xor_fddata; | ||
| 814 | } | ||
| 730 | 815 | ||
| 731 | canframecpy(&mod->modframe.set, &mb.cf); | 816 | if (tb[CGW_FDMOD_SET]) { |
| 732 | mod->modtype.set = mb.modtype; | 817 | nla_memcpy(&mb, tb[CGW_FDMOD_SET], CGW_FDMODATTR_LEN); |
| 818 | |||
| 819 | canfdframecpy(&mod->modframe.set, &mb.cf); | ||
| 820 | mod->modtype.set = mb.modtype; | ||
| 821 | |||
| 822 | if (mb.modtype & CGW_MOD_ID) | ||
| 823 | mod->modfunc[modidx++] = mod_set_id; | ||
| 824 | |||
| 825 | if (mb.modtype & CGW_MOD_LEN) | ||
| 826 | mod->modfunc[modidx++] = mod_set_len; | ||
| 827 | |||
| 828 | if (mb.modtype & CGW_MOD_FLAGS) | ||
| 829 | mod->modfunc[modidx++] = mod_set_flags; | ||
| 830 | |||
| 831 | if (mb.modtype & CGW_MOD_DATA) | ||
| 832 | mod->modfunc[modidx++] = mod_set_fddata; | ||
| 833 | } | ||
| 834 | } else { | ||
| 835 | struct cgw_frame_mod mb; | ||
| 733 | 836 | ||
| 734 | if (mb.modtype & CGW_MOD_ID) | 837 | if (tb[CGW_MOD_AND]) { |
| 735 | mod->modfunc[modidx++] = mod_set_id; | 838 | nla_memcpy(&mb, tb[CGW_MOD_AND], CGW_MODATTR_LEN); |
| 736 | 839 | ||
| 737 | if (mb.modtype & CGW_MOD_DLC) | 840 | canframecpy(&mod->modframe.and, &mb.cf); |
| 738 | mod->modfunc[modidx++] = mod_set_dlc; | 841 | mod->modtype.and = mb.modtype; |
| 739 | 842 | ||
| 740 | if (mb.modtype & CGW_MOD_DATA) | 843 | if (mb.modtype & CGW_MOD_ID) |
| 741 | mod->modfunc[modidx++] = mod_set_data; | 844 | mod->modfunc[modidx++] = mod_and_id; |
| 845 | |||
| 846 | if (mb.modtype & CGW_MOD_LEN) | ||
| 847 | mod->modfunc[modidx++] = mod_and_len; | ||
| 848 | |||
| 849 | if (mb.modtype & CGW_MOD_DATA) | ||
| 850 | mod->modfunc[modidx++] = mod_and_data; | ||
| 851 | } | ||
| 852 | |||
| 853 | if (tb[CGW_MOD_OR]) { | ||
| 854 | nla_memcpy(&mb, tb[CGW_MOD_OR], CGW_MODATTR_LEN); | ||
| 855 | |||
| 856 | canframecpy(&mod->modframe.or, &mb.cf); | ||
| 857 | mod->modtype.or = mb.modtype; | ||
| 858 | |||
| 859 | if (mb.modtype & CGW_MOD_ID) | ||
| 860 | mod->modfunc[modidx++] = mod_or_id; | ||
| 861 | |||
| 862 | if (mb.modtype & CGW_MOD_LEN) | ||
| 863 | mod->modfunc[modidx++] = mod_or_len; | ||
| 864 | |||
| 865 | if (mb.modtype & CGW_MOD_DATA) | ||
| 866 | mod->modfunc[modidx++] = mod_or_data; | ||
| 867 | } | ||
| 868 | |||
| 869 | if (tb[CGW_MOD_XOR]) { | ||
| 870 | nla_memcpy(&mb, tb[CGW_MOD_XOR], CGW_MODATTR_LEN); | ||
| 871 | |||
| 872 | canframecpy(&mod->modframe.xor, &mb.cf); | ||
| 873 | mod->modtype.xor = mb.modtype; | ||
| 874 | |||
| 875 | if (mb.modtype & CGW_MOD_ID) | ||
| 876 | mod->modfunc[modidx++] = mod_xor_id; | ||
| 877 | |||
| 878 | if (mb.modtype & CGW_MOD_LEN) | ||
| 879 | mod->modfunc[modidx++] = mod_xor_len; | ||
| 880 | |||
| 881 | if (mb.modtype & CGW_MOD_DATA) | ||
| 882 | mod->modfunc[modidx++] = mod_xor_data; | ||
| 883 | } | ||
| 884 | |||
| 885 | if (tb[CGW_MOD_SET]) { | ||
| 886 | nla_memcpy(&mb, tb[CGW_MOD_SET], CGW_MODATTR_LEN); | ||
| 887 | |||
| 888 | canframecpy(&mod->modframe.set, &mb.cf); | ||
| 889 | mod->modtype.set = mb.modtype; | ||
| 890 | |||
| 891 | if (mb.modtype & CGW_MOD_ID) | ||
| 892 | mod->modfunc[modidx++] = mod_set_id; | ||
| 893 | |||
| 894 | if (mb.modtype & CGW_MOD_LEN) | ||
| 895 | mod->modfunc[modidx++] = mod_set_len; | ||
| 896 | |||
| 897 | if (mb.modtype & CGW_MOD_DATA) | ||
| 898 | mod->modfunc[modidx++] = mod_set_data; | ||
| 899 | } | ||
| 742 | } | 900 | } |
| 743 | 901 | ||
| 744 | /* check for checksum operations after CAN frame modifications */ | 902 | /* check for checksum operations after CAN frame modifications */ |
| 745 | if (modidx) { | 903 | if (modidx) { |
| 746 | |||
| 747 | if (tb[CGW_CS_CRC8]) { | 904 | if (tb[CGW_CS_CRC8]) { |
| 748 | struct cgw_csum_crc8 *c = nla_data(tb[CGW_CS_CRC8]); | 905 | struct cgw_csum_crc8 *c = nla_data(tb[CGW_CS_CRC8]); |
| 749 | 906 | ||
| 750 | err = cgw_chk_csum_parms(c->from_idx, c->to_idx, | 907 | err = cgw_chk_csum_parms(c->from_idx, c->to_idx, |
| 751 | c->result_idx); | 908 | c->result_idx, r); |
| 752 | if (err) | 909 | if (err) |
| 753 | return err; | 910 | return err; |
| 754 | 911 | ||
| 755 | nla_memcpy(&mod->csum.crc8, tb[CGW_CS_CRC8], | 912 | nla_memcpy(&mod->csum.crc8, tb[CGW_CS_CRC8], |
| 756 | CGW_CS_CRC8_LEN); | 913 | CGW_CS_CRC8_LEN); |
| 757 | 914 | ||
| 758 | /* | 915 | /* select dedicated processing function to reduce |
| 759 | * select dedicated processing function to reduce | ||
| 760 | * runtime operations in receive hot path. | 916 | * runtime operations in receive hot path. |
| 761 | */ | 917 | */ |
| 762 | if (c->from_idx < 0 || c->to_idx < 0 || | 918 | if (c->from_idx < 0 || c->to_idx < 0 || |
| @@ -772,15 +928,14 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod, | |||
| 772 | struct cgw_csum_xor *c = nla_data(tb[CGW_CS_XOR]); | 928 | struct cgw_csum_xor *c = nla_data(tb[CGW_CS_XOR]); |
| 773 | 929 | ||
| 774 | err = cgw_chk_csum_parms(c->from_idx, c->to_idx, | 930 | err = cgw_chk_csum_parms(c->from_idx, c->to_idx, |
| 775 | c->result_idx); | 931 | c->result_idx, r); |
| 776 | if (err) | 932 | if (err) |
| 777 | return err; | 933 | return err; |
| 778 | 934 | ||
| 779 | nla_memcpy(&mod->csum.xor, tb[CGW_CS_XOR], | 935 | nla_memcpy(&mod->csum.xor, tb[CGW_CS_XOR], |
| 780 | CGW_CS_XOR_LEN); | 936 | CGW_CS_XOR_LEN); |
| 781 | 937 | ||
| 782 | /* | 938 | /* select dedicated processing function to reduce |
| 783 | * select dedicated processing function to reduce | ||
| 784 | * runtime operations in receive hot path. | 939 | * runtime operations in receive hot path. |
| 785 | */ | 940 | */ |
| 786 | if (c->from_idx < 0 || c->to_idx < 0 || | 941 | if (c->from_idx < 0 || c->to_idx < 0 || |
| @@ -792,16 +947,14 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod, | |||
| 792 | mod->csumfunc.xor = cgw_csum_xor_neg; | 947 | mod->csumfunc.xor = cgw_csum_xor_neg; |
| 793 | } | 948 | } |
| 794 | 949 | ||
| 795 | if (tb[CGW_MOD_UID]) { | 950 | if (tb[CGW_MOD_UID]) |
| 796 | nla_memcpy(&mod->uid, tb[CGW_MOD_UID], sizeof(u32)); | 951 | nla_memcpy(&mod->uid, tb[CGW_MOD_UID], sizeof(u32)); |
| 797 | } | ||
| 798 | } | 952 | } |
| 799 | 953 | ||
| 800 | if (gwtype == CGW_TYPE_CAN_CAN) { | 954 | if (gwtype == CGW_TYPE_CAN_CAN) { |
| 801 | |||
| 802 | /* check CGW_TYPE_CAN_CAN specific attributes */ | 955 | /* check CGW_TYPE_CAN_CAN specific attributes */ |
| 803 | |||
| 804 | struct can_can_gw *ccgw = (struct can_can_gw *)gwtypeattr; | 956 | struct can_can_gw *ccgw = (struct can_can_gw *)gwtypeattr; |
| 957 | |||
| 805 | memset(ccgw, 0, sizeof(*ccgw)); | 958 | memset(ccgw, 0, sizeof(*ccgw)); |
| 806 | 959 | ||
| 807 | /* check for can_filter in attributes */ | 960 | /* check for can_filter in attributes */ |
| @@ -862,12 +1015,10 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
| 862 | return err; | 1015 | return err; |
| 863 | 1016 | ||
| 864 | if (mod.uid) { | 1017 | if (mod.uid) { |
| 865 | |||
| 866 | ASSERT_RTNL(); | 1018 | ASSERT_RTNL(); |
| 867 | 1019 | ||
| 868 | /* check for updating an existing job with identical uid */ | 1020 | /* check for updating an existing job with identical uid */ |
| 869 | hlist_for_each_entry(gwj, &net->can.cgw_list, list) { | 1021 | hlist_for_each_entry(gwj, &net->can.cgw_list, list) { |
| 870 | |||
| 871 | if (gwj->mod.uid != mod.uid) | 1022 | if (gwj->mod.uid != mod.uid) |
| 872 | continue; | 1023 | continue; |
| 873 | 1024 | ||
| @@ -988,7 +1139,6 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
| 988 | 1139 | ||
| 989 | /* remove only the first matching entry */ | 1140 | /* remove only the first matching entry */ |
| 990 | hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) { | 1141 | hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) { |
| 991 | |||
| 992 | if (gwj->flags != r->flags) | 1142 | if (gwj->flags != r->flags) |
| 993 | continue; | 1143 | continue; |
| 994 | 1144 | ||
diff --git a/net/can/raw.c b/net/can/raw.c index da386f1fa815..fdbc36140e9b 100644 --- a/net/can/raw.c +++ b/net/can/raw.c | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | // SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) | 1 | // SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) |
| 2 | /* | 2 | /* raw.c - Raw sockets for protocol family CAN |
| 3 | * raw.c - Raw sockets for protocol family CAN | ||
| 4 | * | 3 | * |
| 5 | * Copyright (c) 2002-2007 Volkswagen Group Electronic Research | 4 | * Copyright (c) 2002-2007 Volkswagen Group Electronic Research |
| 6 | * All rights reserved. | 5 | * All rights reserved. |
| @@ -65,8 +64,7 @@ MODULE_ALIAS("can-proto-1"); | |||
| 65 | 64 | ||
| 66 | #define MASK_ALL 0 | 65 | #define MASK_ALL 0 |
| 67 | 66 | ||
| 68 | /* | 67 | /* A raw socket has a list of can_filters attached to it, each receiving |
| 69 | * A raw socket has a list of can_filters attached to it, each receiving | ||
| 70 | * the CAN frames matching that filter. If the filter list is empty, | 68 | * the CAN frames matching that filter. If the filter list is empty, |
| 71 | * no CAN frames will be received by the socket. The default after | 69 | * no CAN frames will be received by the socket. The default after |
| 72 | * opening the socket, is to have one filter which receives all frames. | 70 | * opening the socket, is to have one filter which receives all frames. |
| @@ -97,8 +95,7 @@ struct raw_sock { | |||
| 97 | struct uniqframe __percpu *uniq; | 95 | struct uniqframe __percpu *uniq; |
| 98 | }; | 96 | }; |
| 99 | 97 | ||
| 100 | /* | 98 | /* Return pointer to store the extra msg flags for raw_recvmsg(). |
| 101 | * Return pointer to store the extra msg flags for raw_recvmsg(). | ||
| 102 | * We use the space of one unsigned int beyond the 'struct sockaddr_can' | 99 | * We use the space of one unsigned int beyond the 'struct sockaddr_can' |
| 103 | * in skb->cb. | 100 | * in skb->cb. |
| 104 | */ | 101 | */ |
| @@ -157,8 +154,7 @@ static void raw_rcv(struct sk_buff *oskb, void *data) | |||
| 157 | if (!skb) | 154 | if (!skb) |
| 158 | return; | 155 | return; |
| 159 | 156 | ||
| 160 | /* | 157 | /* Put the datagram to the queue so that raw_recvmsg() can |
| 161 | * Put the datagram to the queue so that raw_recvmsg() can | ||
| 162 | * get it from there. We need to pass the interface index to | 158 | * get it from there. We need to pass the interface index to |
| 163 | * raw_recvmsg(). We pass a whole struct sockaddr_can in skb->cb | 159 | * raw_recvmsg(). We pass a whole struct sockaddr_can in skb->cb |
| 164 | * containing the interface index. | 160 | * containing the interface index. |
| @@ -284,7 +280,6 @@ static int raw_notifier(struct notifier_block *nb, | |||
| 284 | return NOTIFY_DONE; | 280 | return NOTIFY_DONE; |
| 285 | 281 | ||
| 286 | switch (msg) { | 282 | switch (msg) { |
| 287 | |||
| 288 | case NETDEV_UNREGISTER: | 283 | case NETDEV_UNREGISTER: |
| 289 | lock_sock(sk); | 284 | lock_sock(sk); |
| 290 | /* remove current filters & unregister */ | 285 | /* remove current filters & unregister */ |
| @@ -370,8 +365,9 @@ static int raw_release(struct socket *sock) | |||
| 370 | raw_disable_allfilters(dev_net(dev), dev, sk); | 365 | raw_disable_allfilters(dev_net(dev), dev, sk); |
| 371 | dev_put(dev); | 366 | dev_put(dev); |
| 372 | } | 367 | } |
| 373 | } else | 368 | } else { |
| 374 | raw_disable_allfilters(sock_net(sk), NULL, sk); | 369 | raw_disable_allfilters(sock_net(sk), NULL, sk); |
| 370 | } | ||
| 375 | } | 371 | } |
| 376 | 372 | ||
| 377 | if (ro->count > 1) | 373 | if (ro->count > 1) |
| @@ -451,8 +447,9 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len) | |||
| 451 | dev, sk); | 447 | dev, sk); |
| 452 | dev_put(dev); | 448 | dev_put(dev); |
| 453 | } | 449 | } |
| 454 | } else | 450 | } else { |
| 455 | raw_disable_allfilters(sock_net(sk), NULL, sk); | 451 | raw_disable_allfilters(sock_net(sk), NULL, sk); |
| 452 | } | ||
| 456 | } | 453 | } |
| 457 | ro->ifindex = ifindex; | 454 | ro->ifindex = ifindex; |
| 458 | ro->bound = 1; | 455 | ro->bound = 1; |
| @@ -503,7 +500,6 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, | |||
| 503 | return -EINVAL; | 500 | return -EINVAL; |
| 504 | 501 | ||
| 505 | switch (optname) { | 502 | switch (optname) { |
| 506 | |||
| 507 | case CAN_RAW_FILTER: | 503 | case CAN_RAW_FILTER: |
| 508 | if (optlen % sizeof(struct can_filter) != 0) | 504 | if (optlen % sizeof(struct can_filter) != 0) |
| 509 | return -EINVAL; | 505 | return -EINVAL; |
| @@ -666,17 +662,18 @@ static int raw_getsockopt(struct socket *sock, int level, int optname, | |||
| 666 | return -EINVAL; | 662 | return -EINVAL; |
| 667 | 663 | ||
| 668 | switch (optname) { | 664 | switch (optname) { |
| 669 | |||
| 670 | case CAN_RAW_FILTER: | 665 | case CAN_RAW_FILTER: |
| 671 | lock_sock(sk); | 666 | lock_sock(sk); |
| 672 | if (ro->count > 0) { | 667 | if (ro->count > 0) { |
| 673 | int fsize = ro->count * sizeof(struct can_filter); | 668 | int fsize = ro->count * sizeof(struct can_filter); |
| 669 | |||
| 674 | if (len > fsize) | 670 | if (len > fsize) |
| 675 | len = fsize; | 671 | len = fsize; |
| 676 | if (copy_to_user(optval, ro->filter, len)) | 672 | if (copy_to_user(optval, ro->filter, len)) |
| 677 | err = -EFAULT; | 673 | err = -EFAULT; |
| 678 | } else | 674 | } else { |
| 679 | len = 0; | 675 | len = 0; |
| 676 | } | ||
| 680 | release_sock(sk); | 677 | release_sock(sk); |
| 681 | 678 | ||
| 682 | if (!err) | 679 | if (!err) |
| @@ -743,8 +740,9 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) | |||
| 743 | return -EINVAL; | 740 | return -EINVAL; |
| 744 | 741 | ||
| 745 | ifindex = addr->can_ifindex; | 742 | ifindex = addr->can_ifindex; |
| 746 | } else | 743 | } else { |
| 747 | ifindex = ro->ifindex; | 744 | ifindex = ro->ifindex; |
| 745 | } | ||
| 748 | 746 | ||
| 749 | dev = dev_get_by_index(sock_net(sk), ifindex); | 747 | dev = dev_get_by_index(sock_net(sk), ifindex); |
| 750 | if (!dev) | 748 | if (!dev) |
| @@ -837,8 +835,8 @@ static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, | |||
| 837 | return size; | 835 | return size; |
| 838 | } | 836 | } |
| 839 | 837 | ||
| 840 | int raw_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd, | 838 | static int raw_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd, |
| 841 | unsigned long arg) | 839 | unsigned long arg) |
| 842 | { | 840 | { |
| 843 | /* no ioctls for socket layer -> hand it down to NIC layer */ | 841 | /* no ioctls for socket layer -> hand it down to NIC layer */ |
| 844 | return -ENOIOCTLCMD; | 842 | return -ENOIOCTLCMD; |
| @@ -887,7 +885,7 @@ static __init int raw_module_init(void) | |||
| 887 | 885 | ||
| 888 | err = can_proto_register(&raw_can_proto); | 886 | err = can_proto_register(&raw_can_proto); |
| 889 | if (err < 0) | 887 | if (err < 0) |
| 890 | printk(KERN_ERR "can: registration of raw protocol failed\n"); | 888 | pr_err("can: registration of raw protocol failed\n"); |
| 891 | 889 | ||
| 892 | return err; | 890 | return err; |
| 893 | } | 891 | } |
