Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (c) 2010-2011 EIA Electronics,
3// Pieter Beyens <pieter.beyens@eia.be>
4// Copyright (c) 2010-2011 EIA Electronics,
5// Kurt Van Dijck <kurt.van.dijck@eia.be>
6// Copyright (c) 2018 Protonic,
7// Robin van der Gracht <robin@protonic.nl>
8// Copyright (c) 2017-2019 Pengutronix,
9// Marc Kleine-Budde <kernel@pengutronix.de>
10// Copyright (c) 2017-2019 Pengutronix,
11// Oleksij Rempel <kernel@pengutronix.de>
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/can/can-ml.h>
16#include <linux/can/core.h>
17#include <linux/can/skb.h>
18#include <linux/errqueue.h>
19#include <linux/if_arp.h>
20#include <net/can.h>
21
22#include "j1939-priv.h"
23
24#define J1939_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_addr.j1939)
25
26/* conversion function between struct sock::sk_priority from linux and
27 * j1939 priority field
28 */
29static inline priority_t j1939_prio(u32 sk_priority)
30{
31 sk_priority = min(sk_priority, 7U);
32
33 return 7 - sk_priority;
34}
35
/* Inverse of j1939_prio(): map a J1939 priority (0 = highest, 7 = lowest)
 * back to struct sock::sk_priority.
 */
static inline u32 j1939_to_sk_priority(priority_t prio)
{
	return 7 - prio;
}
40
/* function to see if pgn is to be evaluated */
static inline bool j1939_pgn_is_valid(pgn_t pgn)
{
	/* values above J1939_PGN_MAX serve as "no PGN set" placeholders */
	return pgn <= J1939_PGN_MAX;
}
46
47/* test function to avoid non-zero DA placeholder for pdu1 pgn's */
48static inline bool j1939_pgn_is_clean_pdu(pgn_t pgn)
49{
50 if (j1939_pgn_is_pdu1(pgn))
51 return !(pgn & 0xff);
52 else
53 return true;
54}
55
/* Account one more outstanding skb/session on this socket. Balanced by
 * j1939_sock_pending_del().
 */
static inline void j1939_sock_pending_add(struct sock *sk)
{
	struct j1939_sock *jsk = j1939_sk(sk);

	atomic_inc(&jsk->skb_pending);
}
62
/* Return the current number of outstanding skbs/sessions on this socket. */
static int j1939_sock_pending_get(struct sock *sk)
{
	struct j1939_sock *jsk = j1939_sk(sk);

	return atomic_read(&jsk->skb_pending);
}
69
/* Drop one outstanding skb/session; wake waiters (e.g. in
 * j1939_sk_release()) once the count reaches zero.
 */
void j1939_sock_pending_del(struct sock *sk)
{
	struct j1939_sock *jsk = j1939_sk(sk);

	/* atomic_dec_return returns the new value */
	if (!atomic_dec_return(&jsk->skb_pending))
		wake_up(&jsk->waitq); /* no pending SKB's */
}
78
/* Mark the socket bound and link it into priv's socket list. Takes a
 * reference on priv; released in j1939_jsk_del().
 */
static void j1939_jsk_add(struct j1939_priv *priv, struct j1939_sock *jsk)
{
	jsk->state |= J1939_SOCK_BOUND;
	j1939_priv_get(priv);

	write_lock_bh(&priv->j1939_socks_lock);
	list_add_tail(&jsk->list, &priv->j1939_socks);
	write_unlock_bh(&priv->j1939_socks_lock);
}
88
/* Unlink the socket from priv's socket list, drop the priv reference taken
 * in j1939_jsk_add() and clear the bound state.
 */
static void j1939_jsk_del(struct j1939_priv *priv, struct j1939_sock *jsk)
{
	write_lock_bh(&priv->j1939_socks_lock);
	list_del_init(&jsk->list);
	write_unlock_bh(&priv->j1939_socks_lock);

	j1939_priv_put(priv);
	jsk->state &= ~J1939_SOCK_BOUND;
}
98
/* Append a session to the socket's TX session queue (taking a session
 * reference) and account it as pending.
 *
 * Return: true if the queue was empty before, i.e. the caller should
 * activate this session immediately.
 */
static bool j1939_sk_queue_session(struct j1939_session *session)
{
	struct j1939_sock *jsk = j1939_sk(session->sk);
	bool empty;

	spin_lock_bh(&jsk->sk_session_queue_lock);
	empty = list_empty(&jsk->sk_session_queue);
	j1939_session_get(session);
	list_add_tail(&session->sk_session_queue_entry, &jsk->sk_session_queue);
	spin_unlock_bh(&jsk->sk_session_queue_lock);
	j1939_sock_pending_add(&jsk->sk);

	return empty;
}
113
/* Return the newest queued session that has not yet been fully queued
 * (total_queued_size < total_message_size), with a reference held, or NULL
 * if the queue is empty or the last session is complete. The caller must
 * drop the reference with j1939_session_put().
 */
static struct
j1939_session *j1939_sk_get_incomplete_session(struct j1939_sock *jsk)
{
	struct j1939_session *session = NULL;

	spin_lock_bh(&jsk->sk_session_queue_lock);
	if (!list_empty(&jsk->sk_session_queue)) {
		session = list_last_entry(&jsk->sk_session_queue,
					  struct j1939_session,
					  sk_session_queue_entry);
		if (session->total_queued_size == session->total_message_size)
			session = NULL;
		else
			j1939_session_get(session);
	}
	spin_unlock_bh(&jsk->sk_session_queue_lock);

	return session;
}
133
/* Remove all sessions from the socket's TX queue, record @err on each and
 * drop the queue's session references.
 */
static void j1939_sk_queue_drop_all(struct j1939_priv *priv,
				    struct j1939_sock *jsk, int err)
{
	struct j1939_session *session, *tmp;

	netdev_dbg(priv->ndev, "%s: err: %i\n", __func__, err);
	spin_lock_bh(&jsk->sk_session_queue_lock);
	list_for_each_entry_safe(session, tmp, &jsk->sk_session_queue,
				 sk_session_queue_entry) {
		list_del_init(&session->sk_session_queue_entry);
		session->err = err;
		j1939_session_put(session);
	}
	spin_unlock_bh(&jsk->sk_session_queue_lock);
}
149
/* Dequeue @session (which just finished) and activate the next session in
 * the socket's TX queue, skipping any that fail to activate. Must be called
 * with jsk->sk_session_queue_lock held.
 */
static void j1939_sk_queue_activate_next_locked(struct j1939_session *session)
{
	struct j1939_sock *jsk;
	struct j1939_session *first;
	int err;

	/* RX-Session don't have a socket (yet) */
	if (!session->sk)
		return;

	jsk = j1939_sk(session->sk);
	lockdep_assert_held(&jsk->sk_session_queue_lock);

	err = session->err;

	first = list_first_entry_or_null(&jsk->sk_session_queue,
					 struct j1939_session,
					 sk_session_queue_entry);

	/* Someone else has already activated the next session */
	if (first != session)
		return;

activate_next:
	list_del_init(&first->sk_session_queue_entry);
	j1939_session_put(first);
	first = list_first_entry_or_null(&jsk->sk_session_queue,
					 struct j1939_session,
					 sk_session_queue_entry);
	if (!first)
		return;

	if (j1939_session_activate(first)) {
		netdev_warn_once(first->priv->ndev,
				 "%s: 0x%p: Identical session is already activated.\n",
				 __func__, first);
		first->err = -EBUSY;
		goto activate_next;
	} else {
		/* Give receiver some time (arbitrarily chosen) to recover */
		int time_ms = 0;

		if (err)
			time_ms = 10 + get_random_u32_below(16);

		j1939_tp_schedule_txtimer(first, time_ms);
	}
}
198
/* Locked wrapper around j1939_sk_queue_activate_next_locked(); no-op for
 * sessions without a socket (RX sessions).
 */
void j1939_sk_queue_activate_next(struct j1939_session *session)
{
	struct j1939_sock *jsk;

	if (!session->sk)
		return;

	jsk = j1939_sk(session->sk);

	spin_lock_bh(&jsk->sk_session_queue_lock);
	j1939_sk_queue_activate_next_locked(session);
	spin_unlock_bh(&jsk->sk_session_queue_lock);
}
212
/* Check whether a received skb's addressing matches this socket's bind()
 * address, connect() peer (if connected) and PGN rx filter. Promiscuous
 * sockets match everything.
 */
static bool j1939_sk_match_dst(struct j1939_sock *jsk,
			       const struct j1939_sk_buff_cb *skcb)
{
	if ((jsk->state & J1939_SOCK_PROMISC))
		return true;

	/* Destination address filter */
	if (jsk->addr.src_name && skcb->addr.dst_name) {
		/* both sides are NAME-addressed: compare NAMEs */
		if (jsk->addr.src_name != skcb->addr.dst_name)
			return false;
	} else {
		/* receive (all sockets) if
		 * - all packets that match our bind() address
		 * - all broadcast on a socket if SO_BROADCAST
		 *   is set
		 */
		if (j1939_address_is_unicast(skcb->addr.da)) {
			if (jsk->addr.sa != skcb->addr.da)
				return false;
		} else if (!sock_flag(&jsk->sk, SOCK_BROADCAST)) {
			/* receiving broadcast without SO_BROADCAST
			 * flag is not allowed
			 */
			return false;
		}
	}

	/* Source address filter */
	if (jsk->state & J1939_SOCK_CONNECTED) {
		/* receive (all sockets) if
		 * - all packets that match our connect() name or address
		 */
		if (jsk->addr.dst_name && skcb->addr.src_name) {
			if (jsk->addr.dst_name != skcb->addr.src_name)
				return false;
		} else {
			if (jsk->addr.da != skcb->addr.sa)
				return false;
		}
	}

	/* PGN filter */
	if (j1939_pgn_is_valid(jsk->pgn_rx_filter) &&
	    jsk->pgn_rx_filter != skcb->addr.pgn)
		return false;

	return true;
}
261
262/* matches skb control buffer (addr) with a j1939 filter */
263static bool j1939_sk_match_filter(struct j1939_sock *jsk,
264 const struct j1939_sk_buff_cb *skcb)
265{
266 const struct j1939_filter *f;
267 int nfilter;
268
269 spin_lock_bh(&jsk->filters_lock);
270
271 f = jsk->filters;
272 nfilter = jsk->nfilters;
273
274 if (!nfilter)
275 /* receive all when no filters are assigned */
276 goto filter_match_found;
277
278 for (; nfilter; ++f, --nfilter) {
279 if ((skcb->addr.pgn & f->pgn_mask) != f->pgn)
280 continue;
281 if ((skcb->addr.sa & f->addr_mask) != f->addr)
282 continue;
283 if ((skcb->addr.src_name & f->name_mask) != f->name)
284 continue;
285 goto filter_match_found;
286 }
287
288 spin_unlock_bh(&jsk->filters_lock);
289 return false;
290
291filter_match_found:
292 spin_unlock_bh(&jsk->filters_lock);
293 return true;
294}
295
296static bool j1939_sk_recv_match_one(struct j1939_sock *jsk,
297 const struct j1939_sk_buff_cb *skcb)
298{
299 if (!(jsk->state & J1939_SOCK_BOUND))
300 return false;
301
302 if (!j1939_sk_match_dst(jsk, skcb))
303 return false;
304
305 if (!j1939_sk_match_filter(jsk, skcb))
306 return false;
307
308 return true;
309}
310
/* Deliver one received skb to a single socket: skip the sending socket
 * itself, apply the match checks, then clone and queue the skb on the
 * socket's receive queue.
 */
static void j1939_sk_recv_one(struct j1939_sock *jsk, struct sk_buff *oskb)
{
	const struct j1939_sk_buff_cb *oskcb = j1939_skb_to_cb(oskb);
	struct j1939_sk_buff_cb *skcb;
	enum skb_drop_reason reason;
	struct sk_buff *skb;

	/* don't loop a message back to its own socket */
	if (oskb->sk == &jsk->sk)
		return;

	if (!j1939_sk_recv_match_one(jsk, oskcb))
		return;

	skb = skb_clone(oskb, GFP_ATOMIC);
	if (!skb) {
		pr_warn("skb clone failed\n");
		return;
	}
	can_skb_set_owner(skb, oskb->sk);

	skcb = j1939_skb_to_cb(skb);
	/* flag locally originated (skb->sk set) traffic as MSG_DONTROUTE */
	skcb->msg_flags &= ~(MSG_DONTROUTE);
	if (skb->sk)
		skcb->msg_flags |= MSG_DONTROUTE;

	reason = sock_queue_rcv_skb_reason(&jsk->sk, skb);
	if (reason)
		sk_skb_reason_drop(&jsk->sk, skb, reason);
}
340
/* Return true if at least one socket on this interface would accept an skb
 * with the given control-buffer addressing.
 */
bool j1939_sk_recv_match(struct j1939_priv *priv, struct j1939_sk_buff_cb *skcb)
{
	struct j1939_sock *jsk;
	bool match = false;

	read_lock_bh(&priv->j1939_socks_lock);
	list_for_each_entry(jsk, &priv->j1939_socks, list) {
		match = j1939_sk_recv_match_one(jsk, skcb);
		if (match)
			break;
	}
	read_unlock_bh(&priv->j1939_socks_lock);

	return match;
}
356
/* Fan a received skb out to every matching socket on this interface. */
void j1939_sk_recv(struct j1939_priv *priv, struct sk_buff *skb)
{
	struct j1939_sock *jsk;

	read_lock_bh(&priv->j1939_socks_lock);
	list_for_each_entry(jsk, &priv->j1939_socks, list) {
		j1939_sk_recv_one(jsk, skb);
	}
	read_unlock_bh(&priv->j1939_socks_lock);
}
367
/* sk->sk_destruct callback: drop the socket's j1939_priv reference and
 * chain to the generic CAN socket destructor.
 */
static void j1939_sk_sock_destruct(struct sock *sk)
{
	struct j1939_sock *jsk = j1939_sk(sk);

	/* This function will be called by the generic networking code, when
	 * the socket is ultimately closed (sk->sk_destruct).
	 *
	 * The race between
	 * - processing a received CAN frame
	 *   (can_receive -> j1939_can_recv)
	 *   and accessing j1939_priv
	 * ... and ...
	 * - closing a socket
	 *   (j1939_can_rx_unregister -> can_rx_unregister)
	 *   and calling the final j1939_priv_put()
	 *
	 * is avoided by calling the final j1939_priv_put() from this
	 * RCU deferred cleanup call.
	 */
	if (jsk->priv) {
		j1939_priv_put(jsk->priv);
		jsk->priv = NULL;
	}

	/* call generic CAN sock destruct */
	can_sock_destruct(sk);
}
395
/* Protocol init hook: zero the j1939-specific part of the socket and set
 * the J1939 defaults (no addresses, no PGN, default send priority 6).
 */
static int j1939_sk_init(struct sock *sk)
{
	struct j1939_sock *jsk = j1939_sk(sk);

	/* Ensure that "sk" is first member in "struct j1939_sock", so that we
	 * can skip it during memset().
	 */
	BUILD_BUG_ON(offsetof(struct j1939_sock, sk) != 0);
	memset((void *)jsk + sizeof(jsk->sk), 0x0,
	       sizeof(*jsk) - sizeof(jsk->sk));

	INIT_LIST_HEAD(&jsk->list);
	init_waitqueue_head(&jsk->waitq);
	/* J1939 default priority is 6 */
	jsk->sk.sk_priority = j1939_to_sk_priority(6);
	jsk->sk.sk_reuse = 1; /* per default */
	jsk->addr.sa = J1939_NO_ADDR;
	jsk->addr.da = J1939_NO_ADDR;
	jsk->addr.pgn = J1939_NO_PGN;
	jsk->pgn_rx_filter = J1939_NO_PGN;
	atomic_set(&jsk->skb_pending, 0);
	spin_lock_init(&jsk->sk_session_queue_lock);
	INIT_LIST_HEAD(&jsk->sk_session_queue);
	spin_lock_init(&jsk->filters_lock);

	/* j1939_sk_sock_destruct() depends on SOCK_RCU_FREE flag */
	sock_set_flag(sk, SOCK_RCU_FREE);
	sk->sk_destruct = j1939_sk_sock_destruct;
	sk->sk_protocol = CAN_J1939;

	return 0;
}
427
/* Validate a user-supplied sockaddr for bind()/connect(): family, length,
 * interface index and (for PDU1 PGNs) a zeroed DA placeholder byte.
 * Return: 0 on success, negative errno otherwise.
 */
static int j1939_sk_sanity_check(struct sockaddr_can *addr, int len)
{
	if (!addr)
		return -EDESTADDRREQ;
	if (len < J1939_MIN_NAMELEN)
		return -EINVAL;
	if (addr->can_family != AF_CAN)
		return -EINVAL;
	if (!addr->can_ifindex)
		return -ENODEV;
	if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn) &&
	    !j1939_pgn_is_clean_pdu(addr->can_addr.j1939.pgn))
		return -EINVAL;

	return 0;
}
444
/* bind() handler: attach the socket to a CAN interface and claim the
 * requested NAME/address as a local ECU. A re-bind on the same interface
 * drops the old ECU claim and takes a new one; re-binding to a different
 * interface is rejected.
 */
static int j1939_sk_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int len)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct j1939_sock *jsk = j1939_sk(sock->sk);
	struct j1939_priv *priv;
	struct sock *sk;
	struct net *net;
	int ret = 0;

	ret = j1939_sk_sanity_check(addr, len);
	if (ret)
		return ret;

	lock_sock(sock->sk);

	priv = jsk->priv;
	sk = sock->sk;
	net = sock_net(sk);

	/* Already bound to an interface? */
	if (jsk->state & J1939_SOCK_BOUND) {
		/* A re-bind() to a different interface is not
		 * supported.
		 */
		if (jsk->ifindex != addr->can_ifindex) {
			ret = -EINVAL;
			goto out_release_sock;
		}

		/* drop old references */
		j1939_jsk_del(priv, jsk);
		j1939_local_ecu_put(priv, jsk->addr.src_name, jsk->addr.sa);
	} else {
		struct can_ml_priv *can_ml;
		struct net_device *ndev;

		ndev = dev_get_by_index(net, addr->can_ifindex);
		if (!ndev) {
			ret = -ENODEV;
			goto out_release_sock;
		}

		/* reject devices that are being torn down */
		if (ndev->reg_state != NETREG_REGISTERED) {
			dev_put(ndev);
			ret = -ENODEV;
			goto out_release_sock;
		}

		/* only CAN devices carry can_ml_priv */
		can_ml = can_get_ml_priv(ndev);
		if (!can_ml) {
			dev_put(ndev);
			ret = -ENODEV;
			goto out_release_sock;
		}

		if (!(ndev->flags & IFF_UP)) {
			dev_put(ndev);
			ret = -ENETDOWN;
			goto out_release_sock;
		}

		priv = j1939_netdev_start(ndev);
		dev_put(ndev);
		if (IS_ERR(priv)) {
			ret = PTR_ERR(priv);
			goto out_release_sock;
		}

		jsk->ifindex = addr->can_ifindex;

		/* the corresponding j1939_priv_put() is called via
		 * sk->sk_destruct, which points to j1939_sk_sock_destruct()
		 */
		j1939_priv_get(priv);
		jsk->priv = priv;
	}

	/* set default transmit pgn */
	if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn))
		jsk->pgn_rx_filter = addr->can_addr.j1939.pgn;
	jsk->addr.src_name = addr->can_addr.j1939.name;
	jsk->addr.sa = addr->can_addr.j1939.addr;

	/* get new references */
	ret = j1939_local_ecu_get(priv, jsk->addr.src_name, jsk->addr.sa);
	if (ret) {
		/* unwind: stop the netdev and drop our priv reference after
		 * any concurrent RCU readers are done
		 */
		j1939_netdev_stop(priv);
		jsk->priv = NULL;
		synchronize_rcu();
		j1939_priv_put(priv);
		goto out_release_sock;
	}

	j1939_jsk_add(priv, jsk);

 out_release_sock: /* fall through */
	release_sock(sock->sk);

	return ret;
}
545
/* connect() handler: record the peer NAME/address (and optionally PGN) for
 * an already-bound socket. Broadcast peers require SO_BROADCAST.
 */
static int j1939_sk_connect(struct socket *sock, struct sockaddr_unsized *uaddr,
			    int len, int flags)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct j1939_sock *jsk = j1939_sk(sock->sk);
	int ret = 0;

	ret = j1939_sk_sanity_check(addr, len);
	if (ret)
		return ret;

	lock_sock(sock->sk);

	/* bind() before connect() is mandatory */
	if (!(jsk->state & J1939_SOCK_BOUND)) {
		ret = -EINVAL;
		goto out_release_sock;
	}

	/* A connect() to a different interface is not supported. */
	if (jsk->ifindex != addr->can_ifindex) {
		ret = -EINVAL;
		goto out_release_sock;
	}

	if (!addr->can_addr.j1939.name &&
	    addr->can_addr.j1939.addr == J1939_NO_ADDR &&
	    !sock_flag(&jsk->sk, SOCK_BROADCAST)) {
		/* broadcast, but SO_BROADCAST not set */
		ret = -EACCES;
		goto out_release_sock;
	}

	jsk->addr.dst_name = addr->can_addr.j1939.name;
	jsk->addr.da = addr->can_addr.j1939.addr;

	if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn))
		jsk->addr.pgn = addr->can_addr.j1939.pgn;

	jsk->state |= J1939_SOCK_CONNECTED;

 out_release_sock: /* fall through */
	release_sock(sock->sk);

	return ret;
}
592
/* Fill a sockaddr_can from the socket's local (peer == 0) or peer
 * (peer != 0) address for getsockname()/getpeername().
 */
static void j1939_sk_sock2sockaddr_can(struct sockaddr_can *addr,
				       const struct j1939_sock *jsk, int peer)
{
	/* There are two holes (2 bytes and 3 bytes) to clear to avoid
	 * leaking kernel information to user space.
	 */
	memset(addr, 0, J1939_MIN_NAMELEN);

	addr->can_family = AF_CAN;
	addr->can_ifindex = jsk->ifindex;
	addr->can_addr.j1939.pgn = jsk->addr.pgn;
	if (peer) {
		addr->can_addr.j1939.name = jsk->addr.dst_name;
		addr->can_addr.j1939.addr = jsk->addr.da;
	} else {
		addr->can_addr.j1939.name = jsk->addr.src_name;
		addr->can_addr.j1939.addr = jsk->addr.sa;
	}
}
612
/* getsockname()/getpeername() handler. Peer addresses are only available
 * once the socket is connected.
 * Return: address length on success, negative errno otherwise.
 */
static int j1939_sk_getname(struct socket *sock, struct sockaddr *uaddr,
			    int peer)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct j1939_sock *jsk = j1939_sk(sk);
	int ret = 0;

	lock_sock(sk);

	if (peer && !(jsk->state & J1939_SOCK_CONNECTED)) {
		ret = -EADDRNOTAVAIL;
		goto failure;
	}

	j1939_sk_sock2sockaddr_can(addr, jsk, peer);
	ret = J1939_MIN_NAMELEN;

 failure:
	release_sock(sk);

	return ret;
}
636
/* release() handler: wait for pending sessions to drain (cancelling them if
 * interrupted), drop the ECU claim, unlink from priv and tear the socket
 * down.
 */
static int j1939_sk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct j1939_sock *jsk;

	if (!sk)
		return 0;

	lock_sock(sk);
	jsk = j1939_sk(sk);

	if (jsk->state & J1939_SOCK_BOUND) {
		struct j1939_priv *priv = jsk->priv;

		/* interrupted wait: abort all in-flight and queued sessions */
		if (wait_event_interruptible(jsk->waitq,
					     !j1939_sock_pending_get(&jsk->sk))) {
			j1939_cancel_active_session(priv, sk);
			j1939_sk_queue_drop_all(priv, jsk, ESHUTDOWN);
		}

		j1939_jsk_del(priv, jsk);

		j1939_local_ecu_put(priv, jsk->addr.src_name,
				    jsk->addr.sa);

		j1939_netdev_stop(priv);
	}

	kfree(jsk->filters);
	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	sock_put(sk);

	return 0;
}
675
/* Helper for boolean socket options: copy an int from user space and set or
 * clear @flag in jsk->state accordingly.
 *
 * NOTE(review): on success this returns the user-supplied value (possibly
 * positive), not 0; callers only check for < 0 — confirm this is intended
 * before relying on the return value.
 */
static int j1939_sk_setsockopt_flag(struct j1939_sock *jsk, sockptr_t optval,
				    unsigned int optlen, int flag)
{
	int tmp;

	if (optlen != sizeof(tmp))
		return -EINVAL;
	if (copy_from_sockptr(&tmp, optval, optlen))
		return -EFAULT;
	lock_sock(&jsk->sk);
	if (tmp)
		jsk->state |= flag;
	else
		jsk->state &= ~flag;
	release_sock(&jsk->sk);
	return tmp;
}
693
/* setsockopt() handler for SOL_CAN_J1939: receive filters, promiscuous
 * mode, error-queue reporting and transmit priority.
 */
static int j1939_sk_setsockopt(struct socket *sock, int level, int optname,
			       sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct j1939_sock *jsk = j1939_sk(sk);
	int tmp, count = 0, ret = 0;
	struct j1939_filter *filters = NULL, *ofilters;

	if (level != SOL_CAN_J1939)
		return -EINVAL;

	switch (optname) {
	case SO_J1939_FILTER:
		if (!sockptr_is_null(optval) && optlen != 0) {
			struct j1939_filter *f;
			int c;

			/* must be a whole number of filters, bounded */
			if (optlen % sizeof(*filters) != 0)
				return -EINVAL;

			if (optlen > J1939_FILTER_MAX *
			    sizeof(struct j1939_filter))
				return -EINVAL;

			count = optlen / sizeof(*filters);
			filters = memdup_sockptr(optval, optlen);
			if (IS_ERR(filters))
				return PTR_ERR(filters);

			/* normalize: pre-mask the compare values */
			for (f = filters, c = count; c; f++, c--) {
				f->name &= f->name_mask;
				f->pgn &= f->pgn_mask;
				f->addr &= f->addr_mask;
			}
		}

		/* swap in the new filter list; free the old one outside
		 * the spinlock
		 */
		lock_sock(&jsk->sk);
		spin_lock_bh(&jsk->filters_lock);
		ofilters = jsk->filters;
		jsk->filters = filters;
		jsk->nfilters = count;
		spin_unlock_bh(&jsk->filters_lock);
		release_sock(&jsk->sk);
		kfree(ofilters);
		return 0;
	case SO_J1939_PROMISC:
		return j1939_sk_setsockopt_flag(jsk, optval, optlen,
						J1939_SOCK_PROMISC);
	case SO_J1939_ERRQUEUE:
		ret = j1939_sk_setsockopt_flag(jsk, optval, optlen,
					       J1939_SOCK_ERRQUEUE);
		if (ret < 0)
			return ret;

		/* discard stale error messages when reporting is disabled */
		if (!(jsk->state & J1939_SOCK_ERRQUEUE))
			skb_queue_purge(&sk->sk_error_queue);
		return ret;
	case SO_J1939_SEND_PRIO:
		if (optlen != sizeof(tmp))
			return -EINVAL;
		if (copy_from_sockptr(&tmp, optval, optlen))
			return -EFAULT;
		if (tmp < 0 || tmp > 7)
			return -EDOM;
		/* priorities 0 and 1 are privileged */
		if (tmp < 2 && !capable(CAP_NET_ADMIN))
			return -EPERM;
		lock_sock(&jsk->sk);
		jsk->sk.sk_priority = j1939_to_sk_priority(tmp);
		release_sock(&jsk->sk);
		return 0;
	default:
		return -ENOPROTOOPT;
	}
}
768
/* getsockopt() handler for SOL_CAN_J1939. All currently supported options
 * are plain ints, reported via the common copy-out tail.
 */
static int j1939_sk_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct j1939_sock *jsk = j1939_sk(sk);
	int ret, ulen;
	/* set defaults for using 'int' properties */
	int tmp = 0;
	int len = sizeof(tmp);
	void *val = &tmp;

	if (level != SOL_CAN_J1939)
		return -EINVAL;
	if (get_user(ulen, optlen))
		return -EFAULT;
	if (ulen < 0)
		return -EINVAL;

	lock_sock(&jsk->sk);
	switch (optname) {
	case SO_J1939_PROMISC:
		tmp = (jsk->state & J1939_SOCK_PROMISC) ? 1 : 0;
		break;
	case SO_J1939_ERRQUEUE:
		tmp = (jsk->state & J1939_SOCK_ERRQUEUE) ? 1 : 0;
		break;
	case SO_J1939_SEND_PRIO:
		tmp = j1939_prio(jsk->sk.sk_priority);
		break;
	default:
		ret = -ENOPROTOOPT;
		goto no_copy;
	}

	/* copy to user, based on 'len' & 'val'
	 * but most sockopt's are 'int' properties, and have 'len' & 'val'
	 * left unchanged, but instead modified 'tmp'
	 */
	if (len > ulen)
		ret = -EFAULT;
	else if (put_user(len, optlen))
		ret = -EFAULT;
	else if (copy_to_user(optval, val, len))
		ret = -EFAULT;
	else
		ret = 0;
 no_copy:
	release_sock(&jsk->sk);
	return ret;
}
819
/* recvmsg() handler: dequeue one datagram, copy the payload and attach
 * J1939 ancillary data (destination address/name, priority) plus the
 * source address in msg_name.
 */
static int j1939_sk_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	struct j1939_sk_buff_cb *skcb;
	int ret = 0;

	if (flags & ~(MSG_DONTWAIT | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
		return -EINVAL;

	/* error-queue reads are handled by the generic helper */
	if (flags & MSG_ERRQUEUE)
		return sock_recv_errqueue(sock->sk, msg, size, SOL_CAN_J1939,
					  SCM_J1939_ERRQUEUE);

	skb = skb_recv_datagram(sk, flags, &ret);
	if (!skb)
		return ret;

	if (size < skb->len)
		msg->msg_flags |= MSG_TRUNC;
	else
		size = skb->len;

	ret = memcpy_to_msg(msg, skb->data, size);
	if (ret < 0) {
		skb_free_datagram(sk, skb);
		return ret;
	}

	skcb = j1939_skb_to_cb(skb);
	if (j1939_address_is_valid(skcb->addr.da))
		put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_DEST_ADDR,
			 sizeof(skcb->addr.da), &skcb->addr.da);

	if (skcb->addr.dst_name)
		put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_DEST_NAME,
			 sizeof(skcb->addr.dst_name), &skcb->addr.dst_name);

	put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_PRIO,
		 sizeof(skcb->priority), &skcb->priority);

	if (msg->msg_name) {
		struct sockaddr_can *paddr = msg->msg_name;

		msg->msg_namelen = J1939_MIN_NAMELEN;
		memset(msg->msg_name, 0, msg->msg_namelen);
		paddr->can_family = AF_CAN;
		paddr->can_ifindex = skb->skb_iif;
		paddr->can_addr.j1939.name = skcb->addr.src_name;
		paddr->can_addr.j1939.addr = skcb->addr.sa;
		paddr->can_addr.j1939.pgn = skcb->addr.pgn;
	}

	sock_recv_cmsgs(msg, sk, skb);
	msg->msg_flags |= skcb->msg_flags;
	skb_free_datagram(sk, skb);

	return size;
}
880
/* Allocate and fill one TX skb of @size payload bytes: copy the user data,
 * initialize the J1939 control buffer from the socket state and override
 * destination/PGN from msg_name if supplied.
 *
 * Return: the skb on success, NULL on failure with *errcode set to a
 * negative errno. On success *errcode is set to 0 (memcpy_from_msg result).
 */
static struct sk_buff *j1939_sk_alloc_skb(struct net_device *ndev,
					  struct sock *sk,
					  struct msghdr *msg, size_t size,
					  int *errcode)
{
	struct j1939_sock *jsk = j1939_sk(sk);
	struct j1939_sk_buff_cb *skcb;
	struct sk_buff *skb;
	struct can_skb_ext *csx;
	int ret;

	/* reserve headroom shaped like a can_frame header (no data part) */
	skb = sock_alloc_send_skb(sk,
				  size +
				  sizeof(struct can_frame) -
				  sizeof(((struct can_frame *)NULL)->data),
				  msg->msg_flags & MSG_DONTWAIT, &ret);
	if (!skb)
		goto failure;

	csx = can_skb_ext_add(skb);
	if (!csx) {
		kfree_skb(skb);
		ret = -ENOMEM;
		goto failure;
	}

	csx->can_iif = ndev->ifindex;
	skb_reserve(skb, offsetof(struct can_frame, data));

	ret = memcpy_from_msg(skb_put(skb, size), msg, size);
	if (ret < 0)
		goto free_skb;

	skb->dev = ndev;

	skcb = j1939_skb_to_cb(skb);
	memset(skcb, 0, sizeof(*skcb));
	skcb->addr = jsk->addr;
	skcb->priority = j1939_prio(READ_ONCE(sk->sk_priority));

	if (msg->msg_name) {
		struct sockaddr_can *addr = msg->msg_name;

		/* per-message destination overrides the connect() peer */
		if (addr->can_addr.j1939.name ||
		    addr->can_addr.j1939.addr != J1939_NO_ADDR) {
			skcb->addr.dst_name = addr->can_addr.j1939.name;
			skcb->addr.da = addr->can_addr.j1939.addr;
		}
		if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn))
			skcb->addr.pgn = addr->can_addr.j1939.pgn;
	}

	*errcode = ret;
	return skb;

free_skb:
	kfree_skb(skb);
failure:
	*errcode = ret;
	return NULL;
}
942
943static size_t j1939_sk_opt_stats_get_size(enum j1939_sk_errqueue_type type)
944{
945 switch (type) {
946 case J1939_ERRQUEUE_RX_RTS:
947 return
948 nla_total_size(sizeof(u32)) + /* J1939_NLA_TOTAL_SIZE */
949 nla_total_size(sizeof(u32)) + /* J1939_NLA_PGN */
950 nla_total_size(sizeof(u64)) + /* J1939_NLA_SRC_NAME */
951 nla_total_size(sizeof(u64)) + /* J1939_NLA_DEST_NAME */
952 nla_total_size(sizeof(u8)) + /* J1939_NLA_SRC_ADDR */
953 nla_total_size(sizeof(u8)) + /* J1939_NLA_DEST_ADDR */
954 0;
955 default:
956 return
957 nla_total_size(sizeof(u32)) + /* J1939_NLA_BYTES_ACKED */
958 0;
959 }
960}
961
/* Build the opt_stats skb attached to an errqueue message: RX_RTS carries
 * the full session addressing, all other types carry the number of bytes
 * acknowledged so far.
 * Return: the skb, or NULL on allocation failure.
 */
static struct sk_buff *
j1939_sk_get_timestamping_opt_stats(struct j1939_session *session,
				    enum j1939_sk_errqueue_type type)
{
	struct sk_buff *stats;
	u32 size;

	stats = alloc_skb(j1939_sk_opt_stats_get_size(type), GFP_ATOMIC);
	if (!stats)
		return NULL;

	/* simple sessions are acked as a whole; TP sessions 7 bytes/packet */
	if (session->skcb.addr.type == J1939_SIMPLE)
		size = session->total_message_size;
	else
		size = min(session->pkt.tx_acked * 7,
			   session->total_message_size);

	switch (type) {
	case J1939_ERRQUEUE_RX_RTS:
		nla_put_u32(stats, J1939_NLA_TOTAL_SIZE,
			    session->total_message_size);
		nla_put_u32(stats, J1939_NLA_PGN,
			    session->skcb.addr.pgn);
		nla_put_u64_64bit(stats, J1939_NLA_SRC_NAME,
				  session->skcb.addr.src_name, J1939_NLA_PAD);
		nla_put_u64_64bit(stats, J1939_NLA_DEST_NAME,
				  session->skcb.addr.dst_name, J1939_NLA_PAD);
		nla_put_u8(stats, J1939_NLA_SRC_ADDR,
			   session->skcb.addr.sa);
		nla_put_u8(stats, J1939_NLA_DEST_ADDR,
			   session->skcb.addr.da);
		break;
	default:
		nla_put_u32(stats, J1939_NLA_BYTES_ACKED, size);
	}

	return stats;
}
1000
/* Queue one errqueue notification of @type for @sk, honoring the socket's
 * SOF_TIMESTAMPING_* flags. No-op unless SO_J1939_ERRQUEUE is enabled.
 */
static void __j1939_sk_errqueue(struct j1939_session *session, struct sock *sk,
				enum j1939_sk_errqueue_type type)
{
	struct j1939_priv *priv = session->priv;
	struct j1939_sock *jsk;
	struct sock_exterr_skb *serr;
	struct sk_buff *skb;
	char *state = "UNK";
	u32 tsflags;
	int err;

	jsk = j1939_sk(sk);

	if (!(jsk->state & J1939_SOCK_ERRQUEUE))
		return;

	/* filter by the timestamping flags the user asked for */
	tsflags = READ_ONCE(sk->sk_tsflags);
	switch (type) {
	case J1939_ERRQUEUE_TX_ACK:
		if (!(tsflags & SOF_TIMESTAMPING_TX_ACK))
			return;
		break;
	case J1939_ERRQUEUE_TX_SCHED:
		if (!(tsflags & SOF_TIMESTAMPING_TX_SCHED))
			return;
		break;
	case J1939_ERRQUEUE_TX_ABORT:
		break;
	case J1939_ERRQUEUE_RX_RTS:
		fallthrough;
	case J1939_ERRQUEUE_RX_DPO:
		fallthrough;
	case J1939_ERRQUEUE_RX_ABORT:
		if (!(tsflags & SOF_TIMESTAMPING_RX_SOFTWARE))
			return;
		break;
	default:
		netdev_err(priv->ndev, "Unknown errqueue type %i\n", type);
	}

	skb = j1939_sk_get_timestamping_opt_stats(session, type);
	if (!skb)
		return;

	skb->tstamp = ktime_get_real();

	BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));

	/* fill the extended-error metadata for this notification type */
	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	switch (type) {
	case J1939_ERRQUEUE_TX_ACK:
		serr->ee.ee_errno = ENOMSG;
		serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
		serr->ee.ee_info = SCM_TSTAMP_ACK;
		state = "TX ACK";
		break;
	case J1939_ERRQUEUE_TX_SCHED:
		serr->ee.ee_errno = ENOMSG;
		serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
		serr->ee.ee_info = SCM_TSTAMP_SCHED;
		state = "TX SCH";
		break;
	case J1939_ERRQUEUE_TX_ABORT:
		serr->ee.ee_errno = session->err;
		serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
		serr->ee.ee_info = J1939_EE_INFO_TX_ABORT;
		state = "TX ABT";
		break;
	case J1939_ERRQUEUE_RX_RTS:
		serr->ee.ee_errno = ENOMSG;
		serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
		serr->ee.ee_info = J1939_EE_INFO_RX_RTS;
		state = "RX RTS";
		break;
	case J1939_ERRQUEUE_RX_DPO:
		serr->ee.ee_errno = ENOMSG;
		serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
		serr->ee.ee_info = J1939_EE_INFO_RX_DPO;
		state = "RX DPO";
		break;
	case J1939_ERRQUEUE_RX_ABORT:
		serr->ee.ee_errno = session->err;
		serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
		serr->ee.ee_info = J1939_EE_INFO_RX_ABORT;
		state = "RX ABT";
		break;
	}

	serr->opt_stats = true;
	if (tsflags & SOF_TIMESTAMPING_OPT_ID)
		serr->ee.ee_data = session->tskey;

	netdev_dbg(session->priv->ndev, "%s: 0x%p tskey: %i, state: %s\n",
		   __func__, session, session->tskey, state);
	err = sock_queue_err_skb(sk, skb);

	if (err)
		kfree_skb(skb);
};
1101
/* Dispatch an errqueue notification: TX sessions notify their own socket,
 * socket-less RX sessions notify every matching subscribed socket.
 */
void j1939_sk_errqueue(struct j1939_session *session,
		       enum j1939_sk_errqueue_type type)
{
	struct j1939_priv *priv = session->priv;
	struct j1939_sock *jsk;

	if (session->sk) {
		/* send TX notifications to the socket of origin */
		__j1939_sk_errqueue(session, session->sk, type);
		return;
	}

	/* spread RX notifications to all sockets subscribed to this session */
	read_lock_bh(&priv->j1939_socks_lock);
	list_for_each_entry(jsk, &priv->j1939_socks, list) {
		if (j1939_sk_recv_match_one(jsk, &session->skcb))
			__j1939_sk_errqueue(session, &jsk->sk, type);
	}
	read_unlock_bh(&priv->j1939_socks_lock);
};
1122
/* Report a TX abort via sk_err, unless the socket uses the error queue
 * (which delivers the abort via __j1939_sk_errqueue() instead).
 */
void j1939_sk_send_loop_abort(struct sock *sk, int err)
{
	struct j1939_sock *jsk = j1939_sk(sk);

	if (jsk->state & J1939_SOCK_ERRQUEUE)
		return;

	sk->sk_err = err;

	sk_error_report(sk);
}
1134
/* Core send loop: copy the user message into one or more session skbs.
 * Continues an incomplete session if present (its remaining size must match
 * @size exactly), otherwise creates and activates a new TP session.
 *
 * Return: bytes queued on (partial) success, negative errno on error.
 */
static int j1939_sk_send_loop(struct j1939_priv *priv, struct sock *sk,
			      struct msghdr *msg, size_t size)

{
	struct j1939_sock *jsk = j1939_sk(sk);
	struct j1939_session *session = j1939_sk_get_incomplete_session(jsk);
	struct sk_buff *skb;
	size_t segment_size, todo_size;
	int ret = 0;

	/* a continued session must be completed by exactly this much data */
	if (session &&
	    session->total_message_size != session->total_queued_size + size) {
		j1939_session_put(session);
		return -EIO;
	}

	todo_size = size;

	do {
		struct j1939_sk_buff_cb *skcb;

		segment_size = min_t(size_t, J1939_MAX_TP_PACKET_SIZE,
				     todo_size);

		/* Allocate skb for one segment */
		skb = j1939_sk_alloc_skb(priv->ndev, sk, msg, segment_size,
					 &ret);
		if (ret)
			break;

		skcb = j1939_skb_to_cb(skb);

		if (!session) {
			/* at this point the size should be full size
			 * of the session
			 */
			skcb->offset = 0;
			session = j1939_tp_send(priv, skb, size);
			if (IS_ERR(session)) {
				ret = PTR_ERR(session);
				goto kfree_skb;
			}
			if (j1939_sk_queue_session(session)) {
				/* try to activate session if we are
				 * first in the queue
				 */
				if (!j1939_session_activate(session)) {
					j1939_tp_schedule_txtimer(session, 0);
				} else {
					ret = -EBUSY;
					session->err = ret;
					j1939_sk_queue_drop_all(priv, jsk,
								EBUSY);
					break;
				}
			}
		} else {
			skcb->offset = session->total_queued_size;
			j1939_session_skb_queue(session, skb);
		}

		todo_size -= segment_size;
		session->total_queued_size += segment_size;
	} while (todo_size);

	switch (ret) {
	case 0: /* OK */
		if (todo_size)
			netdev_warn(priv->ndev,
				    "no error found and not completely queued?! %zu\n",
				    todo_size);
		ret = size;
		break;
	case -ERESTARTSYS:
		ret = -EINTR;
		fallthrough;
	case -EAGAIN: /* OK */
		/* report partial progress if anything was queued */
		if (todo_size != size)
			ret = size - todo_size;
		break;
	default: /* ERROR */
		break;
	}

	if (session)
		j1939_session_put(session);

	return ret;

 kfree_skb:
	kfree_skb(skb);
	return ret;
}
1228
1229static int j1939_sk_sendmsg(struct socket *sock, struct msghdr *msg,
1230 size_t size)
1231{
1232 struct sock *sk = sock->sk;
1233 struct j1939_sock *jsk = j1939_sk(sk);
1234 struct j1939_priv *priv;
1235 int ifindex;
1236 int ret;
1237
1238 lock_sock(sock->sk);
1239 /* various socket state tests */
1240 if (!(jsk->state & J1939_SOCK_BOUND)) {
1241 ret = -EBADFD;
1242 goto sendmsg_done;
1243 }
1244
1245 priv = jsk->priv;
1246 ifindex = jsk->ifindex;
1247
1248 if (!jsk->addr.src_name && jsk->addr.sa == J1939_NO_ADDR) {
1249 /* no source address assigned yet */
1250 ret = -EBADFD;
1251 goto sendmsg_done;
1252 }
1253
1254 /* deal with provided destination address info */
1255 if (msg->msg_name) {
1256 struct sockaddr_can *addr = msg->msg_name;
1257
1258 if (msg->msg_namelen < J1939_MIN_NAMELEN) {
1259 ret = -EINVAL;
1260 goto sendmsg_done;
1261 }
1262
1263 if (addr->can_family != AF_CAN) {
1264 ret = -EINVAL;
1265 goto sendmsg_done;
1266 }
1267
1268 if (addr->can_ifindex && addr->can_ifindex != ifindex) {
1269 ret = -EBADFD;
1270 goto sendmsg_done;
1271 }
1272
1273 if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn) &&
1274 !j1939_pgn_is_clean_pdu(addr->can_addr.j1939.pgn)) {
1275 ret = -EINVAL;
1276 goto sendmsg_done;
1277 }
1278
1279 if (!addr->can_addr.j1939.name &&
1280 addr->can_addr.j1939.addr == J1939_NO_ADDR &&
1281 !sock_flag(sk, SOCK_BROADCAST)) {
1282 /* broadcast, but SO_BROADCAST not set */
1283 ret = -EACCES;
1284 goto sendmsg_done;
1285 }
1286 } else {
1287 if (!jsk->addr.dst_name && jsk->addr.da == J1939_NO_ADDR &&
1288 !sock_flag(sk, SOCK_BROADCAST)) {
1289 /* broadcast, but SO_BROADCAST not set */
1290 ret = -EACCES;
1291 goto sendmsg_done;
1292 }
1293 }
1294
1295 ret = j1939_sk_send_loop(priv, sk, msg, size);
1296
1297sendmsg_done:
1298 release_sock(sock->sk);
1299
1300 return ret;
1301}
1302
1303void j1939_sk_netdev_event_netdown(struct j1939_priv *priv)
1304{
1305 struct j1939_sock *jsk;
1306 int error_code = ENETDOWN;
1307
1308 read_lock_bh(&priv->j1939_socks_lock);
1309 list_for_each_entry(jsk, &priv->j1939_socks, list) {
1310 jsk->sk.sk_err = error_code;
1311 if (!sock_flag(&jsk->sk, SOCK_DEAD))
1312 sk_error_report(&jsk->sk);
1313
1314 j1939_sk_queue_drop_all(priv, jsk, error_code);
1315 }
1316 read_unlock_bh(&priv->j1939_socks_lock);
1317}
1318
/* The netdevice is being unregistered: forcibly unbind every socket
 * still bound to "priv". Since unbinding needs the socket lock, which
 * cannot be taken under j1939_socks_lock, the list lock is dropped for
 * each candidate and the list walk restarted from scratch afterwards.
 */
void j1939_sk_netdev_event_unregister(struct j1939_priv *priv)
{
	struct sock *sk;
	struct j1939_sock *jsk;
	bool wait_rcu = false;

rescan: /* The caller is holding a ref on this "priv" via j1939_priv_get_by_ndev(). */
	read_lock_bh(&priv->j1939_socks_lock);
	list_for_each_entry(jsk, &priv->j1939_socks, list) {
		/* Skip if j1939_jsk_add() is not called on this socket. */
		if (!(jsk->state & J1939_SOCK_BOUND))
			continue;
		sk = &jsk->sk;
		/* hold the socket so it survives dropping the list lock */
		sock_hold(sk);
		read_unlock_bh(&priv->j1939_socks_lock);
		/* Check if j1939_jsk_del() is not yet called on this socket after holding
		 * socket's lock, for both j1939_sk_bind() and j1939_sk_release() call
		 * j1939_jsk_del() with socket's lock held.
		 */
		lock_sock(sk);
		if (jsk->state & J1939_SOCK_BOUND) {
			/* Neither j1939_sk_bind() nor j1939_sk_release() called j1939_jsk_del().
			 * Make this socket no longer bound, by pretending as if j1939_sk_bind()
			 * dropped old references but did not get new references.
			 */
			j1939_jsk_del(priv, jsk);
			j1939_local_ecu_put(priv, jsk->addr.src_name, jsk->addr.sa);
			j1939_netdev_stop(priv);
			/* Call j1939_priv_put() now and prevent j1939_sk_sock_destruct() from
			 * calling the corresponding j1939_priv_put().
			 *
			 * j1939_sk_sock_destruct() is supposed to call j1939_priv_put() after
			 * an RCU grace period. But since the caller is holding a ref on this
			 * "priv", we can defer synchronize_rcu() until immediately before
			 * the caller calls j1939_priv_put().
			 */
			j1939_priv_put(priv);
			jsk->priv = NULL;
			wait_rcu = true;
		}
		release_sock(sk);
		sock_put(sk);
		goto rescan;
	}
	read_unlock_bh(&priv->j1939_socks_lock);
	if (wait_rcu)
		synchronize_rcu();
}
1367
/* .ioctl stub: j1939 implements no socket-level ioctls */
static int j1939_sk_no_ioctlcmd(struct socket *sock, unsigned int cmd,
				unsigned long arg)
{
	/* no ioctls for socket layer -> hand it down to NIC layer */
	return -ENOIOCTLCMD;
}
1374
/* proto_ops for CAN_J1939 sockets; unsupported operations are routed
 * to the generic sock_no_*() stubs
 */
static const struct proto_ops j1939_ops = {
	.family = PF_CAN,
	.release = j1939_sk_release,
	.bind = j1939_sk_bind,
	.connect = j1939_sk_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = j1939_sk_getname,
	.poll = datagram_poll,
	.ioctl = j1939_sk_no_ioctlcmd,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = j1939_sk_setsockopt,
	.getsockopt = j1939_sk_getsockopt,
	.sendmsg = j1939_sk_sendmsg,
	.recvmsg = j1939_sk_recvmsg,
	.mmap = sock_no_mmap,
};
1393
/* proto description: each socket allocates a struct j1939_sock,
 * initialized by j1939_sk_init()
 */
static struct proto j1939_proto __read_mostly = {
	.name = "CAN_J1939",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct j1939_sock),
	.init = j1939_sk_init,
};
1400
/* registration record hooking J1939 into the CAN protocol family
 * (SOCK_DGRAM sockets with protocol CAN_J1939)
 */
const struct can_proto j1939_can_proto = {
	.type = SOCK_DGRAM,
	.protocol = CAN_J1939,
	.ops = &j1939_ops,
	.prot = &j1939_proto,
};