/*
 * net/tipc/msg.c: TIPC message header routines
 *
 * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <net/sock.h>
#include "core.h"
#include "msg.h"
#include "addr.h"
#include "name_table.h"
#include "crypto.h"

#define BUF_ALIGN(x) ALIGN(x, 4)
#define MAX_FORWARD_SIZE 1024
#ifdef CONFIG_TIPC_CRYPTO
#define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16)
#define BUF_OVERHEAD (BUF_HEADROOM + TIPC_AES_GCM_TAG_SIZE)
#else
#define BUF_HEADROOM (LL_MAX_HEADER + 48)
#define BUF_OVERHEAD BUF_HEADROOM
#endif

const int one_page_mtu = PAGE_SIZE - SKB_DATA_ALIGN(BUF_OVERHEAD) -
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
/**
 * tipc_buf_acquire - creates a TIPC message buffer
 * @size: message size (including TIPC header)
 * @gfp: memory allocation flags
 *
 * Return: a new buffer with data pointers set to the specified size.
 *
 * NOTE:
 * Headroom is reserved to allow prepending of a data link header.
 * There may also be unrequested tailroom present at the buffer's end.
 */
struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = alloc_skb_fclone(BUF_OVERHEAD + size, gfp);
	if (skb) {
		skb_reserve(skb, BUF_HEADROOM);
		skb_put(skb, size);
		skb->next = NULL;
	}
	return skb;
}
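
/* Editor's sketch (illustrative; the payload size is an arbitrary example
 * value): callers size the buffer for header plus data only, since the
 * BUF_HEADROOM needed for the data link header is reserved internally.
 *
 *	struct sk_buff *skb;
 *
 *	skb = tipc_buf_acquire(BASIC_H_SIZE + 100, GFP_ATOMIC);
 *	if (unlikely(!skb))
 *		return -ENOMEM;
 *	// skb->data points at the TIPC header area; skb->len is already
 *	// set to the requested size
 */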

/* tipc_msg_init(): initialize a TIPC message header in place */
void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
		   u32 hsize, u32 dnode)
{
	memset(m, 0, hsize);
	msg_set_version(m);
	msg_set_user(m, user);
	msg_set_hdr_sz(m, hsize);
	msg_set_size(m, hsize);
	msg_set_prevnode(m, own_node);
	msg_set_type(m, type);
	if (hsize > SHORT_H_SIZE) {
		msg_set_orignode(m, own_node);
		msg_set_destnode(m, dnode);
	}
}

/* tipc_msg_create(): allocate a buffer and build a complete message in it */
struct sk_buff *tipc_msg_create(uint user, uint type,
				uint hdr_sz, uint data_sz, u32 dnode,
				u32 onode, u32 dport, u32 oport, int errcode)
{
	struct tipc_msg *msg;
	struct sk_buff *buf;

	buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
	if (unlikely(!buf))
		return NULL;

	msg = buf_msg(buf);
	tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
	msg_set_size(msg, hdr_sz + data_sz);
	msg_set_origport(msg, oport);
	msg_set_destport(msg, dport);
	msg_set_errcode(msg, errcode);
	return buf;
}
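
/* Editor's sketch (illustrative; the addresses and ports are hypothetical
 * variables): building a self-contained connection-level message, in the
 * style of the socket shutdown path elsewhere in TIPC.
 *
 *	struct sk_buff *skb;
 *
 *	skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
 *			      BASIC_H_SIZE, 0, dnode, onode,
 *			      dport, oport, TIPC_CONN_SHUTDOWN);
 *	if (unlikely(!skb))
 *		return -ENOMEM;
 */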

/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
 * @*headbuf: in:  NULL for first frag, otherwise value returned from prev call
 *            out: set when successful non-complete reassembly, otherwise NULL
 * @*buf:     in:  the buffer to append. Always defined
 *            out: head buf after successful complete reassembly, otherwise NULL
 * Returns 1 when reassembly complete, otherwise 0
 */
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
	struct sk_buff *head = *headbuf;
	struct sk_buff *frag = *buf;
	struct sk_buff *tail = NULL;
	struct tipc_msg *msg;
	u32 fragid;
	int delta;
	bool headstolen;

	if (!frag)
		goto err;

	msg = buf_msg(frag);
	fragid = msg_type(msg);
	frag->next = NULL;
	skb_pull(frag, msg_hdr_sz(msg));

	if (fragid == FIRST_FRAGMENT) {
		if (unlikely(head))
			goto err;
		if (skb_has_frag_list(frag) && __skb_linearize(frag))
			goto err;
		*buf = NULL;
		frag = skb_unshare(frag, GFP_ATOMIC);
		if (unlikely(!frag))
			goto err;
		head = *headbuf = frag;
		TIPC_SKB_CB(head)->tail = NULL;
		return 0;
	}

	if (!head)
		goto err;

	/* Either the input skb ownership is transferred to headskb
	 * or the input skb is freed, clear the reference to avoid
	 * bad access on error path.
	 */
	*buf = NULL;
	if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
		kfree_skb_partial(frag, headstolen);
	} else {
		tail = TIPC_SKB_CB(head)->tail;
		if (!skb_has_frag_list(head))
			skb_shinfo(head)->frag_list = frag;
		else
			tail->next = frag;
		head->truesize += frag->truesize;
		head->data_len += frag->len;
		head->len += frag->len;
		TIPC_SKB_CB(head)->tail = frag;
	}

	if (fragid == LAST_FRAGMENT) {
		TIPC_SKB_CB(head)->validated = 0;

		/* If the reassembled skb has been freed in
		 * tipc_msg_validate() because of an invalid truesize,
		 * then head will point to a newly allocated reassembled
		 * skb, while *headbuf points to the freed reassembled skb.
		 * In such cases, correct *headbuf for freeing the newly
		 * allocated reassembled skb later.
		 */
		if (unlikely(!tipc_msg_validate(&head))) {
			if (head != *headbuf)
				*headbuf = head;
			goto err;
		}

		*buf = head;
		TIPC_SKB_CB(head)->tail = NULL;
		*headbuf = NULL;
		return 1;
	}
	return 0;
err:
	kfree_skb(*buf);
	kfree_skb(*headbuf);
	*buf = *headbuf = NULL;
	return 0;
}
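
/* Editor's sketch of the reassembly contract from the caller's side
 * (illustrative; "frags" and deliver() are hypothetical). Fragments are
 * fed in one at a time; a return of 1 leaves the complete message in
 * "skb", a return of 0 keeps state in "head", and on error both pointers
 * come back NULL.
 *
 *	struct sk_buff *head = NULL, *skb;
 *
 *	while ((skb = __skb_dequeue(frags))) {
 *		if (tipc_buf_append(&head, &skb)) {
 *			deliver(skb);	// reassembly complete
 *			break;
 *		}
 *		if (!head)
 *			break;		// error: buffers already freed
 *	}
 */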

/**
 * tipc_msg_append(): Append data to tail of an existing buffer queue
 * @_hdr: header to be used
 * @m: the data to be appended
 * @mss: max allowable size of buffer
 * @dlen: size of data to be appended
 * @txq: queue to append to
 *
 * Return: the number of 1k blocks appended or errno value
 */
int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
		    int mss, struct sk_buff_head *txq)
{
	struct sk_buff *skb;
	int accounted, total, curr;
	int mlen, cpy, rem = dlen;
	struct tipc_msg *hdr;

	skb = skb_peek_tail(txq);
	accounted = skb ? msg_blocks(buf_msg(skb)) : 0;
	total = accounted;

	do {
		if (!skb || skb->len >= mss) {
			skb = tipc_buf_acquire(mss, GFP_KERNEL);
			if (unlikely(!skb))
				return -ENOMEM;
			skb_orphan(skb);
			skb_trim(skb, MIN_H_SIZE);
			hdr = buf_msg(skb);
			skb_copy_to_linear_data(skb, _hdr, MIN_H_SIZE);
			msg_set_hdr_sz(hdr, MIN_H_SIZE);
			msg_set_size(hdr, MIN_H_SIZE);
			__skb_queue_tail(txq, skb);
			total += 1;
		}
		hdr = buf_msg(skb);
		curr = msg_blocks(hdr);
		mlen = msg_size(hdr);
		cpy = min_t(size_t, rem, mss - mlen);
		if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter))
			return -EFAULT;
		msg_set_size(hdr, mlen + cpy);
		skb_put(skb, cpy);
		rem -= cpy;
		total += msg_blocks(hdr) - curr;
	} while (rem > 0);
	return total - accounted;
}
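
/* Editor's sketch (illustrative, hypothetical names): a stream send path
 * appends user data from a msghdr to the tail of its transmit queue and
 * feeds the returned 1k-block count into its congestion accounting.
 *
 *	int rc, blocks = 0;
 *
 *	rc = tipc_msg_append(hdr, m, send, maxnagle, txq);
 *	if (unlikely(rc < 0))
 *		return rc;	// -ENOMEM or -EFAULT
 *	blocks += rc;		// debit the send window by "blocks"
 */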

/* tipc_msg_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should. The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
bool tipc_msg_validate(struct sk_buff **_skb)
{
	struct sk_buff *skb = *_skb;
	struct tipc_msg *hdr;
	int msz, hsz;

	/* Ensure that flow control ratio condition is satisfied */
	if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) {
		skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC);
		if (!skb)
			return false;
		kfree_skb(*_skb);
		*_skb = skb;
	}

	if (unlikely(TIPC_SKB_CB(skb)->validated))
		return true;

	if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
		return false;

	hsz = msg_hdr_sz(buf_msg(skb));
	if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE))
		return false;
	if (unlikely(!pskb_may_pull(skb, hsz)))
		return false;

	hdr = buf_msg(skb);
	if (unlikely(msg_version(hdr) != TIPC_VERSION))
		return false;

	msz = msg_size(hdr);
	if (unlikely(msz < hsz))
		return false;
	if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
		return false;
	if (unlikely(skb->len < msz))
		return false;

	TIPC_SKB_CB(skb)->validated = 1;
	return true;
}
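
/* Editor's sketch (illustrative): receive paths validate before reading
 * any header field. Note the double pointer: validation may replace the
 * skb when the truesize/len ratio check forces a copy, so callers must
 * reload their header pointer afterwards.
 *
 *	if (unlikely(!tipc_msg_validate(&skb)))
 *		goto discard;	// caller still owns and frees the buffer
 *	hdr = buf_msg(skb);	// safe: the full header is linear now
 */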

/**
 * tipc_msg_fragment - build a fragment skb list for TIPC message
 *
 * @skb: TIPC message skb
 * @hdr: internal msg header to be put on the top of the fragments
 * @pktmax: max size of a fragment incl. the header
 * @frags: returned fragment skb list
 *
 * Return: 0 if the fragmentation is successful, otherwise: -EINVAL
 * or -ENOMEM
 */
int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
		      int pktmax, struct sk_buff_head *frags)
{
	int pktno, nof_fragms, dsz, dmax, eat;
	struct tipc_msg *_hdr;
	struct sk_buff *_skb;
	u8 *data;

	/* Non-linear buffer? */
	if (skb_linearize(skb))
		return -ENOMEM;

	data = (u8 *)skb->data;
	dsz = msg_size(buf_msg(skb));
	dmax = pktmax - INT_H_SIZE;
	if (dsz <= dmax || !dmax)
		return -EINVAL;

	nof_fragms = dsz / dmax + 1;
	for (pktno = 1; pktno <= nof_fragms; pktno++) {
		if (pktno < nof_fragms)
			eat = dmax;
		else
			eat = dsz % dmax;
		/* Allocate a new fragment */
		_skb = tipc_buf_acquire(INT_H_SIZE + eat, GFP_ATOMIC);
		if (!_skb)
			goto error;
		skb_orphan(_skb);
		__skb_queue_tail(frags, _skb);
		/* Copy header & data to the fragment */
		skb_copy_to_linear_data(_skb, hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(_skb, INT_H_SIZE, data, eat);
		data += eat;
		/* Update the fragment's header */
		_hdr = buf_msg(_skb);
		msg_set_fragm_no(_hdr, pktno);
		msg_set_nof_fragms(_hdr, nof_fragms);
		msg_set_size(_hdr, INT_H_SIZE + eat);
	}
	return 0;

error:
	__skb_queue_purge(frags);
	__skb_queue_head_init(frags);
	return -ENOMEM;
}
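
/* Editor's sketch (hypothetical names; "fraghdr" stands for a prepared
 * INT_H_SIZE MSG_FRAGMENTER header like the one built in tipc_msg_build()
 * below): splitting one oversized message for a smaller link MTU.
 *
 *	struct sk_buff_head frags;
 *	int rc;
 *
 *	__skb_queue_head_init(&frags);
 *	rc = tipc_msg_fragment(skb, &fraghdr, mtu, &frags);
 *	if (rc)
 *		return rc;	// -EINVAL if it already fits, else -ENOMEM
 *	skb_queue_splice_tail(&frags, xmitq);	// hand off all fragments
 */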

/**
 * tipc_msg_build - create buffer chain containing specified header and data
 * @mhdr: Message header, to be prepended to data
 * @m: User message
 * @offset: buffer offset for fragmented messages (FIXME)
 * @dsz: Total length of user data
 * @pktmax: Max packet size that can be used
 * @list: Buffer or chain of buffers to be returned to caller
 *
 * Note that the recursive call we are making here is safe, since it can
 * logically go only one further level down.
 *
 * Return: message data size or errno: -ENOMEM, -EFAULT
 */
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
		   int dsz, int pktmax, struct sk_buff_head *list)
{
	int mhsz = msg_hdr_sz(mhdr);
	struct tipc_msg pkthdr;
	int msz = mhsz + dsz;
	int pktrem = pktmax;
	struct sk_buff *skb;
	int drem = dsz;
	int pktno = 1;
	char *pktpos;
	int pktsz;
	int rc;

	msg_set_size(mhdr, msz);

	/* No fragmentation needed? */
	if (likely(msz <= pktmax)) {
		skb = tipc_buf_acquire(msz, GFP_KERNEL);

		/* Fall back to smaller MTU if node local message */
		if (unlikely(!skb)) {
			if (pktmax != MAX_MSG_SIZE)
				return -ENOMEM;
			rc = tipc_msg_build(mhdr, m, offset, dsz,
					    one_page_mtu, list);
			if (rc != dsz)
				return rc;
			if (tipc_msg_assemble(list))
				return dsz;
			return -ENOMEM;
		}
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		skb_copy_to_linear_data(skb, mhdr, mhsz);
		pktpos = skb->data + mhsz;
		if (copy_from_iter_full(pktpos, dsz, &m->msg_iter))
			return dsz;
		rc = -EFAULT;
		goto error;
	}

	/* Prepare reusable fragment header */
	tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
		      FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
	msg_set_size(&pkthdr, pktmax);
	msg_set_fragm_no(&pkthdr, pktno);
	msg_set_importance(&pkthdr, msg_importance(mhdr));

	/* Prepare first fragment */
	skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_orphan(skb);
	__skb_queue_tail(list, skb);
	pktpos = skb->data;
	skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
	pktpos += INT_H_SIZE;
	pktrem -= INT_H_SIZE;
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
	pktpos += mhsz;
	pktrem -= mhsz;

	do {
		if (drem < pktrem)
			pktrem = drem;

		if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) {
			rc = -EFAULT;
			goto error;
		}
		drem -= pktrem;

		if (!drem)
			break;

		/* Prepare new fragment: */
		if (drem < (pktmax - INT_H_SIZE))
			pktsz = drem + INT_H_SIZE;
		else
			pktsz = pktmax;
		skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
		if (!skb) {
			rc = -ENOMEM;
			goto error;
		}
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		msg_set_type(&pkthdr, FRAGMENT);
		msg_set_size(&pkthdr, pktsz);
		msg_set_fragm_no(&pkthdr, ++pktno);
		skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
		pktpos = skb->data + INT_H_SIZE;
		pktrem = pktsz - INT_H_SIZE;

	} while (1);
	msg_set_type(buf_msg(skb), LAST_FRAGMENT);
	return dsz;
error:
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	return rc;
}
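
/* Editor's sketch (illustrative, hypothetical variable names): a send path
 * turns a prepared header plus the user iov into a ready-to-send chain;
 * success is signalled by the return value equalling the data length.
 *
 *	struct sk_buff_head pkts;
 *	int rc;
 *
 *	__skb_queue_head_init(&pkts);
 *	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
 *	if (unlikely(rc != dlen))
 *		return rc;	// -ENOMEM or -EFAULT; list already purged
 */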

/**
 * tipc_msg_bundle - Append contents of a buffer to tail of an existing one
 * @bskb: the bundle buffer to append to
 * @msg: message to be appended
 * @max: max allowable size for the bundle buffer
 *
 * Return: "true" if bundling has been performed, otherwise "false"
 */
static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg,
			    u32 max)
{
	struct tipc_msg *bmsg = buf_msg(bskb);
	u32 msz, bsz, offset, pad;

	msz = msg_size(msg);
	bsz = msg_size(bmsg);
	offset = BUF_ALIGN(bsz);
	pad = offset - bsz;

	if (unlikely(skb_tailroom(bskb) < (pad + msz)))
		return false;
	if (unlikely(max < (offset + msz)))
		return false;

	skb_put(bskb, pad + msz);
	skb_copy_to_linear_data_offset(bskb, offset, msg, msz);
	msg_set_size(bmsg, offset + msz);
	msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
	return true;
}

/**
 * tipc_msg_try_bundle - Try to bundle a new message to the last one
 * @tskb: the last/target message to which the new one will be appended
 * @skb: the new message skb pointer
 * @mss: max message size (header inclusive)
 * @dnode: destination node for the message
 * @new_bundle: if this call made a new bundle or not
 *
 * Return: "true" if the new message skb is a candidate for bundling, now or
 * later; when bundling has been done this time, the skb is consumed (the skb
 * pointer is set to NULL).
 * Otherwise, "false" if the skb cannot be bundled at all.
 */
bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
			 u32 dnode, bool *new_bundle)
{
	struct tipc_msg *msg, *inner, *outer;
	u32 tsz;

	/* First, check if the new buffer is suitable for bundling */
	msg = buf_msg(*skb);
	if (msg_user(msg) == MSG_FRAGMENTER)
		return false;
	if (msg_user(msg) == TUNNEL_PROTOCOL)
		return false;
	if (msg_user(msg) == BCAST_PROTOCOL)
		return false;
	if (mss <= INT_H_SIZE + msg_size(msg))
		return false;

	/* Ok, but the last/target buffer can be empty? */
	if (unlikely(!tskb))
		return true;

	/* Is it a bundle already? Try to bundle the new message to it */
	if (msg_user(buf_msg(tskb)) == MSG_BUNDLER) {
		*new_bundle = false;
		goto bundle;
	}

	/* Make a new bundle of the two messages if possible */
	tsz = msg_size(buf_msg(tskb));
	if (unlikely(mss < BUF_ALIGN(INT_H_SIZE + tsz) + msg_size(msg)))
		return true;
	if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE,
				      GFP_ATOMIC)))
		return true;
	inner = buf_msg(tskb);
	skb_push(tskb, INT_H_SIZE);
	outer = buf_msg(tskb);
	tipc_msg_init(msg_prevnode(inner), outer, MSG_BUNDLER, 0, INT_H_SIZE,
		      dnode);
	msg_set_importance(outer, msg_importance(inner));
	msg_set_size(outer, INT_H_SIZE + tsz);
	msg_set_msgcnt(outer, 1);
	*new_bundle = true;

bundle:
	if (likely(tipc_msg_bundle(tskb, msg, mss))) {
		consume_skb(*skb);
		*skb = NULL;
	}
	return true;
}
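
/* Editor's sketch of the tri-state contract (illustrative; "backlogq" is a
 * hypothetical queue): the caller probes the queue tail, then either finds
 * the skb consumed (bundled), queues it as a future bundling target, or
 * queues it knowing it can never be bundled.
 *
 *	bool new_bundle;
 *	struct sk_buff *tail = skb_peek_tail(backlogq);
 *
 *	if (tipc_msg_try_bundle(tail, &skb, mss, dnode, &new_bundle)) {
 *		if (!skb)
 *			return 0;	// consumed: bundled into tail
 *	}
 *	__skb_queue_tail(backlogq, skb);	// queue as-is
 */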

/**
 * tipc_msg_extract(): extract bundled inner packet from buffer
 * @skb: buffer to be extracted from.
 * @iskb: extracted inner buffer, to be returned
 * @pos: position in outer message of msg to be extracted; updated to the
 *       position of the next msg on return
 * Consumes outer buffer when last packet extracted
 * Return: true when there is an extracted buffer, otherwise false
 */
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
{
	struct tipc_msg *hdr, *ihdr;
	int imsz;

	*iskb = NULL;
	if (unlikely(skb_linearize(skb)))
		goto none;

	hdr = buf_msg(skb);
	if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE)))
		goto none;

	ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos);
	imsz = msg_size(ihdr);

	if ((*pos + imsz) > msg_data_sz(hdr))
		goto none;

	*iskb = tipc_buf_acquire(imsz, GFP_ATOMIC);
	if (!*iskb)
		goto none;

	skb_copy_to_linear_data(*iskb, ihdr, imsz);
	if (unlikely(!tipc_msg_validate(iskb)))
		goto none;

	*pos += BUF_ALIGN(imsz);
	return true;
none:
	kfree_skb(skb);
	kfree_skb(*iskb);
	*iskb = NULL;
	return false;
}
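
/* Editor's sketch (illustrative; deliver() is hypothetical): draining all
 * inner messages from a MSG_BUNDLER buffer. tipc_msg_extract() advances
 * *pos on each call and consumes the outer skb when iteration ends.
 *
 *	struct sk_buff *iskb;
 *	int pos = 0;
 *
 *	while (tipc_msg_extract(skb, &iskb, &pos))
 *		deliver(iskb);
 *	// do not touch skb here: the outer buffer has been freed
 */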

/**
 * tipc_msg_reverse(): swap source and destination addresses and add error code
 * @own_node: originating node id for reversed message
 * @skb: buffer containing message to be reversed; will be consumed
 * @err: error code to be set in message, if any
 * Replaces consumed buffer with new one when successful
 * Return: true if success, otherwise false
 */
bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
{
	struct sk_buff *_skb = *skb;
	struct tipc_msg *_hdr, *hdr;
	int hlen, dlen;

	if (skb_linearize(_skb))
		goto exit;
	_hdr = buf_msg(_skb);
	dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE);
	hlen = msg_hdr_sz(_hdr);

	if (msg_dest_droppable(_hdr))
		goto exit;
	if (msg_errcode(_hdr))
		goto exit;

	/* Never return SHORT header */
	if (hlen == SHORT_H_SIZE)
		hlen = BASIC_H_SIZE;

	/* Don't return data along with SYN+, - sender has a clone */
	if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD)
		dlen = 0;

	/* Allocate new buffer to return */
	*skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC);
	if (!*skb)
		goto exit;
	memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr));
	memcpy((*skb)->data + hlen, msg_data(_hdr), dlen);

	/* Build reverse header in new buffer */
	hdr = buf_msg(*skb);
	msg_set_hdr_sz(hdr, hlen);
	msg_set_errcode(hdr, err);
	msg_set_non_seq(hdr, 0);
	msg_set_origport(hdr, msg_destport(_hdr));
	msg_set_destport(hdr, msg_origport(_hdr));
	msg_set_destnode(hdr, msg_prevnode(_hdr));
	msg_set_prevnode(hdr, own_node);
	msg_set_orignode(hdr, own_node);
	msg_set_size(hdr, hlen + dlen);
	skb_orphan(_skb);
	kfree_skb(_skb);
	return true;
exit:
	kfree_skb(_skb);
	*skb = NULL;
	return false;
}
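
/* Editor's note: tipc_skb_reject() at the end of this file is the
 * canonical wrapper around this routine; the pattern is simply:
 *
 *	if (tipc_msg_reverse(tipc_own_addr(net), &skb, err))
 *		__skb_queue_tail(xmitq, skb);	// send the rejection back
 *	// on failure the original buffer was consumed and skb is NULL
 */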

/* tipc_msg_skb_clone(): clone all buffers of a queue into another queue */
bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy)
{
	struct sk_buff *skb, *_skb;

	skb_queue_walk(msg, skb) {
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb) {
			__skb_queue_purge(cpy);
			pr_err_ratelimited("Failed to clone buffer chain\n");
			return false;
		}
		__skb_queue_tail(cpy, _skb);
	}
	return true;
}

/**
 * tipc_msg_lookup_dest(): try to find new destination for named message
 * @net: pointer to associated network namespace
 * @skb: the buffer containing the message.
 * @err: error code to be used by caller if lookup fails
 * Does not consume buffer
 * Return: true if a destination is found, false otherwise
 */
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
{
	struct tipc_msg *msg = buf_msg(skb);
	u32 scope = msg_lookup_scope(msg);
	u32 self = tipc_own_addr(net);
	u32 inst = msg_nameinst(msg);
	struct tipc_socket_addr sk;
	struct tipc_uaddr ua;

	if (!msg_isdata(msg))
		return false;
	if (!msg_named(msg))
		return false;
	if (msg_errcode(msg))
		return false;
	*err = TIPC_ERR_NO_NAME;
	if (skb_linearize(skb))
		return false;
	msg = buf_msg(skb);
	if (msg_reroute_cnt(msg))
		return false;
	tipc_uaddr(&ua, TIPC_SERVICE_RANGE, scope,
		   msg_nametype(msg), inst, inst);
	sk.node = tipc_scope2node(net, scope);
	if (!tipc_nametbl_lookup_anycast(net, &ua, &sk))
		return false;
	msg_incr_reroute_cnt(msg);
	if (sk.node != self)
		msg_set_prevnode(msg, self);
	msg_set_destnode(msg, sk.node);
	msg_set_destport(msg, sk.ref);
	*err = TIPC_OK;

	return true;
}
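
/* Editor's sketch (illustrative, hypothetical names): retrying delivery of
 * a named message whose original destination socket has vanished; if no
 * new destination exists, the buffer is rejected with the returned error.
 *
 *	int err = TIPC_ERR_NO_PORT;	// caller's default reject code
 *
 *	if (tipc_msg_lookup_dest(net, skb, &err))
 *		tipc_node_xmit_skb(net, skb,
 *				   msg_destnode(buf_msg(skb)), 0);
 *	else
 *		tipc_skb_reject(net, err, skb, xmitq);
 */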

/* tipc_msg_assemble() - assemble chain of fragments into one message
 */
bool tipc_msg_assemble(struct sk_buff_head *list)
{
	struct sk_buff *skb, *tmp = NULL;

	if (skb_queue_len(list) == 1)
		return true;

	while ((skb = __skb_dequeue(list))) {
		skb->next = NULL;
		if (tipc_buf_append(&tmp, &skb)) {
			__skb_queue_tail(list, skb);
			return true;
		}
		if (!tmp)
			break;
	}
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	pr_warn("Failed to assemble buffer\n");
	return false;
}

/* tipc_msg_reassemble() - clone a buffer chain of fragments and
 *                         reassemble the clones into one message
 */
bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq)
{
	struct sk_buff *skb, *_skb;
	struct sk_buff *frag = NULL;
	struct sk_buff *head = NULL;
	int hdr_len;

	/* Copy header if single buffer */
	if (skb_queue_len(list) == 1) {
		skb = skb_peek(list);
		hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
		_skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC);
		if (!_skb)
			return false;
		__skb_queue_tail(rcvq, _skb);
		return true;
	}

	/* Clone all fragments and reassemble */
	skb_queue_walk(list, skb) {
		frag = skb_clone(skb, GFP_ATOMIC);
		if (!frag)
			goto error;
		frag->next = NULL;
		if (tipc_buf_append(&head, &frag))
			break;
		if (!head)
			goto error;
	}
	__skb_queue_tail(rcvq, frag);
	return true;
error:
	pr_warn("Failed to clone local mcast rcv buffer\n");
	kfree_skb(head);
	return false;
}

/* tipc_msg_pskb_copy(): copy a buffer chain, setting a new destination node */
bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
			struct sk_buff_head *cpy)
{
	struct sk_buff *skb, *_skb;

	skb_queue_walk(msg, skb) {
		_skb = pskb_copy(skb, GFP_ATOMIC);
		if (!_skb) {
			__skb_queue_purge(cpy);
			return false;
		}
		msg_set_destnode(buf_msg(_skb), dst);
		__skb_queue_tail(cpy, _skb);
	}
	return true;
}

/* __tipc_skb_queue_sorted(): sort pkt into list according to sequence number
 * @list: list to be appended to
 * @seqno: sequence number of buffer to add
 * @skb: buffer to add
 */
bool __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
			     struct sk_buff *skb)
{
	struct sk_buff *_skb, *tmp;

	if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) {
		__skb_queue_head(list, skb);
		return true;
	}

	if (more(seqno, buf_seqno(skb_peek_tail(list)))) {
		__skb_queue_tail(list, skb);
		return true;
	}

	skb_queue_walk_safe(list, _skb, tmp) {
		if (more(seqno, buf_seqno(_skb)))
			continue;
		if (seqno == buf_seqno(_skb))
			break;
		__skb_queue_before(list, _skb, skb);
		return true;
	}
	kfree_skb(skb);
	return false;
}
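
/* Editor's sketch (illustrative; "deferdq" is a hypothetical deferred
 * queue): out-of-order arrivals are inserted in ascending sequence order,
 * and duplicates are freed with a "false" return.
 *
 *	u16 seqno = buf_seqno(skb);
 *
 *	if (!__tipc_skb_queue_sorted(deferdq, seqno, skb))
 *		pr_debug("Drop duplicate seqno %u\n", seqno);
 *	// on true, deferdq remains sorted ascending by seqno
 */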

void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
		     struct sk_buff_head *xmitq)
{
	if (tipc_msg_reverse(tipc_own_addr(net), &skb, err))
		__skb_queue_tail(xmitq, skb);
}