Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:
"Last bit of straggler fixes...

1) Fix btf library licensing to LGPL, from Martin KaFai Lau.

2) Fix error handling in bpf sockmap code, from Daniel Borkmann.

3) XDP cpumap teardown handling wrt. execution contexts, from Jesper
Dangaard Brouer.

4) Fix loss of runtime PM on failed vlan add/del, from Ivan
Khoronzhuk.

5) xen-netfront caches skb_shinfo(skb) across a __pskb_pull_tail()
call, which potentially changes the skb's data buffer, and thus
skb_shinfo(). Fix from Juergen Gross"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
xen/netfront: don't cache skb_shinfo()
net: ethernet: ti: cpsw: fix runtime_pm while add/kill vlan
net: ethernet: ti: cpsw: clear all entries when delete vid
xdp: fix bug in devmap teardown code path
samples/bpf: xdp_redirect_cpu adjustment to reproduce teardown race easier
xdp: fix bug in cpumap teardown code path
bpf, sockmap: fix cork timeout for select due to epipe
bpf, sockmap: fix leak in bpf_tcp_sendmsg wait for mem path
bpf, sockmap: fix bpf_tcp_sendmsg sock error handling
bpf: btf: Change tools/lib/bpf/btf to LGPL

+46 -39
+11 -14
drivers/net/ethernet/ti/cpsw.c
··· 2086 2086 int i; 2087 2087 2088 2088 for (i = 0; i < cpsw->data.slaves; i++) { 2089 - if (vid == cpsw->slaves[i].port_vlan) 2090 - return -EINVAL; 2089 + if (vid == cpsw->slaves[i].port_vlan) { 2090 + ret = -EINVAL; 2091 + goto err; 2092 + } 2091 2093 } 2092 2094 } 2093 2095 2094 2096 dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid); 2095 2097 ret = cpsw_add_vlan_ale_entry(priv, vid); 2096 - 2098 + err: 2097 2099 pm_runtime_put(cpsw->dev); 2098 2100 return ret; 2099 2101 } ··· 2121 2119 2122 2120 for (i = 0; i < cpsw->data.slaves; i++) { 2123 2121 if (vid == cpsw->slaves[i].port_vlan) 2124 - return -EINVAL; 2122 + goto err; 2125 2123 } 2126 2124 } 2127 2125 2128 2126 dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid); 2129 2127 ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0); 2130 - if (ret != 0) 2131 - return ret; 2132 - 2133 - ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, 2134 - HOST_PORT_NUM, ALE_VLAN, vid); 2135 - if (ret != 0) 2136 - return ret; 2137 - 2138 - ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast, 2139 - 0, ALE_VLAN, vid); 2128 + ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, 2129 + HOST_PORT_NUM, ALE_VLAN, vid); 2130 + ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast, 2131 + 0, ALE_VLAN, vid); 2132 + err: 2140 2133 pm_runtime_put(cpsw->dev); 2141 2134 return ret; 2142 2135 }
+1 -1
drivers/net/ethernet/ti/cpsw_ale.c
··· 394 394 395 395 idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0); 396 396 if (idx < 0) 397 - return -EINVAL; 397 + return -ENOENT; 398 398 399 399 cpsw_ale_read(ale, idx, ale_entry); 400 400
+4 -4
drivers/net/xen-netfront.c
··· 894 894 struct sk_buff *skb, 895 895 struct sk_buff_head *list) 896 896 { 897 - struct skb_shared_info *shinfo = skb_shinfo(skb); 898 897 RING_IDX cons = queue->rx.rsp_cons; 899 898 struct sk_buff *nskb; 900 899 ··· 902 903 RING_GET_RESPONSE(&queue->rx, ++cons); 903 904 skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0]; 904 905 905 - if (shinfo->nr_frags == MAX_SKB_FRAGS) { 906 + if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) { 906 907 unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to; 907 908 908 909 BUG_ON(pull_to <= skb_headlen(skb)); 909 910 __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); 910 911 } 911 - BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS); 912 + BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS); 912 913 913 - skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag), 914 + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 915 + skb_frag_page(nfrag), 914 916 rx->offset, rx->status, PAGE_SIZE); 915 917 916 918 skb_shinfo(nskb)->nr_frags = 0;
+9 -6
kernel/bpf/cpumap.c
··· 69 69 }; 70 70 71 71 static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu, 72 - struct xdp_bulk_queue *bq); 72 + struct xdp_bulk_queue *bq, bool in_napi_ctx); 73 73 74 74 static u64 cpu_map_bitmap_size(const union bpf_attr *attr) 75 75 { ··· 375 375 struct xdp_bulk_queue *bq = per_cpu_ptr(rcpu->bulkq, cpu); 376 376 377 377 /* No concurrent bq_enqueue can run at this point */ 378 - bq_flush_to_queue(rcpu, bq); 378 + bq_flush_to_queue(rcpu, bq, false); 379 379 } 380 380 free_percpu(rcpu->bulkq); 381 381 /* Cannot kthread_stop() here, last put free rcpu resources */ ··· 558 558 }; 559 559 560 560 static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu, 561 - struct xdp_bulk_queue *bq) 561 + struct xdp_bulk_queue *bq, bool in_napi_ctx) 562 562 { 563 563 unsigned int processed = 0, drops = 0; 564 564 const int to_cpu = rcpu->cpu; ··· 578 578 err = __ptr_ring_produce(q, xdpf); 579 579 if (err) { 580 580 drops++; 581 - xdp_return_frame_rx_napi(xdpf); 581 + if (likely(in_napi_ctx)) 582 + xdp_return_frame_rx_napi(xdpf); 583 + else 584 + xdp_return_frame(xdpf); 582 585 } 583 586 processed++; 584 587 } ··· 601 598 struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq); 602 599 603 600 if (unlikely(bq->count == CPU_MAP_BULK_SIZE)) 604 - bq_flush_to_queue(rcpu, bq); 601 + bq_flush_to_queue(rcpu, bq, true); 605 602 606 603 /* Notice, xdp_buff/page MUST be queued here, long enough for 607 604 * driver to code invoking us to finished, due to driver ··· 664 661 665 662 /* Flush all frames in bulkq to real queue */ 666 663 bq = this_cpu_ptr(rcpu->bulkq); 667 - bq_flush_to_queue(rcpu, bq); 664 + bq_flush_to_queue(rcpu, bq, true); 668 665 669 666 /* If already running, costs spin_lock_irqsave + smb_mb */ 670 667 wake_up_process(rcpu->kthread);
+9 -5
kernel/bpf/devmap.c
··· 217 217 } 218 218 219 219 static int bq_xmit_all(struct bpf_dtab_netdev *obj, 220 - struct xdp_bulk_queue *bq, u32 flags) 220 + struct xdp_bulk_queue *bq, u32 flags, 221 + bool in_napi_ctx) 221 222 { 222 223 struct net_device *dev = obj->dev; 223 224 int sent = 0, drops = 0, err = 0; ··· 255 254 struct xdp_frame *xdpf = bq->q[i]; 256 255 257 256 /* RX path under NAPI protection, can return frames faster */ 258 - xdp_return_frame_rx_napi(xdpf); 257 + if (likely(in_napi_ctx)) 258 + xdp_return_frame_rx_napi(xdpf); 259 + else 260 + xdp_return_frame(xdpf); 259 261 drops++; 260 262 } 261 263 goto out; ··· 290 286 __clear_bit(bit, bitmap); 291 287 292 288 bq = this_cpu_ptr(dev->bulkq); 293 - bq_xmit_all(dev, bq, XDP_XMIT_FLUSH); 289 + bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true); 294 290 } 295 291 } 296 292 ··· 320 316 struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq); 321 317 322 318 if (unlikely(bq->count == DEV_MAP_BULK_SIZE)) 323 - bq_xmit_all(obj, bq, 0); 319 + bq_xmit_all(obj, bq, 0, true); 324 320 325 321 /* Ingress dev_rx will be the same for all xdp_frame's in 326 322 * bulk_queue, because bq stored per-CPU and must be flushed ··· 389 385 __clear_bit(dev->bit, bitmap); 390 386 391 387 bq = per_cpu_ptr(dev->bulkq, cpu); 392 - bq_xmit_all(dev, bq, XDP_XMIT_FLUSH); 388 + bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false); 393 389 } 394 390 } 395 391 }
+6 -3
kernel/bpf/sockmap.c
··· 1048 1048 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 1049 1049 1050 1050 while (msg_data_left(msg)) { 1051 - struct sk_msg_buff *m; 1051 + struct sk_msg_buff *m = NULL; 1052 1052 bool enospc = false; 1053 1053 int copy; 1054 1054 1055 1055 if (sk->sk_err) { 1056 - err = sk->sk_err; 1056 + err = -sk->sk_err; 1057 1057 goto out_err; 1058 1058 } 1059 1059 ··· 1116 1116 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1117 1117 wait_for_memory: 1118 1118 err = sk_stream_wait_memory(sk, &timeo); 1119 - if (err) 1119 + if (err) { 1120 + if (m && m != psock->cork) 1121 + free_start_sg(sk, m); 1120 1122 goto out_err; 1123 + } 1121 1124 } 1122 1125 out_err: 1123 1126 if (err < 0)
+1 -1
samples/bpf/xdp_redirect_cpu_kern.c
··· 14 14 #include <uapi/linux/bpf.h> 15 15 #include "bpf_helpers.h" 16 16 17 - #define MAX_CPUS 12 /* WARNING - sync with _user.c */ 17 + #define MAX_CPUS 64 /* WARNING - sync with _user.c */ 18 18 19 19 /* Special map type that can XDP_REDIRECT frames to another CPU */ 20 20 struct bpf_map_def SEC("maps") cpu_map = {
+2 -2
samples/bpf/xdp_redirect_cpu_user.c
··· 19 19 #include <arpa/inet.h> 20 20 #include <linux/if_link.h> 21 21 22 - #define MAX_CPUS 12 /* WARNING - sync with _kern.c */ 22 + #define MAX_CPUS 64 /* WARNING - sync with _kern.c */ 23 23 24 24 /* How many xdp_progs are defined in _kern.c */ 25 25 #define MAX_PROG 5 ··· 527 527 * procedure. 528 528 */ 529 529 create_cpu_entry(1, 1024, 0, false); 530 - create_cpu_entry(1, 128, 0, false); 530 + create_cpu_entry(1, 8, 0, false); 531 531 create_cpu_entry(1, 16000, 0, false); 532 532 } 533 533
+1 -1
tools/lib/bpf/btf.c
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 1 + // SPDX-License-Identifier: LGPL-2.1 2 2 /* Copyright (c) 2018 Facebook */ 3 3 4 4 #include <stdlib.h>
+1 -1
tools/lib/bpf/btf.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 1 + /* SPDX-License-Identifier: LGPL-2.1 */ 2 2 /* Copyright (c) 2018 Facebook */ 3 3 4 4 #ifndef __BPF_BTF_H
+1 -1
tools/testing/selftests/bpf/test_sockmap.c
··· 354 354 while (s->bytes_recvd < total_bytes) { 355 355 if (txmsg_cork) { 356 356 timeout.tv_sec = 0; 357 - timeout.tv_usec = 1000; 357 + timeout.tv_usec = 300000; 358 358 } else { 359 359 timeout.tv_sec = 1; 360 360 timeout.tv_usec = 0;