Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) Don't race in IPSEC dumps, from Yuejie Shi.

2) Verify lengths properly in IPSEC requests, from Herbert Xu.

3) Fix out of bounds access in ipv6 segment routing code, from David
Lebrun.

4) Don't write into the header of cloned SKBs in smsc95xx driver, from
James Hughes.

5) Several other drivers have this bug too, fix them. From Eric
Dumazet.

6) Fix access to uninitialized data in TC action cookie code, from
Wolfgang Bumiller.

7) Fix double free in IPV6 segment routing, again from David Lebrun.

8) Don't let userspace set the RTF_PCPU flag, oops. From David Ahern.

9) Fix use after free in qrtr code, from Dan Carpenter.

10) Don't double-destroy devices in ip6mr code, from Nikolay
Aleksandrov.

11) Don't pass out-of-range TX queue indices into drivers, from Tushar
Dave.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (30 commits)
netpoll: Check for skb->queue_mapping
ip6mr: fix notification device destruction
bpf, doc: update bpf maintainers entry
net: qrtr: potential use after free in qrtr_sendmsg()
bpf: Fix values type used in test_maps
net: ipv6: RTF_PCPU should not be settable from userspace
gso: Validate assumption of frag_list segmentation
kaweth: use skb_cow_head() to deal with cloned skbs
ch9200: use skb_cow_head() to deal with cloned skbs
lan78xx: use skb_cow_head() to deal with cloned skbs
sr9700: use skb_cow_head() to deal with cloned skbs
cx82310_eth: use skb_cow_head() to deal with cloned skbs
smsc75xx: use skb_cow_head() to deal with cloned skbs
ipv6: sr: fix double free of skb after handling invalid SRH
MAINTAINERS: Add "B:" field for networking.
net sched actions: allocate act cookie early
qed: Fix issue in populating the PFC config parameters.
qed: Fix possible system hang in the dcbnl-getdcbx() path.
qed: Fix sending an invalid PFC error mask to MFW.
qed: Fix possible error in populating max_tc field.
...

+345 -210
+16 -2
MAINTAINERS
··· 2585 2585 2586 2586 BPF (Safe dynamic programs and tools) 2587 2587 M: Alexei Starovoitov <ast@kernel.org> 2588 + M: Daniel Borkmann <daniel@iogearbox.net> 2588 2589 L: netdev@vger.kernel.org 2589 2590 L: linux-kernel@vger.kernel.org 2590 2591 S: Supported 2592 + F: arch/x86/net/bpf_jit* 2593 + F: Documentation/networking/filter.txt 2594 + F: include/linux/bpf* 2595 + F: include/linux/filter.h 2596 + F: include/uapi/linux/bpf* 2597 + F: include/uapi/linux/filter.h 2591 2598 F: kernel/bpf/ 2592 - F: tools/testing/selftests/bpf/ 2599 + F: kernel/trace/bpf_trace.c 2593 2600 F: lib/test_bpf.c 2601 + F: net/bpf/ 2602 + F: net/core/filter.c 2603 + F: net/sched/act_bpf.c 2604 + F: net/sched/cls_bpf.c 2605 + F: samples/bpf/ 2606 + F: tools/net/bpf* 2607 + F: tools/testing/selftests/bpf/ 2594 2608 2595 2609 BROADCOM B44 10/100 ETHERNET DRIVER 2596 2610 M: Michael Chan <michael.chan@broadcom.com> ··· 8775 8761 Q: http://patchwork.ozlabs.org/project/netdev/list/ 8776 8762 T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git 8777 8763 T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git 8764 + B: mailto:netdev@vger.kernel.org 8778 8765 S: Maintained 8779 8766 F: net/ 8780 8767 F: include/net/ ··· 12479 12464 F: include/linux/clk/ti.h 12480 12465 12481 12466 TI ETHERNET SWITCH DRIVER (CPSW) 12482 - M: Mugunthan V N <mugunthanvnm@ti.com> 12483 12467 R: Grygorii Strashko <grygorii.strashko@ti.com> 12484 12468 L: linux-omap@vger.kernel.org 12485 12469 L: netdev@vger.kernel.org
+12 -1
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
··· 583 583 p_params->ets_cbs, 584 584 p_ets->pri_tc_tbl[0], p_params->max_ets_tc); 585 585 586 + if (p_params->ets_enabled && !p_params->max_ets_tc) { 587 + p_params->max_ets_tc = QED_MAX_PFC_PRIORITIES; 588 + DP_VERBOSE(p_hwfn, QED_MSG_DCB, 589 + "ETS params: max_ets_tc is forced to %d\n", 590 + p_params->max_ets_tc); 591 + } 592 + 586 593 /* 8 bit tsa and bw data corresponding to each of the 8 TC's are 587 594 * encoded in a type u32 array of size 2. 588 595 */ ··· 1008 1001 u8 pfc_map = 0; 1009 1002 int i; 1010 1003 1004 + *pfc &= ~DCBX_PFC_ERROR_MASK; 1005 + 1011 1006 if (p_params->pfc.willing) 1012 1007 *pfc |= DCBX_PFC_WILLING_MASK; 1013 1008 else ··· 1264 1255 { 1265 1256 struct qed_dcbx_get *dcbx_info; 1266 1257 1267 - dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL); 1258 + dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_ATOMIC); 1268 1259 if (!dcbx_info) 1269 1260 return NULL; 1270 1261 ··· 2081 2072 dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG; 2082 2073 for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) 2083 2074 dcbx_set.config.params.pfc.prio[i] = !!(pfc->pfc_en & BIT(i)); 2075 + 2076 + dcbx_set.config.params.pfc.max_tc = pfc->pfc_cap; 2084 2077 2085 2078 ptt = qed_ptt_acquire(hwfn); 2086 2079 if (!ptt)
+67 -55
drivers/net/ethernet/renesas/sh_eth.c
··· 1127 1127 .get_mdio_data = sh_get_mdio, 1128 1128 }; 1129 1129 1130 + /* free Tx skb function */ 1131 + static int sh_eth_tx_free(struct net_device *ndev, bool sent_only) 1132 + { 1133 + struct sh_eth_private *mdp = netdev_priv(ndev); 1134 + struct sh_eth_txdesc *txdesc; 1135 + int free_num = 0; 1136 + int entry; 1137 + bool sent; 1138 + 1139 + for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { 1140 + entry = mdp->dirty_tx % mdp->num_tx_ring; 1141 + txdesc = &mdp->tx_ring[entry]; 1142 + sent = !(txdesc->status & cpu_to_le32(TD_TACT)); 1143 + if (sent_only && !sent) 1144 + break; 1145 + /* TACT bit must be checked before all the following reads */ 1146 + dma_rmb(); 1147 + netif_info(mdp, tx_done, ndev, 1148 + "tx entry %d status 0x%08x\n", 1149 + entry, le32_to_cpu(txdesc->status)); 1150 + /* Free the original skb. */ 1151 + if (mdp->tx_skbuff[entry]) { 1152 + dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr), 1153 + le32_to_cpu(txdesc->len) >> 16, 1154 + DMA_TO_DEVICE); 1155 + dev_kfree_skb_irq(mdp->tx_skbuff[entry]); 1156 + mdp->tx_skbuff[entry] = NULL; 1157 + free_num++; 1158 + } 1159 + txdesc->status = cpu_to_le32(TD_TFP); 1160 + if (entry >= mdp->num_tx_ring - 1) 1161 + txdesc->status |= cpu_to_le32(TD_TDLE); 1162 + 1163 + if (sent) { 1164 + ndev->stats.tx_packets++; 1165 + ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16; 1166 + } 1167 + } 1168 + return free_num; 1169 + } 1170 + 1130 1171 /* free skb and descriptor buffer */ 1131 1172 static void sh_eth_ring_free(struct net_device *ndev) 1132 1173 { 1133 1174 struct sh_eth_private *mdp = netdev_priv(ndev); 1134 1175 int ringsize, i; 1176 + 1177 + if (mdp->rx_ring) { 1178 + for (i = 0; i < mdp->num_rx_ring; i++) { 1179 + if (mdp->rx_skbuff[i]) { 1180 + struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i]; 1181 + 1182 + dma_unmap_single(&ndev->dev, 1183 + le32_to_cpu(rxdesc->addr), 1184 + ALIGN(mdp->rx_buf_sz, 32), 1185 + DMA_FROM_DEVICE); 1186 + } 1187 + } 1188 + ringsize = sizeof(struct 
sh_eth_rxdesc) * mdp->num_rx_ring; 1189 + dma_free_coherent(NULL, ringsize, mdp->rx_ring, 1190 + mdp->rx_desc_dma); 1191 + mdp->rx_ring = NULL; 1192 + } 1135 1193 1136 1194 /* Free Rx skb ringbuffer */ 1137 1195 if (mdp->rx_skbuff) { ··· 1199 1141 kfree(mdp->rx_skbuff); 1200 1142 mdp->rx_skbuff = NULL; 1201 1143 1202 - /* Free Tx skb ringbuffer */ 1203 - if (mdp->tx_skbuff) { 1204 - for (i = 0; i < mdp->num_tx_ring; i++) 1205 - dev_kfree_skb(mdp->tx_skbuff[i]); 1206 - } 1207 - kfree(mdp->tx_skbuff); 1208 - mdp->tx_skbuff = NULL; 1209 - 1210 - if (mdp->rx_ring) { 1211 - ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; 1212 - dma_free_coherent(NULL, ringsize, mdp->rx_ring, 1213 - mdp->rx_desc_dma); 1214 - mdp->rx_ring = NULL; 1215 - } 1216 - 1217 1144 if (mdp->tx_ring) { 1145 + sh_eth_tx_free(ndev, false); 1146 + 1218 1147 ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; 1219 1148 dma_free_coherent(NULL, ringsize, mdp->tx_ring, 1220 1149 mdp->tx_desc_dma); 1221 1150 mdp->tx_ring = NULL; 1222 1151 } 1152 + 1153 + /* Free Tx skb ringbuffer */ 1154 + kfree(mdp->tx_skbuff); 1155 + mdp->tx_skbuff = NULL; 1223 1156 } 1224 1157 1225 1158 /* format skb and descriptor buffer */ ··· 1458 1409 update_mac_address(ndev); 1459 1410 } 1460 1411 1461 - /* free Tx skb function */ 1462 - static int sh_eth_txfree(struct net_device *ndev) 1463 - { 1464 - struct sh_eth_private *mdp = netdev_priv(ndev); 1465 - struct sh_eth_txdesc *txdesc; 1466 - int free_num = 0; 1467 - int entry; 1468 - 1469 - for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { 1470 - entry = mdp->dirty_tx % mdp->num_tx_ring; 1471 - txdesc = &mdp->tx_ring[entry]; 1472 - if (txdesc->status & cpu_to_le32(TD_TACT)) 1473 - break; 1474 - /* TACT bit must be checked before all the following reads */ 1475 - dma_rmb(); 1476 - netif_info(mdp, tx_done, ndev, 1477 - "tx entry %d status 0x%08x\n", 1478 - entry, le32_to_cpu(txdesc->status)); 1479 - /* Free the original skb. 
*/ 1480 - if (mdp->tx_skbuff[entry]) { 1481 - dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr), 1482 - le32_to_cpu(txdesc->len) >> 16, 1483 - DMA_TO_DEVICE); 1484 - dev_kfree_skb_irq(mdp->tx_skbuff[entry]); 1485 - mdp->tx_skbuff[entry] = NULL; 1486 - free_num++; 1487 - } 1488 - txdesc->status = cpu_to_le32(TD_TFP); 1489 - if (entry >= mdp->num_tx_ring - 1) 1490 - txdesc->status |= cpu_to_le32(TD_TDLE); 1491 - 1492 - ndev->stats.tx_packets++; 1493 - ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16; 1494 - } 1495 - return free_num; 1496 - } 1497 - 1498 1412 /* Packet receive function */ 1499 1413 static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) 1500 1414 { ··· 1702 1690 intr_status, mdp->cur_tx, mdp->dirty_tx, 1703 1691 (u32)ndev->state, edtrr); 1704 1692 /* dirty buffer free */ 1705 - sh_eth_txfree(ndev); 1693 + sh_eth_tx_free(ndev, true); 1706 1694 1707 1695 /* SH7712 BUG */ 1708 1696 if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) { ··· 1763 1751 /* Clear Tx interrupts */ 1764 1752 sh_eth_write(ndev, intr_status & cd->tx_check, EESR); 1765 1753 1766 - sh_eth_txfree(ndev); 1754 + sh_eth_tx_free(ndev, true); 1767 1755 netif_wake_queue(ndev); 1768 1756 } 1769 1757 ··· 2424 2412 2425 2413 spin_lock_irqsave(&mdp->lock, flags); 2426 2414 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) { 2427 - if (!sh_eth_txfree(ndev)) { 2415 + if (!sh_eth_tx_free(ndev, true)) { 2428 2416 netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n"); 2429 2417 netif_stop_queue(ndev); 2430 2418 spin_unlock_irqrestore(&mdp->lock, flags);
-2
drivers/net/phy/dp83640.c
··· 1438 1438 skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT; 1439 1439 skb_queue_tail(&dp83640->rx_queue, skb); 1440 1440 schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT); 1441 - } else { 1442 - netif_rx_ni(skb); 1443 1441 } 1444 1442 1445 1443 return true;
+2 -7
drivers/net/usb/ch9200.c
··· 254 254 tx_overhead = 0x40; 255 255 256 256 len = skb->len; 257 - if (skb_headroom(skb) < tx_overhead) { 258 - struct sk_buff *skb2; 259 - 260 - skb2 = skb_copy_expand(skb, tx_overhead, 0, flags); 257 + if (skb_cow_head(skb, tx_overhead)) { 261 258 dev_kfree_skb_any(skb); 262 - skb = skb2; 263 - if (!skb) 264 - return NULL; 259 + return NULL; 265 260 } 266 261 267 262 __skb_push(skb, tx_overhead);
+2 -5
drivers/net/usb/cx82310_eth.c
··· 293 293 { 294 294 int len = skb->len; 295 295 296 - if (skb_headroom(skb) < 2) { 297 - struct sk_buff *skb2 = skb_copy_expand(skb, 2, 0, flags); 296 + if (skb_cow_head(skb, 2)) { 298 297 dev_kfree_skb_any(skb); 299 - skb = skb2; 300 - if (!skb) 301 - return NULL; 298 + return NULL; 302 299 } 303 300 skb_push(skb, 2); 304 301
+6 -12
drivers/net/usb/kaweth.c
··· 803 803 } 804 804 805 805 /* We now decide whether we can put our special header into the sk_buff */ 806 - if (skb_cloned(skb) || skb_headroom(skb) < 2) { 807 - /* no such luck - we make our own */ 808 - struct sk_buff *copied_skb; 809 - copied_skb = skb_copy_expand(skb, 2, 0, GFP_ATOMIC); 810 - dev_kfree_skb_irq(skb); 811 - skb = copied_skb; 812 - if (!copied_skb) { 813 - kaweth->stats.tx_errors++; 814 - netif_start_queue(net); 815 - spin_unlock_irq(&kaweth->device_lock); 816 - return NETDEV_TX_OK; 817 - } 806 + if (skb_cow_head(skb, 2)) { 807 + kaweth->stats.tx_errors++; 808 + netif_start_queue(net); 809 + spin_unlock_irq(&kaweth->device_lock); 810 + dev_kfree_skb_any(skb); 811 + return NETDEV_TX_OK; 818 812 } 819 813 820 814 private_header = (__le16 *)__skb_push(skb, 2);
+2 -7
drivers/net/usb/lan78xx.c
··· 2607 2607 { 2608 2608 u32 tx_cmd_a, tx_cmd_b; 2609 2609 2610 - if (skb_headroom(skb) < TX_OVERHEAD) { 2611 - struct sk_buff *skb2; 2612 - 2613 - skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags); 2610 + if (skb_cow_head(skb, TX_OVERHEAD)) { 2614 2611 dev_kfree_skb_any(skb); 2615 - skb = skb2; 2616 - if (!skb) 2617 - return NULL; 2612 + return NULL; 2618 2613 } 2619 2614 2620 2615 if (lan78xx_linearize(skb) < 0)
+2 -6
drivers/net/usb/smsc75xx.c
··· 2203 2203 { 2204 2204 u32 tx_cmd_a, tx_cmd_b; 2205 2205 2206 - if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) { 2207 - struct sk_buff *skb2 = 2208 - skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags); 2206 + if (skb_cow_head(skb, SMSC75XX_TX_OVERHEAD)) { 2209 2207 dev_kfree_skb_any(skb); 2210 - skb = skb2; 2211 - if (!skb) 2212 - return NULL; 2208 + return NULL; 2213 2209 } 2214 2210 2215 2211 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN) | TX_CMD_A_FCS;
+6 -6
drivers/net/usb/smsc95xx.c
··· 2001 2001 /* We do not advertise SG, so skbs should be already linearized */ 2002 2002 BUG_ON(skb_shinfo(skb)->nr_frags); 2003 2003 2004 - if (skb_headroom(skb) < overhead) { 2005 - struct sk_buff *skb2 = skb_copy_expand(skb, 2006 - overhead, 0, flags); 2004 + /* Make writable and expand header space by overhead if required */ 2005 + if (skb_cow_head(skb, overhead)) { 2006 + /* Must deallocate here as returning NULL to indicate error 2007 + * means the skb won't be deallocated in the caller. 2008 + */ 2007 2009 dev_kfree_skb_any(skb); 2008 - skb = skb2; 2009 - if (!skb) 2010 - return NULL; 2010 + return NULL; 2011 2011 } 2012 2012 2013 2013 if (csum) {
+2 -7
drivers/net/usb/sr9700.c
··· 456 456 457 457 len = skb->len; 458 458 459 - if (skb_headroom(skb) < SR_TX_OVERHEAD) { 460 - struct sk_buff *skb2; 461 - 462 - skb2 = skb_copy_expand(skb, SR_TX_OVERHEAD, 0, flags); 459 + if (skb_cow_head(skb, SR_TX_OVERHEAD)) { 463 460 dev_kfree_skb_any(skb); 464 - skb = skb2; 465 - if (!skb) 466 - return NULL; 461 + return NULL; 467 462 } 468 463 469 464 __skb_push(skb, SR_TX_OVERHEAD);
+1 -1
include/uapi/linux/ipv6_route.h
··· 35 35 #define RTF_PREF(pref) ((pref) << 27) 36 36 #define RTF_PREF_MASK 0x18000000 37 37 38 - #define RTF_PCPU 0x40000000 38 + #define RTF_PCPU 0x40000000 /* read-only: can not be set by user */ 39 39 #define RTF_LOCAL 0x80000000 40 40 41 41
+8 -2
net/core/netpoll.c
··· 105 105 while ((skb = skb_dequeue(&npinfo->txq))) { 106 106 struct net_device *dev = skb->dev; 107 107 struct netdev_queue *txq; 108 + unsigned int q_index; 108 109 109 110 if (!netif_device_present(dev) || !netif_running(dev)) { 110 111 kfree_skb(skb); 111 112 continue; 112 113 } 113 114 114 - txq = skb_get_tx_queue(dev, skb); 115 - 116 115 local_irq_save(flags); 116 + /* check if skb->queue_mapping is still valid */ 117 + q_index = skb_get_queue_mapping(skb); 118 + if (unlikely(q_index >= dev->real_num_tx_queues)) { 119 + q_index = q_index % dev->real_num_tx_queues; 120 + skb_set_queue_mapping(skb, q_index); 121 + } 122 + txq = netdev_get_tx_queue(dev, q_index); 117 123 HARD_TX_LOCK(dev, txq, smp_processor_id()); 118 124 if (netif_xmit_frozen_or_stopped(txq) || 119 125 netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
+14 -4
net/core/skbuff.c
··· 3082 3082 if (sg && csum && (mss != GSO_BY_FRAGS)) { 3083 3083 if (!(features & NETIF_F_GSO_PARTIAL)) { 3084 3084 struct sk_buff *iter; 3085 + unsigned int frag_len; 3085 3086 3086 3087 if (!list_skb || 3087 3088 !net_gso_ok(features, skb_shinfo(head_skb)->gso_type)) 3088 3089 goto normal; 3089 3090 3090 - /* Split the buffer at the frag_list pointer. 3091 - * This is based on the assumption that all 3092 - * buffers in the chain excluding the last 3093 - * containing the same amount of data. 3091 + /* If we get here then all the required 3092 + * GSO features except frag_list are supported. 3093 + * Try to split the SKB to multiple GSO SKBs 3094 + * with no frag_list. 3095 + * Currently we can do that only when the buffers don't 3096 + * have a linear part and all the buffers except 3097 + * the last are of the same length. 3094 3098 */ 3099 + frag_len = list_skb->len; 3095 3100 skb_walk_frags(head_skb, iter) { 3101 + if (frag_len != iter->len && iter->next) 3102 + goto normal; 3096 3103 if (skb_headlen(iter)) 3097 3104 goto normal; 3098 3105 3099 3106 len -= iter->len; 3100 3107 } 3108 + 3109 + if (len != frag_len) 3110 + goto normal; 3101 3111 } 3102 3112 3103 3113 /* GSO partial only requires that we trim off any excess that
-1
net/ipv6/exthdrs.c
··· 388 388 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, 389 389 ((&hdr->segments_left) - 390 390 skb_network_header(skb))); 391 - kfree_skb(skb); 392 391 return -1; 393 392 } 394 393
+6 -7
net/ipv6/ip6mr.c
··· 774 774 * Delete a VIF entry 775 775 */ 776 776 777 - static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head) 777 + static int mif6_delete(struct mr6_table *mrt, int vifi, int notify, 778 + struct list_head *head) 778 779 { 779 780 struct mif_device *v; 780 781 struct net_device *dev; ··· 821 820 dev->ifindex, &in6_dev->cnf); 822 821 } 823 822 824 - if (v->flags & MIFF_REGISTER) 823 + if ((v->flags & MIFF_REGISTER) && !notify) 825 824 unregister_netdevice_queue(dev, head); 826 825 827 826 dev_put(dev); ··· 1332 1331 struct mr6_table *mrt; 1333 1332 struct mif_device *v; 1334 1333 int ct; 1335 - LIST_HEAD(list); 1336 1334 1337 1335 if (event != NETDEV_UNREGISTER) 1338 1336 return NOTIFY_DONE; ··· 1340 1340 v = &mrt->vif6_table[0]; 1341 1341 for (ct = 0; ct < mrt->maxvif; ct++, v++) { 1342 1342 if (v->dev == dev) 1343 - mif6_delete(mrt, ct, &list); 1343 + mif6_delete(mrt, ct, 1, NULL); 1344 1344 } 1345 1345 } 1346 - unregister_netdevice_many(&list); 1347 1346 1348 1347 return NOTIFY_DONE; 1349 1348 } ··· 1551 1552 for (i = 0; i < mrt->maxvif; i++) { 1552 1553 if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC)) 1553 1554 continue; 1554 - mif6_delete(mrt, i, &list); 1555 + mif6_delete(mrt, i, 0, &list); 1555 1556 } 1556 1557 unregister_netdevice_many(&list); 1557 1558 ··· 1706 1707 if (copy_from_user(&mifi, optval, sizeof(mifi_t))) 1707 1708 return -EFAULT; 1708 1709 rtnl_lock(); 1709 - ret = mif6_delete(mrt, mifi, NULL); 1710 + ret = mif6_delete(mrt, mifi, 0, NULL); 1710 1711 rtnl_unlock(); 1711 1712 return ret; 1712 1713
+4
net/ipv6/route.c
··· 1854 1854 int addr_type; 1855 1855 int err = -EINVAL; 1856 1856 1857 + /* RTF_PCPU is an internal flag; can not be set by userspace */ 1858 + if (cfg->fc_flags & RTF_PCPU) 1859 + goto out; 1860 + 1857 1861 if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128) 1858 1862 goto out; 1859 1863 #ifndef CONFIG_IPV6_SUBTREES
+3
net/ipv6/seg6.c
··· 53 53 struct sr6_tlv *tlv; 54 54 unsigned int tlv_len; 55 55 56 + if (trailing < sizeof(*tlv)) 57 + return false; 58 + 56 59 tlv = (struct sr6_tlv *)((unsigned char *)srh + tlv_offset); 57 60 tlv_len = sizeof(*tlv) + tlv->len; 58 61
+64 -29
net/key/af_key.c
··· 63 63 } u; 64 64 struct sk_buff *skb; 65 65 } dump; 66 + struct mutex dump_lock; 66 67 }; 68 + 69 + static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len, 70 + xfrm_address_t *saddr, xfrm_address_t *daddr, 71 + u16 *family); 67 72 68 73 static inline struct pfkey_sock *pfkey_sk(struct sock *sk) 69 74 { ··· 144 139 { 145 140 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); 146 141 struct sock *sk; 142 + struct pfkey_sock *pfk; 147 143 int err; 148 144 149 145 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) ··· 158 152 sk = sk_alloc(net, PF_KEY, GFP_KERNEL, &key_proto, kern); 159 153 if (sk == NULL) 160 154 goto out; 155 + 156 + pfk = pfkey_sk(sk); 157 + mutex_init(&pfk->dump_lock); 161 158 162 159 sock->ops = &pfkey_ops; 163 160 sock_init_data(sock, sk); ··· 290 281 struct sadb_msg *hdr; 291 282 int rc; 292 283 284 + mutex_lock(&pfk->dump_lock); 285 + if (!pfk->dump.dump) { 286 + rc = 0; 287 + goto out; 288 + } 289 + 293 290 rc = pfk->dump.dump(pfk); 294 - if (rc == -ENOBUFS) 295 - return 0; 291 + if (rc == -ENOBUFS) { 292 + rc = 0; 293 + goto out; 294 + } 296 295 297 296 if (pfk->dump.skb) { 298 - if (!pfkey_can_dump(&pfk->sk)) 299 - return 0; 297 + if (!pfkey_can_dump(&pfk->sk)) { 298 + rc = 0; 299 + goto out; 300 + } 300 301 301 302 hdr = (struct sadb_msg *) pfk->dump.skb->data; 302 303 hdr->sadb_msg_seq = 0; ··· 317 298 } 318 299 319 300 pfkey_terminate_dump(pfk); 301 + 302 + out: 303 + mutex_unlock(&pfk->dump_lock); 320 304 return rc; 321 305 } 322 306 ··· 1815 1793 struct xfrm_address_filter *filter = NULL; 1816 1794 struct pfkey_sock *pfk = pfkey_sk(sk); 1817 1795 1818 - if (pfk->dump.dump != NULL) 1796 + mutex_lock(&pfk->dump_lock); 1797 + if (pfk->dump.dump != NULL) { 1798 + mutex_unlock(&pfk->dump_lock); 1819 1799 return -EBUSY; 1800 + } 1820 1801 1821 1802 proto = pfkey_satype2proto(hdr->sadb_msg_satype); 1822 - if (proto == 0) 1803 + if (proto == 0) { 1804 + mutex_unlock(&pfk->dump_lock); 1823 1805 return -EINVAL; 1806 + } 1824 
1807 1825 1808 if (ext_hdrs[SADB_X_EXT_FILTER - 1]) { 1826 1809 struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1]; 1827 1810 1828 1811 filter = kmalloc(sizeof(*filter), GFP_KERNEL); 1829 - if (filter == NULL) 1812 + if (filter == NULL) { 1813 + mutex_unlock(&pfk->dump_lock); 1830 1814 return -ENOMEM; 1815 + } 1831 1816 1832 1817 memcpy(&filter->saddr, &xfilter->sadb_x_filter_saddr, 1833 1818 sizeof(xfrm_address_t)); ··· 1850 1821 pfk->dump.dump = pfkey_dump_sa; 1851 1822 pfk->dump.done = pfkey_dump_sa_done; 1852 1823 xfrm_state_walk_init(&pfk->dump.u.state, proto, filter); 1824 + mutex_unlock(&pfk->dump_lock); 1853 1825 1854 1826 return pfkey_do_dump(pfk); 1855 1827 } ··· 1943 1913 1944 1914 /* addresses present only in tunnel mode */ 1945 1915 if (t->mode == XFRM_MODE_TUNNEL) { 1946 - u8 *sa = (u8 *) (rq + 1); 1947 - int family, socklen; 1916 + int err; 1948 1917 1949 - family = pfkey_sockaddr_extract((struct sockaddr *)sa, 1950 - &t->saddr); 1951 - if (!family) 1952 - return -EINVAL; 1953 - 1954 - socklen = pfkey_sockaddr_len(family); 1955 - if (pfkey_sockaddr_extract((struct sockaddr *)(sa + socklen), 1956 - &t->id.daddr) != family) 1957 - return -EINVAL; 1958 - t->encap_family = family; 1918 + err = parse_sockaddr_pair( 1919 + (struct sockaddr *)(rq + 1), 1920 + rq->sadb_x_ipsecrequest_len - sizeof(*rq), 1921 + &t->saddr, &t->id.daddr, &t->encap_family); 1922 + if (err) 1923 + return err; 1959 1924 } else 1960 1925 t->encap_family = xp->family; 1961 1926 ··· 1970 1945 if (pol->sadb_x_policy_len * 8 < sizeof(struct sadb_x_policy)) 1971 1946 return -EINVAL; 1972 1947 1973 - while (len >= sizeof(struct sadb_x_ipsecrequest)) { 1948 + while (len >= sizeof(*rq)) { 1949 + if (len < rq->sadb_x_ipsecrequest_len || 1950 + rq->sadb_x_ipsecrequest_len < sizeof(*rq)) 1951 + return -EINVAL; 1952 + 1974 1953 if ((err = parse_ipsecrequest(xp, rq)) < 0) 1975 1954 return err; 1976 1955 len -= rq->sadb_x_ipsecrequest_len; ··· 2437 2408 return err; 2438 2409 } 2439 
2410 2440 - #ifdef CONFIG_NET_KEY_MIGRATE 2441 2411 static int pfkey_sockaddr_pair_size(sa_family_t family) 2442 2412 { 2443 2413 return PFKEY_ALIGN8(pfkey_sockaddr_len(family) * 2); ··· 2448 2420 { 2449 2421 int af, socklen; 2450 2422 2451 - if (ext_len < pfkey_sockaddr_pair_size(sa->sa_family)) 2423 + if (ext_len < 2 || ext_len < pfkey_sockaddr_pair_size(sa->sa_family)) 2452 2424 return -EINVAL; 2453 2425 2454 2426 af = pfkey_sockaddr_extract(sa, saddr); ··· 2464 2436 return 0; 2465 2437 } 2466 2438 2439 + #ifdef CONFIG_NET_KEY_MIGRATE 2467 2440 static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len, 2468 2441 struct xfrm_migrate *m) 2469 2442 { ··· 2472 2443 struct sadb_x_ipsecrequest *rq2; 2473 2444 int mode; 2474 2445 2475 - if (len <= sizeof(struct sadb_x_ipsecrequest) || 2476 - len < rq1->sadb_x_ipsecrequest_len) 2446 + if (len < sizeof(*rq1) || 2447 + len < rq1->sadb_x_ipsecrequest_len || 2448 + rq1->sadb_x_ipsecrequest_len < sizeof(*rq1)) 2477 2449 return -EINVAL; 2478 2450 2479 2451 /* old endoints */ 2480 2452 err = parse_sockaddr_pair((struct sockaddr *)(rq1 + 1), 2481 - rq1->sadb_x_ipsecrequest_len, 2453 + rq1->sadb_x_ipsecrequest_len - sizeof(*rq1), 2482 2454 &m->old_saddr, &m->old_daddr, 2483 2455 &m->old_family); 2484 2456 if (err) ··· 2488 2458 rq2 = (struct sadb_x_ipsecrequest *)((u8 *)rq1 + rq1->sadb_x_ipsecrequest_len); 2489 2459 len -= rq1->sadb_x_ipsecrequest_len; 2490 2460 2491 - if (len <= sizeof(struct sadb_x_ipsecrequest) || 2492 - len < rq2->sadb_x_ipsecrequest_len) 2461 + if (len <= sizeof(*rq2) || 2462 + len < rq2->sadb_x_ipsecrequest_len || 2463 + rq2->sadb_x_ipsecrequest_len < sizeof(*rq2)) 2493 2464 return -EINVAL; 2494 2465 2495 2466 /* new endpoints */ 2496 2467 err = parse_sockaddr_pair((struct sockaddr *)(rq2 + 1), 2497 - rq2->sadb_x_ipsecrequest_len, 2468 + rq2->sadb_x_ipsecrequest_len - sizeof(*rq2), 2498 2469 &m->new_saddr, &m->new_daddr, 2499 2470 &m->new_family); 2500 2471 if (err) ··· 2710 2679 { 2711 
2680 struct pfkey_sock *pfk = pfkey_sk(sk); 2712 2681 2713 - if (pfk->dump.dump != NULL) 2682 + mutex_lock(&pfk->dump_lock); 2683 + if (pfk->dump.dump != NULL) { 2684 + mutex_unlock(&pfk->dump_lock); 2714 2685 return -EBUSY; 2686 + } 2715 2687 2716 2688 pfk->dump.msg_version = hdr->sadb_msg_version; 2717 2689 pfk->dump.msg_portid = hdr->sadb_msg_pid; 2718 2690 pfk->dump.dump = pfkey_dump_sp; 2719 2691 pfk->dump.done = pfkey_dump_sp_done; 2720 2692 xfrm_policy_walk_init(&pfk->dump.u.policy, XFRM_POLICY_TYPE_MAIN); 2693 + mutex_unlock(&pfk->dump_lock); 2721 2694 2722 2695 return pfkey_do_dump(pfk); 2723 2696 }
+68 -18
net/mac80211/rx.c
··· 208 208 return len; 209 209 } 210 210 211 + static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata, 212 + struct sk_buff *skb, 213 + int rtap_vendor_space) 214 + { 215 + struct { 216 + struct ieee80211_hdr_3addr hdr; 217 + u8 category; 218 + u8 action_code; 219 + } __packed action; 220 + 221 + if (!sdata) 222 + return; 223 + 224 + BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1); 225 + 226 + if (skb->len < rtap_vendor_space + sizeof(action) + 227 + VHT_MUMIMO_GROUPS_DATA_LEN) 228 + return; 229 + 230 + if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr)) 231 + return; 232 + 233 + skb_copy_bits(skb, rtap_vendor_space, &action, sizeof(action)); 234 + 235 + if (!ieee80211_is_action(action.hdr.frame_control)) 236 + return; 237 + 238 + if (action.category != WLAN_CATEGORY_VHT) 239 + return; 240 + 241 + if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT) 242 + return; 243 + 244 + if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr)) 245 + return; 246 + 247 + skb = skb_copy(skb, GFP_ATOMIC); 248 + if (!skb) 249 + return; 250 + 251 + skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; 252 + skb_queue_tail(&sdata->skb_queue, skb); 253 + ieee80211_queue_work(&sdata->local->hw, &sdata->work); 254 + } 255 + 211 256 /* 212 257 * ieee80211_add_rx_radiotap_header - add radiotap header 213 258 * ··· 560 515 struct net_device *prev_dev = NULL; 561 516 int present_fcs_len = 0; 562 517 unsigned int rtap_vendor_space = 0; 563 - struct ieee80211_mgmt *mgmt; 564 518 struct ieee80211_sub_if_data *monitor_sdata = 565 519 rcu_dereference(local->monitor_sdata); 566 520 ··· 596 552 597 553 return remove_monitor_info(local, origskb, rtap_vendor_space); 598 554 } 555 + 556 + ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_vendor_space); 599 557 600 558 /* room for the radiotap header based on driver features */ 601 559 rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, origskb); ··· 662 616 663 617 prev_dev = 
sdata->dev; 664 618 ieee80211_rx_stats(sdata->dev, skb->len); 665 - } 666 - 667 - mgmt = (void *)skb->data; 668 - if (monitor_sdata && 669 - skb->len >= IEEE80211_MIN_ACTION_SIZE + 1 + VHT_MUMIMO_GROUPS_DATA_LEN && 670 - ieee80211_is_action(mgmt->frame_control) && 671 - mgmt->u.action.category == WLAN_CATEGORY_VHT && 672 - mgmt->u.action.u.vht_group_notif.action_code == WLAN_VHT_ACTION_GROUPID_MGMT && 673 - is_valid_ether_addr(monitor_sdata->u.mntr.mu_follow_addr) && 674 - ether_addr_equal(mgmt->da, monitor_sdata->u.mntr.mu_follow_addr)) { 675 - struct sk_buff *mu_skb = skb_copy(skb, GFP_ATOMIC); 676 - 677 - if (mu_skb) { 678 - mu_skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; 679 - skb_queue_tail(&monitor_sdata->skb_queue, mu_skb); 680 - ieee80211_queue_work(&local->hw, &monitor_sdata->work); 681 - } 682 619 } 683 620 684 621 if (prev_dev) { ··· 3639 3610 !ether_addr_equal(bssid, hdr->addr1)) 3640 3611 return false; 3641 3612 } 3613 + 3614 + /* 3615 + * 802.11-2016 Table 9-26 says that for data frames, A1 must be 3616 + * the BSSID - we've checked that already but may have accepted 3617 + * the wildcard (ff:ff:ff:ff:ff:ff). 3618 + * 3619 + * It also says: 3620 + * The BSSID of the Data frame is determined as follows: 3621 + * a) If the STA is contained within an AP or is associated 3622 + * with an AP, the BSSID is the address currently in use 3623 + * by the STA contained in the AP. 3624 + * 3625 + * So we should not accept data frames with an address that's 3626 + * multicast. 3627 + * 3628 + * Accepting it also opens a security problem because stations 3629 + * could encrypt it with the GTK and inject traffic that way. 3630 + */ 3631 + if (ieee80211_is_data(hdr->frame_control) && multicast) 3632 + return false; 3633 + 3642 3634 return true; 3643 3635 case NL80211_IFTYPE_WDS: 3644 3636 if (bssid || !ieee80211_is_data(hdr->frame_control))
+3 -1
net/qrtr/qrtr.c
··· 658 658 } 659 659 660 660 if (plen != len) { 661 - skb_pad(skb, plen - len); 661 + rc = skb_pad(skb, plen - len); 662 + if (rc) 663 + goto out_node; 662 664 skb_put(skb, plen - len); 663 665 } 664 666
+32 -23
net/sched/act_api.c
··· 529 529 return err; 530 530 } 531 531 532 - static int nla_memdup_cookie(struct tc_action *a, struct nlattr **tb) 532 + static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb) 533 533 { 534 - a->act_cookie = kzalloc(sizeof(*a->act_cookie), GFP_KERNEL); 535 - if (!a->act_cookie) 536 - return -ENOMEM; 534 + struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL); 535 + if (!c) 536 + return NULL; 537 537 538 - a->act_cookie->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL); 539 - if (!a->act_cookie->data) { 540 - kfree(a->act_cookie); 541 - return -ENOMEM; 538 + c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL); 539 + if (!c->data) { 540 + kfree(c); 541 + return NULL; 542 542 } 543 - a->act_cookie->len = nla_len(tb[TCA_ACT_COOKIE]); 543 + c->len = nla_len(tb[TCA_ACT_COOKIE]); 544 544 545 - return 0; 545 + return c; 546 546 } 547 547 548 548 struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla, ··· 551 551 { 552 552 struct tc_action *a; 553 553 struct tc_action_ops *a_o; 554 + struct tc_cookie *cookie = NULL; 554 555 char act_name[IFNAMSIZ]; 555 556 struct nlattr *tb[TCA_ACT_MAX + 1]; 556 557 struct nlattr *kind; ··· 567 566 goto err_out; 568 567 if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ) 569 568 goto err_out; 569 + if (tb[TCA_ACT_COOKIE]) { 570 + int cklen = nla_len(tb[TCA_ACT_COOKIE]); 571 + 572 + if (cklen > TC_COOKIE_MAX_SIZE) 573 + goto err_out; 574 + 575 + cookie = nla_memdup_cookie(tb); 576 + if (!cookie) { 577 + err = -ENOMEM; 578 + goto err_out; 579 + } 580 + } 570 581 } else { 571 582 err = -EINVAL; 572 583 if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ) ··· 617 604 if (err < 0) 618 605 goto err_mod; 619 606 620 - if (tb[TCA_ACT_COOKIE]) { 621 - int cklen = nla_len(tb[TCA_ACT_COOKIE]); 622 - 623 - if (cklen > TC_COOKIE_MAX_SIZE) { 624 - err = -EINVAL; 625 - tcf_hash_release(a, bind); 626 - goto err_mod; 607 + if (name == NULL && tb[TCA_ACT_COOKIE]) { 608 + if (a->act_cookie) { 609 + kfree(a->act_cookie->data); 
610 + kfree(a->act_cookie); 627 611 } 628 - 629 - if (nla_memdup_cookie(a, tb) < 0) { 630 - err = -ENOMEM; 631 - tcf_hash_release(a, bind); 632 - goto err_mod; 633 - } 612 + a->act_cookie = cookie; 634 613 } 635 614 636 615 /* module count goes up only when brand new policy is created ··· 637 632 err_mod: 638 633 module_put(a_o->owner); 639 634 err_out: 635 + if (cookie) { 636 + kfree(cookie->data); 637 + kfree(cookie); 638 + } 640 639 return ERR_PTR(err); 641 640 } 642 641
+2 -2
tools/testing/selftests/bpf/test_maps.c
··· 282 282 { 283 283 unsigned int nr_cpus = bpf_num_possible_cpus(); 284 284 int key, next_key, fd, i; 285 - long values[nr_cpus]; 285 + long long values[nr_cpus]; 286 286 287 287 fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key), 288 288 sizeof(values[0]), 2, 0); ··· 340 340 * allocator more than anything else 341 341 */ 342 342 unsigned int nr_keys = 2000; 343 - long values[nr_cpus]; 343 + long long values[nr_cpus]; 344 344 int key, fd, i; 345 345 346 346 fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
+20 -2
tools/testing/selftests/net/psock_fanout.c
··· 75 75 { 76 76 int fd, val; 77 77 78 - fd = socket(PF_PACKET, SOCK_DGRAM, htons(ETH_P_IP)); 78 + fd = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_IP)); 79 79 if (fd < 0) { 80 80 perror("socket packet"); 81 81 exit(1); ··· 93 93 94 94 pair_udp_setfilter(fd); 95 95 return fd; 96 + } 97 + 98 + static void sock_fanout_set_cbpf(int fd) 99 + { 100 + struct sock_filter bpf_filter[] = { 101 + BPF_STMT(BPF_LD+BPF_B+BPF_ABS, 80), /* ldb [80] */ 102 + BPF_STMT(BPF_RET+BPF_A, 0), /* ret A */ 103 + }; 104 + struct sock_fprog bpf_prog; 105 + 106 + bpf_prog.filter = bpf_filter; 107 + bpf_prog.len = sizeof(bpf_filter) / sizeof(struct sock_filter); 108 + 109 + if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA, &bpf_prog, 110 + sizeof(bpf_prog))) { 111 + perror("fanout data cbpf"); 112 + exit(1); 113 + } 96 114 } 97 115 98 116 static void sock_fanout_set_ebpf(int fd) ··· 288 270 exit(1); 289 271 } 290 272 if (type == PACKET_FANOUT_CBPF) 291 - sock_setfilter(fds[0], SOL_PACKET, PACKET_FANOUT_DATA); 273 + sock_fanout_set_cbpf(fds[0]); 292 274 else if (type == PACKET_FANOUT_EBPF) 293 275 sock_fanout_set_ebpf(fds[0]); 294 276
+3 -10
tools/testing/selftests/net/psock_lib.h
··· 38 38 # define __maybe_unused __attribute__ ((__unused__)) 39 39 #endif 40 40 41 - static __maybe_unused void sock_setfilter(int fd, int lvl, int optnum) 41 + static __maybe_unused void pair_udp_setfilter(int fd) 42 42 { 43 43 /* the filter below checks for all of the following conditions that 44 44 * are based on the contents of create_payload() ··· 76 76 }; 77 77 struct sock_fprog bpf_prog; 78 78 79 - if (lvl == SOL_PACKET && optnum == PACKET_FANOUT_DATA) 80 - bpf_filter[5].code = 0x16; /* RET A */ 81 - 82 79 bpf_prog.filter = bpf_filter; 83 80 bpf_prog.len = sizeof(bpf_filter) / sizeof(struct sock_filter); 84 - if (setsockopt(fd, lvl, optnum, &bpf_prog, 81 + 82 + if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf_prog, 85 83 sizeof(bpf_prog))) { 86 84 perror("setsockopt SO_ATTACH_FILTER"); 87 85 exit(1); 88 86 } 89 - } 90 - 91 - static __maybe_unused void pair_udp_setfilter(int fd) 92 - { 93 - sock_setfilter(fd, SOL_SOCKET, SO_ATTACH_FILTER); 94 87 } 95 88 96 89 static __maybe_unused void pair_udp_open(int fds[], uint16_t port)