Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) Fix memory leak in nftables, from Liping Zhang.

2) Need to check result of vlan_insert_tag() in batman-adv, otherwise we
risk NULL skb derefs, from Sven Eckelmann.

3) Check for dev_alloc_skb() failures in cfg80211, from Gregory
Greenman.

4) Handle properly when we have ppp_unregister_channel() happening in
parallel with ppp_connect_channel(), from WANG Cong.

5) Fix DCCP deadlock, from Eric Dumazet.

6) Bail out properly in UDP if sk_filter() truncates the packet to be
smaller than even the space that the protocol headers need, from
Michal Kubecek.

7) Similarly for rose, dccp, and sctp, from Willem de Bruijn.

8) Make TCP challenge ACKs less predictable, from Eric Dumazet.

9) Fix infinite loop in bgmac_dma_tx_add(), from Florian Fainelli.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (65 commits)
packet: propagate sock_cmsg_send() error
net/mlx5e: Fix del vxlan port command buffer memset
packet: fix second argument of sock_tx_timestamp()
net: switchdev: change ageing_time type to clock_t
Update maintainer for EHEA driver.
net/mlx4_en: Add resilience in low memory systems
net/mlx4_en: Move filters cleanup to a proper location
sctp: load transport header after sk_filter
net/sched/sch_htb: clamp xstats tokens to fit into 32-bit int
net: cavium: liquidio: Avoid dma_unmap_single on uninitialized ndata
net: nb8800: Fix SKB leak in nb8800_receive()
et131x: Fix logical vs bitwise check in et131x_tx_timeout()
vlan: use a valid default mtu value for vlan over macsec
net: bgmac: Fix infinite loop in bgmac_dma_tx_add()
mlxsw: spectrum: Prevent invalid ingress buffer mapping
mlxsw: spectrum: Prevent overwrite of DCB capability fields
mlxsw: spectrum: Don't emit errors when PFC is disabled
mlxsw: spectrum: Indicate support for autonegotiation
mlxsw: spectrum: Force link training according to admin state
r8152: add MODULE_VERSION
...

+848 -318
+1 -1
MAINTAINERS
···
 F:	fs/efs/

 EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
-M:	Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
+M:	Douglas Miller <dougmill@linux.vnet.ibm.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/ethernet/ibm/ehea/
+5 -1
drivers/net/bonding/bond_netlink.c
···
 	if (err < 0)
 		return err;

-	return register_netdevice(bond_dev);
+	err = register_netdevice(bond_dev);
+
+	netif_carrier_off(bond_dev);
+
+	return err;
 }

 static size_t bond_get_size(const struct net_device *bond_dev)
+1 -1
drivers/net/ethernet/agere/et131x.c
···
 	unsigned long flags;

 	/* If the device is closed, ignore the timeout */
-	if (~(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
+	if (!(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
 		return;

 	/* Any nonrecoverable hardware error?
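The one-character change above swaps a bitwise NOT for a logical NOT: `~` applied to a masked flag word is nonzero in almost every case, so the guard was always true and every tx timeout was silently ignored. A standalone sketch of the difference (the flag value is illustrative, not the driver's real bit):

#include <stdio.h>

#define INTERRUPT_IN_USE 0x08	/* illustrative bit, not the driver's value */

int main(void)
{
	unsigned int flags = INTERRUPT_IN_USE;	/* the bit IS set */

	/* ~(flags & BIT) is ~0x08 = 0xfffffff7 -- nonzero, i.e. "true",
	 * so the buggy "if (~(...)) return;" fired even when the device
	 * was open. */
	printf("bitwise: %#x (%s)\n", ~(flags & INTERRUPT_IN_USE),
	       ~(flags & INTERRUPT_IN_USE) ? "true" : "false");

	/* !(flags & BIT) tests the condition itself; it is false here,
	 * letting the timeout handler proceed as intended. */
	printf("logical: %d (%s)\n", !(flags & INTERRUPT_IN_USE),
	       !(flags & INTERRUPT_IN_USE) ? "true" : "false");
	return 0;
}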
+1
drivers/net/ethernet/aurora/nb8800.c
···
 	if (err) {
 		netdev_err(dev, "rx buffer allocation failed\n");
 		dev->stats.rx_dropped++;
+		dev_kfree_skb(skb);
 		return;
 	}

+1 -1
drivers/net/ethernet/broadcom/bgmac.c
···
 	dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
 			 DMA_TO_DEVICE);

-	while (i > 0) {
+	while (i-- > 0) {
 		int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
 		struct bgmac_slot_info *slot = &ring->slots[index];
 		u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
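The original loop never decremented i, so the error-unwind path spun forever (fix item 9 above). Folding the post-decrement into the test visits i-1 down to 0, exactly the slots whose mappings must be undone. A minimal model (the slot count is made up):

#include <stdio.h>

int main(void)
{
	int i = 3;	/* slots mapped before the failure; value illustrative */

	/* "while (i > 0)" with no decrement in the body loops forever;
	 * "while (i-- > 0)" sees i-1, i-2, ..., 0 inside the body. */
	while (i-- > 0)
		printf("unmap slot %d\n", i);
	return 0;
}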
+1 -1
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
···
 {
 	struct bnxt *bp = netdev_priv(dev);
 	u16 start = eeprom->offset, length = eeprom->len;
-	int rc;
+	int rc = 0;

 	memset(data, 0, eeprom->len);

+5 -4
drivers/net/ethernet/cavium/liquidio/lio_main.c
···
 	if (!g) {
 		netif_info(lio, tx_err, lio->netdev,
 			   "Transmit scatter gather: glist null!\n");
-		goto lio_xmit_failed;
+		goto lio_xmit_dma_failed;
 	}

 	cmdsetup.s.gather = 1;
···
 	else
 		status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
 	if (status == IQ_SEND_FAILED)
-		goto lio_xmit_failed;
+		goto lio_xmit_dma_failed;

 	netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
···

 	return NETDEV_TX_OK;

+lio_xmit_dma_failed:
+	dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
+			 ndata.datasize, DMA_TO_DEVICE);
 lio_xmit_failed:
 	stats->tx_dropped++;
 	netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
 		   iq_no, stats->tx_dropped);
-	dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
-			 ndata.datasize, DMA_TO_DEVICE);
 	recv_buffer_free(skb);
 	return NETDEV_TX_OK;
 }
+11 -5
drivers/net/ethernet/ethoc.c
···
 	unsigned int entry;
 	void *dest;

+	if (skb_put_padto(skb, ETHOC_ZLEN)) {
+		dev->stats.tx_errors++;
+		goto out_no_free;
+	}
+
 	if (unlikely(skb->len > ETHOC_BUFSIZ)) {
 		dev->stats.tx_errors++;
 		goto out;
···
 	skb_tx_timestamp(skb);
 out:
 	dev_kfree_skb(skb);
+out_no_free:
 	return NETDEV_TX_OK;
 }
···
 	if (!priv->iobase) {
 		dev_err(&pdev->dev, "cannot remap I/O memory space\n");
 		ret = -ENXIO;
-		goto error;
+		goto free;
 	}

 	if (netdev->mem_end) {
···
 		if (!priv->membase) {
 			dev_err(&pdev->dev, "cannot remap memory space\n");
 			ret = -ENXIO;
-			goto error;
+			goto free;
 		}
 	} else {
 		/* Allocate buffer memory */
···
 			dev_err(&pdev->dev, "cannot allocate %dB buffer\n",
 				buffer_size);
 			ret = -ENOMEM;
-			goto error;
+			goto free;
 		}
 		netdev->mem_end = netdev->mem_start + buffer_size;
 		priv->dma_alloc = buffer_size;
···
 		128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
 	if (num_bd < 4) {
 		ret = -ENODEV;
-		goto error;
+		goto free;
 	}
 	priv->num_bd = num_bd;
 	/* num_tx must be a power of two */
···
 	priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void *), GFP_KERNEL);
 	if (!priv->vma) {
 		ret = -ENOMEM;
-		goto error;
+		goto free;
 	}

 	/* Allow the platform setup code to pass in a MAC address. */
+1
drivers/net/ethernet/ezchip/nps_enet.c
···
 	ge_rst_value |= NPS_ENET_ENABLE << RST_GMAC_0_SHIFT;
 	nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
 	usleep_range(10, 20);
+	ge_rst_value = 0;
 	nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);

 	/* Tx fifo reset sequence */
+167 -60
drivers/net/ethernet/ibm/ibmvnic.c
···
 #include <linux/uaccess.h>
 #include <asm/firmware.h>
 #include <linux/seq_file.h>
+#include <linux/workqueue.h>

 #include "ibmvnic.h"
···
 static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
 static int ibmvnic_remove(struct vio_dev *);
 static void release_sub_crqs(struct ibmvnic_adapter *);
+static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *);
 static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
···
 	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
 	ibmvnic_send_crq(adapter, &crq);

-	netif_start_queue(netdev);
+	netif_tx_start_all_queues(netdev);
+
 	return 0;

 bounce_map_failed:
···
 	for (i = 0; i < adapter->req_rx_queues; i++)
 		napi_disable(&adapter->napi[i]);

-	netif_stop_queue(netdev);
+	netif_tx_stop_all_queues(netdev);

 	if (adapter->bounce_buffer) {
 		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
···
 		goto reg_failed;
 	}

-	scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
-	if (scrq->irq == NO_IRQ) {
-		dev_err(dev, "Error mapping irq\n");
-		goto map_irq_failed;
-	}
-
 	scrq->adapter = adapter;
 	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
 	scrq->cur = 0;
···
 	return scrq;

-map_irq_failed:
-	do {
-		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
-					adapter->vdev->unit_address,
-					scrq->crq_num);
-	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
 reg_failed:
 	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
 			 DMA_BIDIRECTIONAL);
···
 		if (adapter->tx_scrq[i]) {
 			free_irq(adapter->tx_scrq[i]->irq,
 				 adapter->tx_scrq[i]);
+			irq_dispose_mapping(adapter->tx_scrq[i]->irq);
 			release_sub_crq_queue(adapter,
 					      adapter->tx_scrq[i]);
 		}
···
 		if (adapter->rx_scrq[i]) {
 			free_irq(adapter->rx_scrq[i]->irq,
 				 adapter->rx_scrq[i]);
+			irq_dispose_mapping(adapter->rx_scrq[i]->irq);
 			release_sub_crq_queue(adapter,
 					      adapter->rx_scrq[i]);
 		}
+		adapter->rx_scrq = NULL;
+	}
+
+	adapter->requested_caps = 0;
+}
+
+static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
+{
+	int i;
+
+	if (adapter->tx_scrq) {
+		for (i = 0; i < adapter->req_tx_queues; i++)
+			if (adapter->tx_scrq[i])
+				release_sub_crq_queue(adapter,
+						      adapter->tx_scrq[i]);
+		adapter->tx_scrq = NULL;
+	}
+
+	if (adapter->rx_scrq) {
+		for (i = 0; i < adapter->req_rx_queues; i++)
+			if (adapter->rx_scrq[i])
+				release_sub_crq_queue(adapter,
+						      adapter->rx_scrq[i]);
 		adapter->rx_scrq = NULL;
 	}
···
 	return IRQ_HANDLED;
 }

+static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
+{
+	struct device *dev = &adapter->vdev->dev;
+	struct ibmvnic_sub_crq_queue *scrq;
+	int i = 0, j = 0;
+	int rc = 0;
+
+	for (i = 0; i < adapter->req_tx_queues; i++) {
+		scrq = adapter->tx_scrq[i];
+		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
+
+		if (scrq->irq == NO_IRQ) {
+			rc = -EINVAL;
+			dev_err(dev, "Error mapping irq\n");
+			goto req_tx_irq_failed;
+		}
+
+		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
+				 0, "ibmvnic_tx", scrq);
+
+		if (rc) {
+			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
+				scrq->irq, rc);
+			irq_dispose_mapping(scrq->irq);
+			goto req_rx_irq_failed;
+		}
+	}
+
+	for (i = 0; i < adapter->req_rx_queues; i++) {
+		scrq = adapter->rx_scrq[i];
+		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
+		if (scrq->irq == NO_IRQ) {
+			rc = -EINVAL;
+			dev_err(dev, "Error mapping irq\n");
+			goto req_rx_irq_failed;
+		}
+		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
+				 0, "ibmvnic_rx", scrq);
+		if (rc) {
+			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
+				scrq->irq, rc);
+			irq_dispose_mapping(scrq->irq);
+			goto req_rx_irq_failed;
+		}
+	}
+	return rc;
+
+req_rx_irq_failed:
+	for (j = 0; j < i; j++)
+		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
+	irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+	i = adapter->req_tx_queues;
+req_tx_irq_failed:
+	for (j = 0; j < i; j++)
+		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
+	irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+	release_sub_crqs_no_irqs(adapter);
+	return rc;
+}
+
 static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
 {
 	struct device *dev = &adapter->vdev->dev;
···
 	union ibmvnic_crq crq;
 	int total_queues;
 	int more = 0;
-	int i, j;
-	int rc;
+	int i;

 	if (!retry) {
 		/* Sub-CRQ entries are 32 byte long */
···
 	for (i = 0; i < adapter->req_tx_queues; i++) {
 		adapter->tx_scrq[i] = allqueues[i];
 		adapter->tx_scrq[i]->pool_index = i;
-		rc = request_irq(adapter->tx_scrq[i]->irq, ibmvnic_interrupt_tx,
-				 0, "ibmvnic_tx", adapter->tx_scrq[i]);
-		if (rc) {
-			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
-				adapter->tx_scrq[i]->irq, rc);
-			goto req_tx_irq_failed;
-		}
 	}

 	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
···
 	for (i = 0; i < adapter->req_rx_queues; i++) {
 		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
 		adapter->rx_scrq[i]->scrq_num = i;
-		rc = request_irq(adapter->rx_scrq[i]->irq, ibmvnic_interrupt_rx,
-				 0, "ibmvnic_rx", adapter->rx_scrq[i]);
-		if (rc) {
-			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
-				adapter->rx_scrq[i]->irq, rc);
-			goto req_rx_irq_failed;
-		}
 	}

 	memset(&crq, 0, sizeof(crq));
···

 	return;

-req_rx_irq_failed:
-	for (j = 0; j < i; j++)
-		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
-	i = adapter->req_tx_queues;
-req_tx_irq_failed:
-	for (j = 0; j < i; j++)
-		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
-	kfree(adapter->rx_scrq);
-	adapter->rx_scrq = NULL;
 rx_failed:
 	kfree(adapter->tx_scrq);
 	adapter->tx_scrq = NULL;
···
 					 *req_value,
 					 (long int)be32_to_cpu(crq->request_capability_rsp.
 							       number), name);
-		release_sub_crqs(adapter);
+		release_sub_crqs_no_irqs(adapter);
 		*req_value = be32_to_cpu(crq->request_capability_rsp.number);
-		complete(&adapter->init_done);
+		init_sub_crqs(adapter, 1);
 		return;
 	default:
 		dev_err(dev, "Error %d in request cap rsp\n",
···

 out:
 	if (atomic_read(&adapter->running_cap_queries) == 0)
-		complete(&adapter->init_done);
+		init_sub_crqs(adapter, 0);
 	/* We're done querying the capabilities, initialize sub-crqs */
 }
···
 		dev_info(dev, "Partner initialized\n");
 		/* Send back a response */
 		rc = ibmvnic_send_crq_init_complete(adapter);
-		if (rc == 0)
-			send_version_xchg(adapter);
+		if (!rc)
+			schedule_work(&adapter->vnic_crq_init);
 		else
 			dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
 		break;
···
 	.release	= single_release,
 };

+static void handle_crq_init_rsp(struct work_struct *work)
+{
+	struct ibmvnic_adapter *adapter = container_of(work,
+						       struct ibmvnic_adapter,
+						       vnic_crq_init);
+	struct device *dev = &adapter->vdev->dev;
+	struct net_device *netdev = adapter->netdev;
+	unsigned long timeout = msecs_to_jiffies(30000);
+	int rc;
+
+	send_version_xchg(adapter);
+	reinit_completion(&adapter->init_done);
+	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
+		dev_err(dev, "Passive init timeout\n");
+		goto task_failed;
+	}
+
+	do {
+		if (adapter->renegotiate) {
+			adapter->renegotiate = false;
+			release_sub_crqs_no_irqs(adapter);
+			send_cap_queries(adapter);
+
+			reinit_completion(&adapter->init_done);
+			if (!wait_for_completion_timeout(&adapter->init_done,
+							 timeout)) {
+				dev_err(dev, "Passive init timeout\n");
+				goto task_failed;
+			}
+		}
+	} while (adapter->renegotiate);
+	rc = init_sub_crq_irqs(adapter);
+
+	if (rc)
+		goto task_failed;
+
+	netdev->real_num_tx_queues = adapter->req_tx_queues;
+
+	rc = register_netdev(netdev);
+	if (rc) {
+		dev_err(dev,
+			"failed to register netdev rc=%d\n", rc);
+		goto register_failed;
+	}
+	dev_info(dev, "ibmvnic registered\n");
+
+	return;
+
+register_failed:
+	release_sub_crqs(adapter);
+task_failed:
+	dev_err(dev, "Passive initialization was not successful\n");
+}
+
 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 {
+	unsigned long timeout = msecs_to_jiffies(30000);
 	struct ibmvnic_adapter *adapter;
 	struct net_device *netdev;
 	unsigned char *mac_addr_p;
···
 	netdev->netdev_ops = &ibmvnic_netdev_ops;
 	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
 	SET_NETDEV_DEV(netdev, &dev->dev);
+
+	INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);

 	spin_lock_init(&adapter->stats_lock);
···
 	ibmvnic_send_crq_init(adapter);

 	init_completion(&adapter->init_done);
-	wait_for_completion(&adapter->init_done);
+	if (!wait_for_completion_timeout(&adapter->init_done, timeout))
+		return 0;

 	do {
-		adapter->renegotiate = false;
-
-		init_sub_crqs(adapter, 0);
-
-		reinit_completion(&adapter->init_done);
-		wait_for_completion(&adapter->init_done);
-
 		if (adapter->renegotiate) {
-			release_sub_crqs(adapter);
+			adapter->renegotiate = false;
+			release_sub_crqs_no_irqs(adapter);
 			send_cap_queries(adapter);

 			reinit_completion(&adapter->init_done);
-			wait_for_completion(&adapter->init_done);
+			if (!wait_for_completion_timeout(&adapter->init_done,
+							 timeout))
+				return 0;
 		}
 	} while (adapter->renegotiate);

-	/* if init_sub_crqs is partially successful, retry */
-	while (!adapter->tx_scrq || !adapter->rx_scrq) {
-		init_sub_crqs(adapter, 1);
-
-		reinit_completion(&adapter->init_done);
-		wait_for_completion(&adapter->init_done);
+	rc = init_sub_crq_irqs(adapter);
+	if (rc) {
+		dev_err(&dev->dev, "failed to initialize sub crq irqs\n");
+		goto free_debugfs;
 	}

 	netdev->real_num_tx_queues = adapter->req_tx_queues;
···
 	rc = register_netdev(netdev);
 	if (rc) {
 		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
-		goto free_debugfs;
+		goto free_sub_crqs;
 	}
 	dev_info(&dev->dev, "ibmvnic registered\n");

 	return 0;

+free_sub_crqs:
+	release_sub_crqs(adapter);
 free_debugfs:
 	if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
 		debugfs_remove_recursive(adapter->debugfs_dir);
+2
drivers/net/ethernet/ibm/ibmvnic.h
···
 	u64 opt_rxba_entries_per_subcrq;
 	__be64 tx_rx_desc_req;
 	u8 map_id;
+
+	struct work_struct vnic_crq_init;
 };
+31 -17
drivers/net/ethernet/intel/i40e/i40e_main.c
···
 	if (!vsi || !macaddr)
 		return NULL;

+	/* Do not allow broadcast filter to be added since broadcast filter
+	 * is added as part of add VSI for any newly created VSI except
+	 * FDIR VSI
+	 */
+	if (is_broadcast_ether_addr(macaddr))
+		return NULL;
+
 	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
 	if (!f) {
 		f = kzalloc(sizeof(*f), GFP_ATOMIC);
···
 				 "set multicast promisc failed, err %d, aq_err %d\n",
 				 aq_ret, pf->hw.aq.asq_last_status);
 		}
-	}
-	aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
-					   vsi->seid,
-					   cur_promisc, NULL);
-	if (aq_ret) {
-		retval = i40e_aq_rc_to_posix(aq_ret,
-					     pf->hw.aq.asq_last_status);
-		dev_info(&pf->pdev->dev,
-			 "set brdcast promisc failed, err %s, aq_err %s\n",
-			 i40e_stat_str(&pf->hw, aq_ret),
-			 i40e_aq_str(&pf->hw,
-				     pf->hw.aq.asq_last_status));
 	}
 }
 out:
···
  * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
  * @vsi: the VSI being configured
  * @v_idx: index of the vector in the vsi struct
+ * @cpu: cpu to be used on affinity_mask
  *
  * We allocate one q_vector. If allocation fails we return -ENOMEM.
  **/
-static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
+static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
 {
 	struct i40e_q_vector *q_vector;
···
 	q_vector->vsi = vsi;
 	q_vector->v_idx = v_idx;
-	cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+	cpumask_set_cpu(cpu, &q_vector->affinity_mask);
+
 	if (vsi->netdev)
 		netif_napi_add(vsi->netdev, &q_vector->napi,
 			       i40e_napi_poll, NAPI_POLL_WEIGHT);
···
 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
 {
 	struct i40e_pf *pf = vsi->back;
-	int v_idx, num_q_vectors;
-	int err;
+	int err, v_idx, num_q_vectors, current_cpu;

 	/* if not MSIX, give the one vector only to the LAN VSI */
 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
···
 	else
 		return -EINVAL;

+	current_cpu = cpumask_first(cpu_online_mask);
+
 	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
-		err = i40e_vsi_alloc_q_vector(vsi, v_idx);
+		err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
 		if (err)
 			goto err_out;
+		current_cpu = cpumask_next(current_cpu, cpu_online_mask);
+		if (unlikely(current_cpu >= nr_cpu_ids))
+			current_cpu = cpumask_first(cpu_online_mask);
 	}

 	return 0;
···
 static int i40e_add_vsi(struct i40e_vsi *vsi)
 {
 	int ret = -ENODEV;
+	i40e_status aq_ret = 0;
 	u8 laa_macaddr[ETH_ALEN];
 	bool found_laa_mac_filter = false;
 	struct i40e_pf *pf = vsi->back;
···
 		vsi->info.valid_sections = 0;
 		vsi->seid = ctxt.seid;
 		vsi->id = ctxt.vsi_number;
+	}
+	/* Except FDIR VSI, for all othet VSI set the broadcast filter */
+	if (vsi->type != I40E_VSI_FDIR) {
+		aq_ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
+		if (aq_ret) {
+			ret = i40e_aq_rc_to_posix(aq_ret,
+						  hw->aq.asq_last_status);
+			dev_info(&pf->pdev->dev,
+				 "set brdcast promisc failed, err %s, aq_err %s\n",
+				 i40e_stat_str(hw, aq_ret),
+				 i40e_aq_str(hw, hw->aq.asq_last_status));
+		}
 	}

 	spin_lock_bh(&vsi->mac_filter_list_lock);
+16 -12
drivers/net/ethernet/intel/i40e/i40e_txrx.c
···
 				    union i40e_rx_desc *rx_desc)
 {
 	struct i40e_rx_ptype_decoded decoded;
-	bool ipv4, ipv6, tunnel = false;
 	u32 rx_error, rx_status;
+	bool ipv4, ipv6;
 	u8 ptype;
 	u64 qword;
···
 	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
 		return;

-	/* The hardware supported by this driver does not validate outer
-	 * checksums for tunneled VXLAN or GENEVE frames. I don't agree
-	 * with it but the specification states that you "MAY validate", it
-	 * doesn't make it a hard requirement so if we have validated the
-	 * inner checksum report CHECKSUM_UNNECESSARY.
+	/* If there is an outer header present that might contain a checksum
+	 * we need to bump the checksum level by 1 to reflect the fact that
+	 * we are indicating we validated the inner checksum.
 	 */
-	if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP |
-				  I40E_RX_PTYPE_INNER_PROT_UDP |
-				  I40E_RX_PTYPE_INNER_PROT_SCTP))
-		tunnel = true;
+	if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
+		skb->csum_level = 1;

-	skb->ip_summed = CHECKSUM_UNNECESSARY;
-	skb->csum_level = tunnel ? 1 : 0;
+	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
+	switch (decoded.inner_prot) {
+	case I40E_RX_PTYPE_INNER_PROT_TCP:
+	case I40E_RX_PTYPE_INNER_PROT_UDP:
+	case I40E_RX_PTYPE_INNER_PROT_SCTP:
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		/* fall though */
+	default:
+		break;
+	}

 	return;
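The rework above (mirrored in the identical i40evf hunk below) decouples the two decisions the old code conflated: whether the frame is tunneled (which only sets the checksum level) and whether the inner protocol is one the hardware validates (which alone justifies CHECKSUM_UNNECESSARY). A standalone model of that decision logic, with illustrative names rather than the driver's types:

#include <stdio.h>
#include <stdbool.h>

enum inner_prot { PROT_NONE, PROT_TCP, PROT_UDP, PROT_SCTP, PROT_OTHER };

static void rx_checksum(bool tunneled, enum inner_prot prot)
{
	int csum_level = 0;
	bool unnecessary = false;

	if (tunneled)
		csum_level = 1;	/* what was validated is the *inner* checksum */

	/* only TCP/UDP/SCTP inner protocols are hardware-verified */
	switch (prot) {
	case PROT_TCP:
	case PROT_UDP:
	case PROT_SCTP:
		unnecessary = true;
		/* fall through */
	default:
		break;
	}
	printf("tunneled=%d prot=%d -> unnecessary=%d level=%d\n",
	       tunneled, prot, unnecessary, csum_level);
}

int main(void)
{
	rx_checksum(false, PROT_TCP);	/* plain TCP: verified, level 0 */
	rx_checksum(true, PROT_UDP);	/* tunneled UDP: verified, level 1 */
	rx_checksum(true, PROT_OTHER);	/* unknown inner proto: not reported */
	return 0;
}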
+16 -12
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
···
 				    union i40e_rx_desc *rx_desc)
 {
 	struct i40e_rx_ptype_decoded decoded;
-	bool ipv4, ipv6, tunnel = false;
 	u32 rx_error, rx_status;
+	bool ipv4, ipv6;
 	u8 ptype;
 	u64 qword;
···
 	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
 		return;

-	/* The hardware supported by this driver does not validate outer
-	 * checksums for tunneled VXLAN or GENEVE frames. I don't agree
-	 * with it but the specification states that you "MAY validate", it
-	 * doesn't make it a hard requirement so if we have validated the
-	 * inner checksum report CHECKSUM_UNNECESSARY.
+	/* If there is an outer header present that might contain a checksum
+	 * we need to bump the checksum level by 1 to reflect the fact that
+	 * we are indicating we validated the inner checksum.
 	 */
-	if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP |
-				  I40E_RX_PTYPE_INNER_PROT_UDP |
-				  I40E_RX_PTYPE_INNER_PROT_SCTP))
-		tunnel = true;
+	if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
+		skb->csum_level = 1;

-	skb->ip_summed = CHECKSUM_UNNECESSARY;
-	skb->csum_level = tunnel ? 1 : 0;
+	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
+	switch (decoded.inner_prot) {
+	case I40E_RX_PTYPE_INNER_PROT_TCP:
+	case I40E_RX_PTYPE_INNER_PROT_UDP:
+	case I40E_RX_PTYPE_INNER_PROT_SCTP:
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		/* fall though */
+	default:
+		break;
+	}

 	return;
+1 -1
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
···
 	if (!test_bit(__IXGBE_DOWN, &adapter->state))
 		ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));

-	return 0;
+	return min(work_done, budget - 1);
 }

 /**
+1 -1
drivers/net/ethernet/marvell/mvneta.c
···
 /* Various constants */

 /* Coalescing */
-#define MVNETA_TXDONE_COAL_PKTS		1
+#define MVNETA_TXDONE_COAL_PKTS		0	/* interrupt per packet */
 #define MVNETA_RX_COAL_PKTS		32
 #define MVNETA_RX_COAL_USEC		100
+32 -22
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
···
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_port_profile new_prof;
+	struct mlx4_en_priv *tmp;
 	u32 rx_size, tx_size;
 	int port_up = 0;
 	int err = 0;
···
 	    tx_size == priv->tx_ring[0]->size)
 		return 0;

+	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+
 	mutex_lock(&mdev->state_lock);
+	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+	new_prof.tx_ring_size = tx_size;
+	new_prof.rx_ring_size = rx_size;
+	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+	if (err)
+		goto out;
+
 	if (priv->port_up) {
 		port_up = 1;
 		mlx4_en_stop_port(dev, 1);
 	}

-	mlx4_en_free_resources(priv);
+	mlx4_en_safe_replace_resources(priv, tmp);

-	priv->prof->tx_ring_size = tx_size;
-	priv->prof->rx_ring_size = rx_size;
-
-	err = mlx4_en_alloc_resources(priv);
-	if (err) {
-		en_err(priv, "Failed reallocating port resources\n");
-		goto out;
-	}
 	if (port_up) {
 		err = mlx4_en_start_port(dev);
 		if (err)
···
 	}

 	err = mlx4_en_moderation_update(priv);
-
 out:
+	kfree(tmp);
 	mutex_unlock(&mdev->state_lock);
 	return err;
 }
···
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_port_profile new_prof;
+	struct mlx4_en_priv *tmp;
 	int port_up = 0;
 	int err = 0;
···
 	    !channel->tx_count || !channel->rx_count)
 		return -EINVAL;

+	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+
 	mutex_lock(&mdev->state_lock);
+	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+	new_prof.num_tx_rings_p_up = channel->tx_count;
+	new_prof.tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
+	new_prof.rx_ring_num = channel->rx_count;
+
+	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+	if (err)
+		goto out;
+
 	if (priv->port_up) {
 		port_up = 1;
 		mlx4_en_stop_port(dev, 1);
 	}

-	mlx4_en_free_resources(priv);
-
-	priv->num_tx_rings_p_up = channel->tx_count;
-	priv->tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
-	priv->rx_ring_num = channel->rx_count;
-
-	err = mlx4_en_alloc_resources(priv);
-	if (err) {
-		en_err(priv, "Failed reallocating port resources\n");
-		goto out;
-	}
+	mlx4_en_safe_replace_resources(priv, tmp);

 	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
 	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
···
 	}

 	err = mlx4_en_moderation_update(priv);
-
 out:
+	kfree(tmp);
 	mutex_unlock(&mdev->state_lock);
 	return err;
 }
+97 -13
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
···
 	return 0;
 }

-void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 {
 	int i;
···

-int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
+static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 {
 	struct mlx4_en_port_profile *prof = priv->prof;
 	int i;
···
 	rtnl_unlock();
 }

+static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
+			     struct mlx4_en_priv *src,
+			     struct mlx4_en_port_profile *prof)
+{
+	memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
+	       sizeof(dst->hwtstamp_config));
+	dst->num_tx_rings_p_up = src->mdev->profile.num_tx_rings_p_up;
+	dst->tx_ring_num = prof->tx_ring_num;
+	dst->rx_ring_num = prof->rx_ring_num;
+	dst->flags = prof->flags;
+	dst->mdev = src->mdev;
+	dst->port = src->port;
+	dst->dev = src->dev;
+	dst->prof = prof;
+	dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
+					 DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
+
+	dst->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
+			       GFP_KERNEL);
+	if (!dst->tx_ring)
+		return -ENOMEM;
+
+	dst->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
+			     GFP_KERNEL);
+	if (!dst->tx_cq) {
+		kfree(dst->tx_ring);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
+				struct mlx4_en_priv *src)
+{
+	memcpy(dst->rx_ring, src->rx_ring,
+	       sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num);
+	memcpy(dst->rx_cq, src->rx_cq,
+	       sizeof(struct mlx4_en_cq *) * src->rx_ring_num);
+	memcpy(&dst->hwtstamp_config, &src->hwtstamp_config,
+	       sizeof(dst->hwtstamp_config));
+	dst->tx_ring_num = src->tx_ring_num;
+	dst->rx_ring_num = src->rx_ring_num;
+	dst->tx_ring = src->tx_ring;
+	dst->tx_cq = src->tx_cq;
+	memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
+}
+
+int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+				struct mlx4_en_priv *tmp,
+				struct mlx4_en_port_profile *prof)
+{
+	mlx4_en_copy_priv(tmp, priv, prof);
+
+	if (mlx4_en_alloc_resources(tmp)) {
+		en_warn(priv,
+			"%s: Resource allocation failed, using previous configuration\n",
+			__func__);
+		kfree(tmp->tx_ring);
+		kfree(tmp->tx_cq);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
+				    struct mlx4_en_priv *tmp)
+{
+	mlx4_en_free_resources(priv);
+	mlx4_en_update_priv(priv, tmp);
+}
+
 void mlx4_en_destroy_netdev(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
···
 	mdev->pndev[priv->port] = NULL;
 	mdev->upper[priv->port] = NULL;
 	mutex_unlock(&mdev->state_lock);
+
+#ifdef CONFIG_RFS_ACCEL
+	mlx4_en_cleanup_filters(priv);
+#endif

 	mlx4_en_free_resources(priv);
···
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_port_profile new_prof;
+	struct mlx4_en_priv *tmp;
 	int port_up = 0;
 	int err = 0;
···
 		return -EINVAL;
 	}

+	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+
 	mutex_lock(&mdev->state_lock);
+
+	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+	memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
+
+	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+	if (err)
+		goto out;
+
 	if (priv->port_up) {
 		port_up = 1;
 		mlx4_en_stop_port(dev, 1);
 	}

-	mlx4_en_free_resources(priv);
-
 	en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
-		ts_config.rx_filter, !!(features & NETIF_F_HW_VLAN_CTAG_RX));
+		ts_config.rx_filter,
+		!!(features & NETIF_F_HW_VLAN_CTAG_RX));

-	priv->hwtstamp_config.tx_type = ts_config.tx_type;
-	priv->hwtstamp_config.rx_filter = ts_config.rx_filter;
+	mlx4_en_safe_replace_resources(priv, tmp);

 	if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
 		if (features & NETIF_F_HW_VLAN_CTAG_RX)
···
 		dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
 	}

-	err = mlx4_en_alloc_resources(priv);
-	if (err) {
-		en_err(priv, "Failed reallocating port resources\n");
-		goto out;
-	}
 	if (port_up) {
 		err = mlx4_en_start_port(dev);
 		if (err)
···

 out:
 	mutex_unlock(&mdev->state_lock);
-	netdev_features_change(dev);
+	kfree(tmp);
+	if (!err)
+		netdev_features_change(dev);
 	return err;
 }
-3
drivers/net/ethernet/mellanox/mlx4/en_rx.c
···
 	ring->rx_info = NULL;
 	kfree(ring);
 	*pring = NULL;
-#ifdef CONFIG_RFS_ACCEL
-	mlx4_en_cleanup_filters(priv);
-#endif
 }

 void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
+7 -2
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
···
 	u32 rx_ring_num;
 	u32 tx_ring_size;
 	u32 rx_ring_size;
+	u8 num_tx_rings_p_up;
 	u8 rx_pause;
 	u8 rx_ppp;
 	u8 tx_pause;
 	u8 tx_ppp;
 	int rss_rings;
 	int inline_thold;
+	struct hwtstamp_config hwtstamp_config;
 };

 struct mlx4_en_profile {
···
 			    u8 rx_ppp, u8 rx_pause,
 			    u8 tx_ppp, u8 tx_pause);

-void mlx4_en_free_resources(struct mlx4_en_priv *priv);
-int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
+int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+				struct mlx4_en_priv *tmp,
+				struct mlx4_en_port_profile *prof);
+void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
+				    struct mlx4_en_priv *tmp);

 int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
 		      int entries, int ring, enum cq_type mode, int node);
+12 -1
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
···
 		goto err_close_channels;
 	}

+	/* FIXME: This is a W/A for tx timeout watch dog false alarm when
+	 * polling for inactive tx queues.
+	 */
+	netif_tx_start_all_queues(priv->netdev);
+
 	kfree(cparam);
 	return 0;
···
 static void mlx5e_close_channels(struct mlx5e_priv *priv)
 {
 	int i;
+
+	/* FIXME: This is a W/A only for tx timeout watch dog false alarm when
+	 * polling for inactive tx queues.
+	 */
+	netif_tx_stop_all_queues(priv->netdev);
+	netif_tx_disable(priv->netdev);

 	for (i = 0; i < priv->params.num_channels; i++)
 		mlx5e_close_channel(priv->channel[i]);
···
 	for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) {
 		struct mlx5e_sq *sq = priv->txq_to_sq_map[i];

-		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
+		if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
 			continue;
 		sched_work = true;
 		set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
···
 	u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)];
 	u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)];

-	memset(&in, 0, sizeof(in));
-	memset(&out, 0, sizeof(out));
+	memset(in, 0, sizeof(in));
+	memset(out, 0, sizeof(out));

 	MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
 		 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
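Worth noting: since in and out are local arrays in this hunk, `&in` and `in` denote the same address and sizeof(in) is the full array either way, so both memset forms zero the whole buffer; dropping the `&` matches the pointer type memset() expects and reads less ambiguously. A quick standalone check of that C subtlety:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int in[4] = { 1, 2, 3, 4 };

	/* "in" decays to &in[0]; "&in" is a pointer to the whole array.
	 * Same address, and sizeof(in) is all 16 bytes in both calls. */
	printf("in=%p &in=%p sizeof(in)=%zu\n",
	       (void *)in, (void *)&in, sizeof(in));
	memset(in, 0, sizeof(in));
	printf("after memset: %u %u %u %u\n", in[0], in[1], in[2], in[3]);
	return 0;
}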
+16 -1
drivers/net/ethernet/mellanox/mlxsw/reg.h
···
  * Configures the switch priority to buffer table.
  */
 #define MLXSW_REG_PPTB_ID 0x500B
-#define MLXSW_REG_PPTB_LEN 0x0C
+#define MLXSW_REG_PPTB_LEN 0x10

 static const struct mlxsw_reg_info mlxsw_reg_pptb = {
 	.id = MLXSW_REG_PPTB_ID,
···
  */
 MLXSW_ITEM32(reg, pptb, untagged_buff, 0x08, 0, 4);

+/* reg_pptb_prio_to_buff_msb
+ * Mapping of switch priority <i+8> to one of the allocated receive port
+ * buffers.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff_msb, 0x0C, 0x04, 4);
+
 #define MLXSW_REG_PPTB_ALL_PRIO 0xFF

 static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port)
···
 	mlxsw_reg_pptb_mm_set(payload, MLXSW_REG_PPTB_MM_UM);
 	mlxsw_reg_pptb_local_port_set(payload, local_port);
 	mlxsw_reg_pptb_pm_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
+	mlxsw_reg_pptb_pm_msb_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
+}
+
+static inline void mlxsw_reg_pptb_prio_to_buff_pack(char *payload, u8 prio,
+						    u8 buff)
+{
+	mlxsw_reg_pptb_prio_to_buff_set(payload, prio, buff);
+	mlxsw_reg_pptb_prio_to_buff_msb_set(payload, prio, buff);
 }

 /* PBMC - Port Buffer Management Control Register
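The PPTB register carries two arrays of 4-bit priority-to-buffer entries, one for priorities 0-7 and an "msb" one the device reads as priorities 8-15; the new pack helper mirrors each mapping into both halves so the upper priorities never hold a stale mapping. A standalone model of that packing (field offsets here are illustrative, not the register layout):

#include <stdio.h>
#include <stdint.h>

static void prio_to_buff_pack(uint32_t regs[2], unsigned int prio,
			      unsigned int buff)
{
	unsigned int shift = (prio % 8) * 4;	/* 4 bits per priority */

	/* write entry <prio> of the low word and entry <prio> of the msb
	 * word (read by the device as priority <prio + 8>), mirroring
	 * what mlxsw_reg_pptb_prio_to_buff_pack() does */
	regs[0] = (regs[0] & ~(0xfu << shift)) | ((buff & 0xfu) << shift);
	regs[1] = (regs[1] & ~(0xfu << shift)) | ((buff & 0xfu) << shift);
}

int main(void)
{
	uint32_t regs[2] = { 0, 0 };
	unsigned int prio;

	/* map every priority to buffer 0 except priority 3 -> buffer 1,
	 * the way the spectrum buffer-init loops below iterate the TCs */
	for (prio = 0; prio < 8; prio++)
		prio_to_buff_pack(regs, prio, prio == 3 ? 1 : 0);
	printf("prio0-7: %#010x  prio8-15(msb): %#010x\n", regs[0], regs[1]);
	return 0;
}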
+3 -25
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
···
 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
 }

-static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
-					 bool *p_is_up)
-{
-	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-	char paos_pl[MLXSW_REG_PAOS_LEN];
-	u8 oper_status;
-	int err;
-
-	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
-	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
-	if (err)
-		return err;
-	oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
-	*p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
-	return 0;
-}
-
 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
 				      unsigned char *addr)
 {
···
 	cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
 			 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
-			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+			 SUPPORTED_Pause | SUPPORTED_Asym_Pause |
+			 SUPPORTED_Autoneg;
 	cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
 	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
 					eth_proto_oper, cmd);
···
 	u32 eth_proto_new;
 	u32 eth_proto_cap;
 	u32 eth_proto_admin;
-	bool is_up;
 	int err;

 	speed = ethtool_cmd_speed(cmd);
···
 		return err;
 	}

-	err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
-	if (err) {
-		netdev_err(dev, "Failed to get oper status");
-		return err;
-	}
-	if (!is_up)
+	if (!netif_running(dev))
 		return 0;

 	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+1 -1
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
···

 	mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
-		mlxsw_reg_pptb_prio_to_buff_set(pptb_pl, i, 0);
+		mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
 	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
 			       pptb_pl);
 }
+6 -2
drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
···

 	mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
-		mlxsw_reg_pptb_prio_to_buff_set(pptb_pl, i, prio_tc[i]);
+		mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, prio_tc[i]);
+
 	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
 			       pptb_pl);
 }
···
 		return err;

 	memcpy(mlxsw_sp_port->dcb.ets, ets, sizeof(*ets));
+	mlxsw_sp_port->dcb.ets->ets_cap = IEEE_8021QAZ_MAX_TCS;

 	return 0;
 }
···
 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 	int err;

-	if (mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) {
+	if ((mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) &&
+	    pfc->pfc_en) {
 		netdev_err(dev, "PAUSE frames already enabled on port\n");
 		return -EINVAL;
 	}
···
 	}

 	memcpy(mlxsw_sp_port->dcb.pfc, pfc, sizeof(*pfc));
+	mlxsw_sp_port->dcb.pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;

 	return 0;
+3 -2
drivers/net/ppp/ppp_generic.c
···
 	spin_lock_bh(&pn->all_channels_lock);
 	list_del(&pch->list);
 	spin_unlock_bh(&pn->all_channels_lock);
-	put_net(pch->chan_net);
-	pch->chan_net = NULL;

 	pch->file.dead = 1;
 	wake_up_interruptible(&pch->file.rwait);
···
  */
 static void ppp_destroy_channel(struct channel *pch)
 {
+	put_net(pch->chan_net);
+	pch->chan_net = NULL;
+
 	atomic_dec(&channel_count);

 	if (!pch->file.dead) {
+76 -9
drivers/net/usb/r8152.c
···
 #include <linux/mdio.h>
 #include <linux/usb/cdc.h>
 #include <linux/suspend.h>
+#include <linux/acpi.h>

 /* Information for net-next */
 #define NETNEXT_VERSION		"08"
···

 /* SRAM_IMPEDANCE */
 #define RX_DRIVING_MASK		0x6000
+
+/* MAC PASSTHRU */
+#define AD_MASK			0xfee0
+#define EFUSE			0xcfdb
+#define PASS_THRU_MASK		0x1

 enum rtl_register_content {
 	_1000bps	= 0x10,
···
 	return ret;
 }

+/* Devices containing RTL8153-AD can support a persistent
+ * host system provided MAC address.
+ * Examples of this are Dell TB15 and Dell WD15 docks
+ */
+static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
+{
+	acpi_status status;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	union acpi_object *obj;
+	int ret = -EINVAL;
+	u32 ocp_data;
+	unsigned char buf[6];
+
+	/* test for -AD variant of RTL8153 */
+	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
+	if ((ocp_data & AD_MASK) != 0x1000)
+		return -ENODEV;
+
+	/* test for MAC address pass-through bit */
+	ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
+	if ((ocp_data & PASS_THRU_MASK) != 1)
+		return -ENODEV;
+
+	/* returns _AUXMAC_#AABBCCDDEEFF# */
+	status = acpi_evaluate_object(NULL, "\\_SB.AMAC", NULL, &buffer);
+	obj = (union acpi_object *)buffer.pointer;
+	if (!ACPI_SUCCESS(status))
+		return -ENODEV;
+	if (obj->type != ACPI_TYPE_BUFFER || obj->string.length != 0x17) {
+		netif_warn(tp, probe, tp->netdev,
+			   "Invalid buffer when reading pass-thru MAC addr: "
+			   "(%d, %d)\n",
+			   obj->type, obj->string.length);
+		goto amacout;
+	}
+	if (strncmp(obj->string.pointer, "_AUXMAC_#", 9) != 0 ||
+	    strncmp(obj->string.pointer + 0x15, "#", 1) != 0) {
+		netif_warn(tp, probe, tp->netdev,
+			   "Invalid header when reading pass-thru MAC addr\n");
+		goto amacout;
+	}
+	ret = hex2bin(buf, obj->string.pointer + 9, 6);
+	if (!(ret == 0 && is_valid_ether_addr(buf))) {
+		netif_warn(tp, probe, tp->netdev,
+			   "Invalid MAC when reading pass-thru MAC addr: "
+			   "%d, %pM\n", ret, buf);
+		ret = -EINVAL;
+		goto amacout;
+	}
+	memcpy(sa->sa_data, buf, 6);
+	ether_addr_copy(tp->netdev->dev_addr, sa->sa_data);
+	netif_info(tp, probe, tp->netdev,
+		   "Using pass-thru MAC addr %pM\n", sa->sa_data);
+
+amacout:
+	kfree(obj);
+	return ret;
+}
+
 static int set_ethernet_addr(struct r8152 *tp)
 {
 	struct net_device *dev = tp->netdev;
···

 	if (tp->version == RTL_VER_01)
 		ret = pla_ocp_read(tp, PLA_IDR, 8, sa.sa_data);
-	else
-		ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data);
+	else {
+		/* if this is not an RTL8153-AD, no eFuse mac pass thru set,
+		 * or system doesn't provide valid _SB.AMAC this will be
+		 * be expected to non-zero
+		 */
+		ret = vendor_mac_passthru_addr_read(tp, &sa);
+		if (ret < 0)
+			ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data);
+	}

 	if (ret < 0) {
 		netif_err(tp, probe, dev, "Get ether addr fail\n");
···
 	u32 ocp_data;
 	u32 wolopts = 0;

-	ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5);
-	if (!(ocp_data & LAN_WAKE_EN))
-		return 0;
-
 	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
 	if (ocp_data & LINK_ON_WAKE_EN)
 		wolopts |= WAKE_PHY;
···
 	ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);

 	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5);
-	ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN | LAN_WAKE_EN);
+	ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN);
 	if (wolopts & WAKE_UCAST)
 		ocp_data |= UWF_EN;
 	if (wolopts & WAKE_BCAST)
 		ocp_data |= BWF_EN;
 	if (wolopts & WAKE_MCAST)
 		ocp_data |= MWF_EN;
-	if (wolopts & WAKE_ANY)
-		ocp_data |= LAN_WAKE_EN;
 	ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG5, ocp_data);

 	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
···
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
+5 -1
include/linux/filter.h
···
 }
 #endif /* CONFIG_DEBUG_SET_MODULE_RONX */

-int sk_filter(struct sock *sk, struct sk_buff *skb);
+int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
+static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
+{
+	return sk_filter_trim_cap(sk, skb, 1);
+}

 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
 void bpf_prog_free(struct bpf_prog *fp);
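This is the core of fix items 6 and 7 above: a socket filter may legitimately truncate a packet, so protocols now pass the minimum length they still need to parse, and anything trimmed below that is treated as a drop rather than processed with a too-short header. A standalone userspace model of the idea (names are illustrative, not the kernel API):

#include <stdio.h>
#include <stddef.h>

static size_t run_filter(size_t pkt_len)
{
	/* pretend the attached BPF program truncates everything to 2 bytes */
	return pkt_len < 2 ? pkt_len : 2;
}

static int filter_trim_cap(size_t pkt_len, size_t cap)
{
	size_t kept = run_filter(pkt_len);

	if (kept < cap)
		return -1;	/* header no longer parseable: drop */
	return 0;
}

int main(void)
{
	const size_t udp_hdr = 8;	/* a UDP header is 8 bytes */

	/* cap=1 models plain sk_filter(); cap=8 models a UDP caller that
	 * must still be able to read its header after trimming */
	printf("cap=1: %s\n", filter_trim_cap(64, 1) ? "drop" : "keep");
	printf("cap=8: %s\n", filter_trim_cap(64, udp_hdr) ? "drop" : "keep");
	return 0;
}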
+7
include/linux/netdevice.h
···
 	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
 }

+/* return true if dev can't cope with mtu frames that need vlan tag insertion */
+static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
+{
+	/* TODO: reserve and use an additional IFF bit, if we get more users */
+	return dev->priv_flags & IFF_MACSEC;
+}
+
 extern struct pernet_operations __net_initdata loopback_net_ops;

 /* Logging, debugging and troubleshooting/diagnostic helpers. */
+8
include/net/netfilter/nf_conntrack.h
···
 	return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK;
 }

+/* jiffies until ct expires, 0 if already expired */
+static inline unsigned long nf_ct_expires(const struct nf_conn *ct)
+{
+	long timeout = (long)ct->timeout.expires - (long)jiffies;
+
+	return timeout > 0 ? timeout : 0;
+}
+
 struct kernel_param;

 int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
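The helper computes "time remaining" by signed subtraction, which stays correct even when the jiffies counter wraps around, then clamps expired entries to 0. A standalone userspace check of that pattern:

#include <stdio.h>

static unsigned long ct_expires(unsigned long expires, unsigned long now)
{
	/* signed subtraction keeps the right sign across wraparound */
	long timeout = (long)expires - (long)now;

	return timeout > 0 ? (unsigned long)timeout : 0;
}

int main(void)
{
	printf("%lu\n", ct_expires(1100, 1000));		/* 100 left */
	printf("%lu\n", ct_expires(900, 1000));			/* expired: 0 */
	/* expiry just past the counter wrap still reads as 100 left */
	printf("%lu\n", ct_expires(50, (unsigned long)-50));
	return 0;
}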
+7 -1
include/net/sock.h
···
  */
 void sock_gen_put(struct sock *sk);

-int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested);
+int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
+		     unsigned int trim_cap);
+static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
+				 const int nested)
+{
+	return __sk_receive_skb(sk, skb, nested, 1);
+}

 static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
 {
+1 -1
include/net/switchdev.h
···
 		struct netdev_phys_item_id ppid;	/* PORT_PARENT_ID */
 		u8 stp_state;				/* PORT_STP_STATE */
 		unsigned long brport_flags;		/* PORT_BRIDGE_FLAGS */
-		u32 ageing_time;			/* BRIDGE_AGEING_TIME */
+		clock_t ageing_time;			/* BRIDGE_AGEING_TIME */
 		bool vlan_filtering;			/* BRIDGE_VLAN_FILTERING */
 	} u;
 };
+6 -4
net/8021q/vlan_dev.c
···

 static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
 {
-	/* TODO: gotta make sure the underlying layer can handle it,
-	 * maybe an IFF_VLAN_CAPABLE flag for devices?
-	 */
-	if (vlan_dev_priv(dev)->real_dev->mtu < new_mtu)
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+	unsigned int max_mtu = real_dev->mtu;
+
+	if (netif_reduces_vlan_mtu(real_dev))
+		max_mtu -= VLAN_HLEN;
+	if (max_mtu < new_mtu)
 		return -ERANGE;

 	dev->mtu = new_mtu;
+5 -2
net/8021q/vlan_netlink.c
···
 {
 	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	struct net_device *real_dev;
+	unsigned int max_mtu;
 	__be16 proto;
 	int err;
···
 	if (err < 0)
 		return err;

+	max_mtu = netif_reduces_vlan_mtu(real_dev) ? real_dev->mtu - VLAN_HLEN :
+						     real_dev->mtu;
 	if (!tb[IFLA_MTU])
-		dev->mtu = real_dev->mtu;
-	else if (dev->mtu > real_dev->mtu)
+		dev->mtu = max_mtu;
+	else if (dev->mtu > max_mtu)
 		return -EINVAL;

 	err = vlan_changelink(dev, tb, data);
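Both vlan hunks apply the same rule from the netdevice.h helper above: a device that cannot carry full-MTU frames once a VLAN tag is inserted (currently MACsec) lowers the vlan device's MTU ceiling by the 4-byte 802.1Q tag. A minimal standalone sketch of the arithmetic:

#include <stdio.h>
#include <stdbool.h>

#define VLAN_HLEN 4	/* 802.1Q tag: 2 bytes TPID + 2 bytes TCI */

static unsigned int vlan_max_mtu(unsigned int real_mtu, bool reduces_vlan_mtu)
{
	/* mirror the max_mtu computation in the vlan hunks above */
	return reduces_vlan_mtu ? real_mtu - VLAN_HLEN : real_mtu;
}

int main(void)
{
	printf("plain ethernet:   %u\n", vlan_max_mtu(1500, false)); /* 1500 */
	printf("vlan over macsec: %u\n", vlan_max_mtu(1500, true));  /* 1496 */
	return 0;
}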
+92 -24
net/batman-adv/bridge_loop_avoidance.c
··· 177 177 static void batadv_claim_release(struct kref *ref) 178 178 { 179 179 struct batadv_bla_claim *claim; 180 + struct batadv_bla_backbone_gw *old_backbone_gw; 180 181 181 182 claim = container_of(ref, struct batadv_bla_claim, refcount); 182 183 183 - batadv_backbone_gw_put(claim->backbone_gw); 184 + spin_lock_bh(&claim->backbone_lock); 185 + old_backbone_gw = claim->backbone_gw; 186 + claim->backbone_gw = NULL; 187 + spin_unlock_bh(&claim->backbone_lock); 188 + 189 + spin_lock_bh(&old_backbone_gw->crc_lock); 190 + old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); 191 + spin_unlock_bh(&old_backbone_gw->crc_lock); 192 + 193 + batadv_backbone_gw_put(old_backbone_gw); 194 + 184 195 kfree_rcu(claim, rcu); 185 196 } 186 197 ··· 429 418 break; 430 419 } 431 420 432 - if (vid & BATADV_VLAN_HAS_TAG) 421 + if (vid & BATADV_VLAN_HAS_TAG) { 433 422 skb = vlan_insert_tag(skb, htons(ETH_P_8021Q), 434 423 vid & VLAN_VID_MASK); 424 + if (!skb) 425 + goto out; 426 + } 435 427 436 428 skb_reset_mac_header(skb); 437 429 skb->protocol = eth_type_trans(skb, soft_iface); ··· 688 674 const u8 *mac, const unsigned short vid, 689 675 struct batadv_bla_backbone_gw *backbone_gw) 690 676 { 677 + struct batadv_bla_backbone_gw *old_backbone_gw; 691 678 struct batadv_bla_claim *claim; 692 679 struct batadv_bla_claim search_claim; 680 + bool remove_crc = false; 693 681 int hash_added; 694 682 695 683 ether_addr_copy(search_claim.addr, mac); ··· 705 689 return; 706 690 707 691 ether_addr_copy(claim->addr, mac); 692 + spin_lock_init(&claim->backbone_lock); 708 693 claim->vid = vid; 709 694 claim->lasttime = jiffies; 695 + kref_get(&backbone_gw->refcount); 710 696 claim->backbone_gw = backbone_gw; 711 697 712 698 kref_init(&claim->refcount); ··· 736 718 "bla_add_claim(): changing ownership for %pM, vid %d\n", 737 719 mac, BATADV_PRINT_VID(vid)); 738 720 739 - spin_lock_bh(&claim->backbone_gw->crc_lock); 740 - claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); 741 - spin_unlock_bh(&claim->backbone_gw->crc_lock); 742 - batadv_backbone_gw_put(claim->backbone_gw); 721 + remove_crc = true; 743 722 } 744 - /* set (new) backbone gw */ 723 + 724 + /* replace backbone_gw atomically and adjust reference counters */ 725 + spin_lock_bh(&claim->backbone_lock); 726 + old_backbone_gw = claim->backbone_gw; 745 727 kref_get(&backbone_gw->refcount); 746 728 claim->backbone_gw = backbone_gw; 729 + spin_unlock_bh(&claim->backbone_lock); 747 730 731 + if (remove_crc) { 732 + /* remove claim address from old backbone_gw */ 733 + spin_lock_bh(&old_backbone_gw->crc_lock); 734 + old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); 735 + spin_unlock_bh(&old_backbone_gw->crc_lock); 736 + } 737 + 738 + batadv_backbone_gw_put(old_backbone_gw); 739 + 740 + /* add claim address to new backbone_gw */ 748 741 spin_lock_bh(&backbone_gw->crc_lock); 749 742 backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); 750 743 spin_unlock_bh(&backbone_gw->crc_lock); ··· 763 734 764 735 claim_free_ref: 765 736 batadv_claim_put(claim); 737 + } 738 + 739 + /** 740 + * batadv_bla_claim_get_backbone_gw - Get valid reference for backbone_gw of 741 + * claim 742 + * @claim: claim whose backbone_gw should be returned 743 + * 744 + * Return: valid reference to claim::backbone_gw 745 + */ 746 + static struct batadv_bla_backbone_gw * 747 + batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim) 748 + { 749 + struct batadv_bla_backbone_gw *backbone_gw; 750 + 751 + spin_lock_bh(&claim->backbone_lock); 752 + backbone_gw = claim->backbone_gw; 753 
+ kref_get(&backbone_gw->refcount); 754 + spin_unlock_bh(&claim->backbone_lock); 755 + 756 + return backbone_gw; 766 757 } 767 758 768 759 /** ··· 808 759 batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim, 809 760 batadv_choose_claim, claim); 810 761 batadv_claim_put(claim); /* reference from the hash is gone */ 811 - 812 - spin_lock_bh(&claim->backbone_gw->crc_lock); 813 - claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); 814 - spin_unlock_bh(&claim->backbone_gw->crc_lock); 815 762 816 763 /* don't need the reference from hash_find() anymore */ 817 764 batadv_claim_put(claim); ··· 1261 1216 struct batadv_hard_iface *primary_if, 1262 1217 int now) 1263 1218 { 1219 + struct batadv_bla_backbone_gw *backbone_gw; 1264 1220 struct batadv_bla_claim *claim; 1265 1221 struct hlist_head *head; 1266 1222 struct batadv_hashtable *hash; ··· 1276 1230 1277 1231 rcu_read_lock(); 1278 1232 hlist_for_each_entry_rcu(claim, head, hash_entry) { 1233 + backbone_gw = batadv_bla_claim_get_backbone_gw(claim); 1279 1234 if (now) 1280 1235 goto purge_now; 1281 - if (!batadv_compare_eth(claim->backbone_gw->orig, 1236 + 1237 + if (!batadv_compare_eth(backbone_gw->orig, 1282 1238 primary_if->net_dev->dev_addr)) 1283 - continue; 1239 + goto skip; 1240 + 1284 1241 if (!batadv_has_timed_out(claim->lasttime, 1285 1242 BATADV_BLA_CLAIM_TIMEOUT)) 1286 - continue; 1243 + goto skip; 1287 1244 1288 1245 batadv_dbg(BATADV_DBG_BLA, bat_priv, 1289 1246 "bla_purge_claims(): %pM, vid %d, time out\n", ··· 1294 1245 1295 1246 purge_now: 1296 1247 batadv_handle_unclaim(bat_priv, primary_if, 1297 - claim->backbone_gw->orig, 1248 + backbone_gw->orig, 1298 1249 claim->addr, claim->vid); 1250 + skip: 1251 + batadv_backbone_gw_put(backbone_gw); 1299 1252 } 1300 1253 rcu_read_unlock(); 1301 1254 } ··· 1808 1757 bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, 1809 1758 unsigned short vid, bool is_bcast) 1810 1759 { 1760 + struct batadv_bla_backbone_gw *backbone_gw; 1811 1761 struct ethhdr *ethhdr; 1812 1762 struct batadv_bla_claim search_claim, *claim = NULL; 1813 1763 struct batadv_hard_iface *primary_if; 1764 + bool own_claim; 1814 1765 bool ret; 1815 1766 1816 1767 ethhdr = eth_hdr(skb); ··· 1847 1794 } 1848 1795 1849 1796 /* if it is our own claim ... */ 1850 - if (batadv_compare_eth(claim->backbone_gw->orig, 1851 - primary_if->net_dev->dev_addr)) { 1797 + backbone_gw = batadv_bla_claim_get_backbone_gw(claim); 1798 + own_claim = batadv_compare_eth(backbone_gw->orig, 1799 + primary_if->net_dev->dev_addr); 1800 + batadv_backbone_gw_put(backbone_gw); 1801 + 1802 + if (own_claim) { 1852 1803 /* ... allow it in any case */ 1853 1804 claim->lasttime = jiffies; 1854 1805 goto allow; ··· 1916 1859 { 1917 1860 struct ethhdr *ethhdr; 1918 1861 struct batadv_bla_claim search_claim, *claim = NULL; 1862 + struct batadv_bla_backbone_gw *backbone_gw; 1919 1863 struct batadv_hard_iface *primary_if; 1864 + bool client_roamed; 1920 1865 bool ret = false; 1921 1866 1922 1867 primary_if = batadv_primary_if_get_selected(bat_priv); ··· 1948 1889 goto allow; 1949 1890 1950 1891 /* check if we are responsible. 
*/ 1951 - if (batadv_compare_eth(claim->backbone_gw->orig, 1952 - primary_if->net_dev->dev_addr)) { 1892 + backbone_gw = batadv_bla_claim_get_backbone_gw(claim); 1893 + client_roamed = batadv_compare_eth(backbone_gw->orig, 1894 + primary_if->net_dev->dev_addr); 1895 + batadv_backbone_gw_put(backbone_gw); 1896 + 1897 + if (client_roamed) { 1953 1898 /* if yes, the client has roamed and we have 1954 1899 * to unclaim it. 1955 1900 */ ··· 2001 1938 struct net_device *net_dev = (struct net_device *)seq->private; 2002 1939 struct batadv_priv *bat_priv = netdev_priv(net_dev); 2003 1940 struct batadv_hashtable *hash = bat_priv->bla.claim_hash; 1941 + struct batadv_bla_backbone_gw *backbone_gw; 2004 1942 struct batadv_bla_claim *claim; 2005 1943 struct batadv_hard_iface *primary_if; 2006 1944 struct hlist_head *head; ··· 2026 1962 2027 1963 rcu_read_lock(); 2028 1964 hlist_for_each_entry_rcu(claim, head, hash_entry) { 2029 - is_own = batadv_compare_eth(claim->backbone_gw->orig, 1965 + backbone_gw = batadv_bla_claim_get_backbone_gw(claim); 1966 + 1967 + is_own = batadv_compare_eth(backbone_gw->orig, 2030 1968 primary_addr); 2031 1969 2032 - spin_lock_bh(&claim->backbone_gw->crc_lock); 2033 - backbone_crc = claim->backbone_gw->crc; 2034 - spin_unlock_bh(&claim->backbone_gw->crc_lock); 1970 + spin_lock_bh(&backbone_gw->crc_lock); 1971 + backbone_crc = backbone_gw->crc; 1972 + spin_unlock_bh(&backbone_gw->crc_lock); 2035 1973 seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n", 2036 1974 claim->addr, BATADV_PRINT_VID(claim->vid), 2037 - claim->backbone_gw->orig, 1975 + backbone_gw->orig, 2038 1976 (is_own ? 'x' : ' '), 2039 1977 backbone_crc); 1978 + 1979 + batadv_backbone_gw_put(backbone_gw); 2040 1980 } 2041 1981 rcu_read_unlock(); 2042 1982 }
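Worth noting why the crc_lock sections above are correct: each backbone gateway keeps its claim checksum as an XOR of per-claim CRC16 values, so the very same crc ^= crc16(...) statement both adds and removes a claim, since XOR is commutative and self-inverse. That is also why the release path must XOR the address out of the old gateway exactly once. A minimal userspace sketch of the property (the crc16() below is a bitwise stand-in for the kernel's lib/crc16, assuming the usual reflected 0xA001 polynomial):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Bitwise CRC16 (reflected poly 0xA001), standing in for the kernel's crc16() */
static uint16_t crc16(uint16_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= *buf++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xA001 : 0);
	}
	return crc;
}

int main(void)
{
	const uint8_t mac_a[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	const uint8_t mac_b[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 };
	uint16_t crc = 0;

	crc ^= crc16(0, mac_a, 6);	/* claim A added   */
	crc ^= crc16(0, mac_b, 6);	/* claim B added   */
	crc ^= crc16(0, mac_a, 6);	/* claim A removed */

	/* order-independent: only claim B is left in the checksum */
	assert(crc == crc16(0, mac_b, 6));
	printf("aggregate crc: %#06x\n", crc);
	return 0;
}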
+8 -2
net/batman-adv/distributed-arp-table.c
··· 1009 1009 if (!skb_new) 1010 1010 goto out; 1011 1011 1012 - if (vid & BATADV_VLAN_HAS_TAG) 1012 + if (vid & BATADV_VLAN_HAS_TAG) { 1013 1013 skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q), 1014 1014 vid & VLAN_VID_MASK); 1015 + if (!skb_new) 1016 + goto out; 1017 + } 1015 1018 1016 1019 skb_reset_mac_header(skb_new); 1017 1020 skb_new->protocol = eth_type_trans(skb_new, ··· 1092 1089 */ 1093 1090 skb_reset_mac_header(skb_new); 1094 1091 1095 - if (vid & BATADV_VLAN_HAS_TAG) 1092 + if (vid & BATADV_VLAN_HAS_TAG) { 1096 1093 skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q), 1097 1094 vid & VLAN_VID_MASK); 1095 + if (!skb_new) 1096 + goto out; 1097 + } 1098 1098 1099 1099 /* To preserve backwards compatibility, the node has to choose the outgoing 1100 1100 * format based on the incoming request packet type. The assumption is
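Both hunks (and the matching one in bridge_loop_avoidance.c) guard the same pitfall: vlan_insert_tag() frees the skb it was given when it cannot push the tag, so the only safe pattern is to overwrite the old pointer with the return value and bail out on NULL. The shape of that contract, sketched with a hypothetical consuming function:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf { size_t len; unsigned char data[64]; };

/* Hypothetical stand-in for vlan_insert_tag(): on failure it frees its
 * argument and returns NULL, so the caller must not touch the old pointer. */
static struct buf *insert_tag(struct buf *b, unsigned short tag)
{
	if (b->len + 4 > sizeof(b->data)) {	/* no headroom for the tag */
		free(b);			/* consumed on failure */
		return NULL;
	}
	memmove(b->data + 4, b->data, b->len);
	b->data[0] = 0x81; b->data[1] = 0x00;	/* ETH_P_8021Q */
	b->data[2] = tag >> 8; b->data[3] = tag & 0xff;
	b->len += 4;
	return b;
}

int main(void)
{
	struct buf *b = calloc(1, sizeof(*b));
	if (!b)
		return 1;
	b->len = 62;			/* deliberately too full for a tag */

	b = insert_tag(b, 100);		/* reassign; never keep the old pointer */
	if (!b) {
		fprintf(stderr, "tag insertion failed, buffer already freed\n");
		return 1;		/* the 'goto out' in the hunks */
	}
	free(b);
	return 0;
}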
+15
net/batman-adv/originator.c
··· 765 765 struct batadv_neigh_node *neigh_node; 766 766 struct batadv_orig_node *orig_node; 767 767 struct batadv_orig_ifinfo *orig_ifinfo; 768 + struct batadv_orig_node_vlan *vlan; 769 + struct batadv_orig_ifinfo *last_candidate; 768 770 769 771 orig_node = container_of(ref, struct batadv_orig_node, refcount); 770 772 ··· 784 782 hlist_del_rcu(&orig_ifinfo->list); 785 783 batadv_orig_ifinfo_put(orig_ifinfo); 786 784 } 785 + 786 + last_candidate = orig_node->last_bonding_candidate; 787 + orig_node->last_bonding_candidate = NULL; 787 788 spin_unlock_bh(&orig_node->neigh_list_lock); 789 + 790 + if (last_candidate) 791 + batadv_orig_ifinfo_put(last_candidate); 792 + 793 + spin_lock_bh(&orig_node->vlan_list_lock); 794 + hlist_for_each_entry_safe(vlan, node_tmp, &orig_node->vlan_list, list) { 795 + hlist_del_rcu(&vlan->list); 796 + batadv_orig_node_vlan_put(vlan); 797 + } 798 + spin_unlock_bh(&orig_node->vlan_list_lock); 788 799 789 800 /* Free nc_nodes */ 790 801 batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
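Two leaks close here: the per-originator vlan list entries and the last_bonding_candidate reference were never dropped when an orig_node was released. The vlan teardown relies on the "_safe" list-iteration idiom, which saves the successor pointer before the current entry is unlinked and freed. Its shape in plain C, with a simple singly linked list standing in for the hlist:

#include <stdio.h>
#include <stdlib.h>

struct node { int id; struct node *next; };

int main(void)
{
	struct node *head = NULL, *n, *tmp;

	for (int i = 0; i < 3; i++) {		/* build a small list */
		n = malloc(sizeof(*n));
		n->id = i;
		n->next = head;
		head = n;
	}

	/* The "_safe" idiom: remember the successor before the current
	 * entry is freed, as hlist_for_each_entry_safe() does above. */
	for (n = head; n; n = tmp) {
		tmp = n->next;			/* saved first */
		printf("releasing vlan entry %d\n", n->id);
		free(n);			/* now safe to drop n */
	}
	return 0;
}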
+39 -13
net/batman-adv/routing.c
··· 456 456 } 457 457 458 458 /** 459 + * batadv_last_bonding_replace - Replace last_bonding_candidate of orig_node 460 + * @orig_node: originator node whose bonding candidates should be replaced 461 + * @new_candidate: new bonding candidate or NULL 462 + */ 463 + static void 464 + batadv_last_bonding_replace(struct batadv_orig_node *orig_node, 465 + struct batadv_orig_ifinfo *new_candidate) 466 + { 467 + struct batadv_orig_ifinfo *old_candidate; 468 + 469 + spin_lock_bh(&orig_node->neigh_list_lock); 470 + old_candidate = orig_node->last_bonding_candidate; 471 + 472 + if (new_candidate) 473 + kref_get(&new_candidate->refcount); 474 + orig_node->last_bonding_candidate = new_candidate; 475 + spin_unlock_bh(&orig_node->neigh_list_lock); 476 + 477 + if (old_candidate) 478 + batadv_orig_ifinfo_put(old_candidate); 479 + } 480 + 481 + /** 459 482 * batadv_find_router - find a suitable router for this originator 460 483 * @bat_priv: the bat priv with all the soft interface information 461 484 * @orig_node: the destination node ··· 585 562 } 586 563 rcu_read_unlock(); 587 564 588 - /* last_bonding_candidate is reset below, remove the old reference. */ 589 - if (orig_node->last_bonding_candidate) 590 - batadv_orig_ifinfo_put(orig_node->last_bonding_candidate); 591 - 592 565 /* After finding candidates, handle the three cases: 593 566 * 1) there is a next candidate, use that 594 567 * 2) there is no next candidate, use the first of the list ··· 593 574 if (next_candidate) { 594 575 batadv_neigh_node_put(router); 595 576 596 - /* remove references to first candidate, we don't need it. */ 597 - if (first_candidate) { 598 - batadv_neigh_node_put(first_candidate_router); 599 - batadv_orig_ifinfo_put(first_candidate); 600 - } 577 + kref_get(&next_candidate_router->refcount); 601 578 router = next_candidate_router; 602 - orig_node->last_bonding_candidate = next_candidate; 579 + batadv_last_bonding_replace(orig_node, next_candidate); 603 580 } else if (first_candidate) { 604 581 batadv_neigh_node_put(router); 605 582 606 - /* refcounting has already been done in the loop above. */ 583 + kref_get(&first_candidate_router->refcount); 607 584 router = first_candidate_router; 608 - orig_node->last_bonding_candidate = first_candidate; 585 + batadv_last_bonding_replace(orig_node, first_candidate); 609 586 } else { 610 - orig_node->last_bonding_candidate = NULL; 587 + batadv_last_bonding_replace(orig_node, NULL); 588 + } 589 + 590 + /* cleanup of candidates */ 591 + if (first_candidate) { 592 + batadv_neigh_node_put(first_candidate_router); 593 + batadv_orig_ifinfo_put(first_candidate); 594 + } 595 + 596 + if (next_candidate) { 597 + batadv_neigh_node_put(next_candidate_router); 598 + batadv_orig_ifinfo_put(next_candidate); 611 599 } 612 600 613 601 return router;
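batadv_last_bonding_replace() is the classic refcounted-pointer swap: take a reference on the new object, exchange the pointer under the lock, and only drop the old reference after the lock is released, because the put may free the object and must not run under a spinlock. A userspace sketch of the same discipline, using atomics and a pthread mutex as stand-ins for kref and the neigh_list_lock:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj { atomic_int refs; int id; };

static struct obj *obj_new(int id)
{
	struct obj *o = malloc(sizeof(*o));
	atomic_init(&o->refs, 1);
	o->id = id;
	return o;
}

static void obj_get(struct obj *o) { atomic_fetch_add(&o->refs, 1); }

static void obj_put(struct obj *o)	/* may free: never call under the lock */
{
	if (o && atomic_fetch_sub(&o->refs, 1) == 1) {
		printf("freeing obj %d\n", o->id);
		free(o);
	}
}

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *candidate;	/* protected by lock, like last_bonding_candidate */

static void candidate_replace(struct obj *new)
{
	struct obj *old;

	pthread_mutex_lock(&lock);
	old = candidate;
	if (new)
		obj_get(new);		/* reference now held by the pointer */
	candidate = new;
	pthread_mutex_unlock(&lock);

	obj_put(old);			/* old reference dropped outside the lock */
}

int main(void)
{
	struct obj *a = obj_new(1), *b = obj_new(2);

	candidate_replace(a);
	candidate_replace(b);		/* swaps a out */
	candidate_replace(NULL);	/* swaps b out */
	obj_put(a);
	obj_put(b);
	return 0;
}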
+2 -2
net/batman-adv/send.c
··· 424 424 struct batadv_orig_node *orig_node; 425 425 426 426 orig_node = batadv_gw_get_selected_orig(bat_priv); 427 - return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0, 428 - orig_node, vid); 427 + return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR, 428 + BATADV_P_DATA, orig_node, vid); 429 429 } 430 430 431 431 void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
+5 -1
net/batman-adv/types.h
··· 330 330 DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE); 331 331 u32 last_bcast_seqno; 332 332 struct hlist_head neigh_list; 333 - /* neigh_list_lock protects: neigh_list and router */ 333 + /* neigh_list_lock protects: neigh_list, ifinfo_list, 334 + * last_bonding_candidate and router 335 + */ 334 336 spinlock_t neigh_list_lock; 335 337 struct hlist_node hash_entry; 336 338 struct batadv_priv *bat_priv; ··· 1044 1042 * @addr: mac address of claimed non-mesh client 1045 1043 * @vid: vlan id this client was detected on 1046 1044 * @backbone_gw: pointer to backbone gw claiming this client 1045 + * @backbone_lock: lock protecting backbone_gw pointer 1047 1046 * @lasttime: last time we heard of claim (locals only) 1048 1047 * @hash_entry: hlist node for batadv_priv_bla::claim_hash 1049 1048 * @refcount: number of contexts the object is used ··· 1054 1051 u8 addr[ETH_ALEN]; 1055 1052 unsigned short vid; 1056 1053 struct batadv_bla_backbone_gw *backbone_gw; 1054 + spinlock_t backbone_lock; /* protects backbone_gw */ 1057 1055 unsigned long lasttime; 1058 1056 struct hlist_node hash_entry; 1059 1057 struct rcu_head rcu;
+5 -5
net/core/filter.c
··· 53 53 #include <net/sock_reuseport.h> 54 54 55 55 /** 56 - * sk_filter - run a packet through a socket filter 56 + * sk_filter_trim_cap - run a packet through a socket filter 57 57 * @sk: sock associated with &sk_buff 58 58 * @skb: buffer to filter 59 + * @cap: limit on how short the eBPF program may trim the packet 59 60 * 60 61 * Run the eBPF program and then cut skb->data to correct size returned by 61 62 * the program. If pkt_len is 0 we toss packet. If skb->len is smaller ··· 65 64 * be accepted or -EPERM if the packet should be tossed. 66 65 * 67 66 */ 68 - int sk_filter(struct sock *sk, struct sk_buff *skb) 67 + int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap) 69 68 { 70 69 int err; 71 70 struct sk_filter *filter; ··· 86 85 filter = rcu_dereference(sk->sk_filter); 87 86 if (filter) { 88 87 unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb); 89 - 90 - err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM; 88 + err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM; 91 89 } 92 90 rcu_read_unlock(); 93 91 94 92 return err; 95 93 } 96 - EXPORT_SYMBOL(sk_filter); 94 + EXPORT_SYMBOL(sk_filter_trim_cap); 97 95 98 96 static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5) 99 97 {
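The interesting line is pskb_trim(skb, max(cap, pkt_len)): a socket filter may still shrink the packet, but never below the caller-supplied floor, so protocol code that dereferences its own header afterwards stays safe (dccp passes its header length, rose passes ROSE_MIN_LEN further down). The arithmetic, sketched standalone:

#include <assert.h>
#include <stddef.h>

/* Trim a packet to what the filter returned, but never below 'cap'
 * (e.g. the transport header size the caller still needs to read). */
static size_t trim_len(size_t pkt_len, size_t filter_ret, size_t cap)
{
	size_t want = filter_ret > cap ? filter_ret : cap; /* max(cap, ret) */
	return want < pkt_len ? want : pkt_len;	/* trimming never grows */
}

int main(void)
{
	size_t udp_hdr = 8;	/* sizeof(struct udphdr) */

	/* A filter trying to trim to 1 byte still leaves the header intact. */
	assert(trim_len(1500, 1, udp_hdr) == udp_hdr);
	/* A filter trimming to 64 bytes is honoured as before. */
	assert(trim_len(1500, 64, udp_hdr) == 64);
	return 0;
}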
+8 -3
net/core/sock.c
··· 452 452 } 453 453 EXPORT_SYMBOL(sock_queue_rcv_skb); 454 454 455 - int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested) 455 + int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, 456 + const int nested, unsigned int trim_cap) 456 457 { 457 458 int rc = NET_RX_SUCCESS; 458 459 459 - if (sk_filter(sk, skb)) 460 + if (sk_filter_trim_cap(sk, skb, trim_cap)) 460 461 goto discard_and_relse; 461 462 462 463 skb->dev = NULL; ··· 493 492 kfree_skb(skb); 494 493 goto out; 495 494 } 496 - EXPORT_SYMBOL(sk_receive_skb); 495 + EXPORT_SYMBOL(__sk_receive_skb); 497 496 498 497 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) 499 498 { ··· 1938 1937 1939 1938 sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK; 1940 1939 sockc->tsflags |= tsflags; 1940 + break; 1941 + /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */ 1942 + case SCM_RIGHTS: 1943 + case SCM_CREDENTIALS: 1941 1944 break; 1942 1945 default: 1943 1946 return -EINVAL;
+7 -5
net/dccp/ipv4.c
··· 462 462 security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); 463 463 rt = ip_route_output_flow(net, &fl4, sk); 464 464 if (IS_ERR(rt)) { 465 - __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES); 465 + IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES); 466 466 return NULL; 467 467 } 468 468 ··· 527 527 rxiph->daddr); 528 528 skb_dst_set(skb, dst_clone(dst)); 529 529 530 + local_bh_disable(); 530 531 bh_lock_sock(ctl_sk); 531 532 err = ip_build_and_send_pkt(skb, ctl_sk, 532 533 rxiph->daddr, rxiph->saddr, NULL); 533 534 bh_unlock_sock(ctl_sk); 534 535 535 536 if (net_xmit_eval(err) == 0) { 536 - DCCP_INC_STATS(DCCP_MIB_OUTSEGS); 537 - DCCP_INC_STATS(DCCP_MIB_OUTRSTS); 537 + __DCCP_INC_STATS(DCCP_MIB_OUTSEGS); 538 + __DCCP_INC_STATS(DCCP_MIB_OUTRSTS); 538 539 } 540 + local_bh_enable(); 539 541 out: 540 - dst_release(dst); 542 + dst_release(dst); 541 543 } 542 544 543 545 static void dccp_v4_reqsk_destructor(struct request_sock *req) ··· 868 866 goto discard_and_relse; 869 867 nf_reset(skb); 870 868 871 - return sk_receive_skb(sk, skb, 1); 869 + return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4); 872 870 873 871 no_dccp_socket: 874 872 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+1 -1
net/dccp/ipv6.c
··· 732 732 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) 733 733 goto discard_and_relse; 734 734 735 - return sk_receive_skb(sk, skb, 1) ? -1 : 0; 735 + return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4) ? -1 : 0; 736 736 737 737 no_dccp_socket: 738 738 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
+6
net/ipv4/fib_semantics.c
··· 479 479 if (!rtnh_ok(rtnh, remaining)) 480 480 return -EINVAL; 481 481 482 + if (rtnh->rtnh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)) 483 + return -EINVAL; 484 + 482 485 nexthop_nh->nh_flags = 483 486 (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags; 484 487 nexthop_nh->nh_oif = rtnh->rtnh_ifindex; ··· 1004 1001 1005 1002 /* Fast check to catch the most weird cases */ 1006 1003 if (fib_props[cfg->fc_type].scope > cfg->fc_scope) 1004 + goto err_inval; 1005 + 1006 + if (cfg->fc_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)) 1007 1007 goto err_inval; 1008 1008 1009 1009 #ifdef CONFIG_IP_ROUTE_MULTIPATH
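RTNH_F_DEAD and RTNH_F_LINKDOWN are kernel-maintained status bits, so user-supplied route configuration must not be able to set them. Rejecting reserved bits against a mask is the standard idiom; a compact sketch with hypothetical flag values:

#include <errno.h>
#include <stdio.h>

#define NH_F_DEAD	0x01	/* kernel-owned status bits (hypothetical values) */
#define NH_F_LINKDOWN	0x02
#define NH_F_ONLINK	0x04	/* user-settable */

#define NH_KERNEL_FLAGS	(NH_F_DEAD | NH_F_LINKDOWN)

static int nh_config_validate(unsigned int flags)
{
	if (flags & NH_KERNEL_FLAGS)
		return -EINVAL;	/* userspace may not set status bits */
	return 0;
}

int main(void)
{
	printf("%d\n", nh_config_validate(NH_F_ONLINK));		/* 0   */
	printf("%d\n", nh_config_validate(NH_F_ONLINK | NH_F_DEAD));	/* -22 */
	return 0;
}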
+32 -22
net/ipv4/tcp_input.c
··· 87 87 EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); 88 88 89 89 /* rfc5961 challenge ack rate limiting */ 90 - int sysctl_tcp_challenge_ack_limit = 100; 90 + int sysctl_tcp_challenge_ack_limit = 1000; 91 91 92 92 int sysctl_tcp_stdurg __read_mostly; 93 93 int sysctl_tcp_rfc1337 __read_mostly; ··· 3421 3421 return flag; 3422 3422 } 3423 3423 3424 + static bool __tcp_oow_rate_limited(struct net *net, int mib_idx, 3425 + u32 *last_oow_ack_time) 3426 + { 3427 + if (*last_oow_ack_time) { 3428 + s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time); 3429 + 3430 + if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) { 3431 + NET_INC_STATS(net, mib_idx); 3432 + return true; /* rate-limited: don't send yet! */ 3433 + } 3434 + } 3435 + 3436 + *last_oow_ack_time = tcp_time_stamp; 3437 + 3438 + return false; /* not rate-limited: go ahead, send dupack now! */ 3439 + } 3440 + 3424 3441 /* Return true if we're currently rate-limiting out-of-window ACKs and 3425 3442 * thus shouldn't send a dupack right now. We rate-limit dupacks in 3426 3443 * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS ··· 3451 3434 /* Data packets without SYNs are not likely part of an ACK loop. */ 3452 3435 if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) && 3453 3436 !tcp_hdr(skb)->syn) 3454 - goto not_rate_limited; 3437 + return false; 3455 3438 3456 - if (*last_oow_ack_time) { 3457 - s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time); 3458 - 3459 - if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) { 3460 - NET_INC_STATS(net, mib_idx); 3461 - return true; /* rate-limited: don't send yet! */ 3462 - } 3463 - } 3464 - 3465 - *last_oow_ack_time = tcp_time_stamp; 3466 - 3467 - not_rate_limited: 3468 - return false; /* not rate-limited: go ahead, send dupack now! */ 3439 + return __tcp_oow_rate_limited(net, mib_idx, last_oow_ack_time); 3469 3440 } 3470 3441 3471 3442 /* RFC 5961 7 [ACK Throttling] */ ··· 3463 3458 static u32 challenge_timestamp; 3464 3459 static unsigned int challenge_count; 3465 3460 struct tcp_sock *tp = tcp_sk(sk); 3466 - u32 now; 3461 + u32 count, now; 3467 3462 3468 3463 /* First check our per-socket dupack rate limit. */ 3469 - if (tcp_oow_rate_limited(sock_net(sk), skb, 3470 - LINUX_MIB_TCPACKSKIPPEDCHALLENGE, 3471 - &tp->last_oow_ack_time)) 3464 + if (__tcp_oow_rate_limited(sock_net(sk), 3465 + LINUX_MIB_TCPACKSKIPPEDCHALLENGE, 3466 + &tp->last_oow_ack_time)) 3472 3467 return; 3473 3468 3474 - /* Then check the check host-wide RFC 5961 rate limit. */ 3469 + /* Then check host-wide RFC 5961 rate limit. */ 3475 3470 now = jiffies / HZ; 3476 3471 if (now != challenge_timestamp) { 3472 + u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1; 3473 + 3477 3474 challenge_timestamp = now; 3478 - challenge_count = 0; 3475 + WRITE_ONCE(challenge_count, half + 3476 + prandom_u32_max(sysctl_tcp_challenge_ack_limit)); 3479 3477 } 3480 - if (++challenge_count <= sysctl_tcp_challenge_ack_limit) { 3478 + count = READ_ONCE(challenge_count); 3479 + if (count > 0) { 3480 + WRITE_ONCE(challenge_count, count - 1); 3481 3481 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK); 3482 3482 tcp_send_ack(sk); 3483 3483 }
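Two things change in the challenge-ACK logic: the global limit rises from 100 to 1000 per second, and instead of counting up to a fixed, guessable threshold, each one-second window gets a budget drawn uniformly from [limit/2, limit/2 + limit) and counted down per ACK sent. An off-path attacker can no longer infer the counter's exact state from the number of responses. A userspace sketch of the budget arithmetic (rand() is only a stand-in for the kernel's prandom_u32_max()):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define CHALLENGE_ACK_LIMIT 1000

/* New one-second budget: limit/2 (rounded up) plus a uniform draw in
 * [0, limit), mirroring 'half + prandom_u32_max(limit)'. */
static unsigned int new_budget(void)
{
	unsigned int half = (CHALLENGE_ACK_LIMIT + 1) / 2;

	return half + (unsigned int)(rand() % CHALLENGE_ACK_LIMIT);
}

int main(void)
{
	unsigned long long sum = 0;

	srand((unsigned int)time(NULL));
	for (int i = 0; i < 100000; i++)
		sum += new_budget();

	/* expected value is half + (limit - 1) / 2, i.e. roughly the limit */
	printf("average per-second budget: %llu\n", sum / 100000);
	return 0;
}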
+2
net/ipv4/udp.c
··· 1583 1583 1584 1584 if (sk_filter(sk, skb)) 1585 1585 goto drop; 1586 + if (unlikely(skb->len < sizeof(struct udphdr))) 1587 + goto drop; 1586 1588 1587 1589 udp_csum_pull_header(skb); 1588 1590 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
+2
net/ipv6/udp.c
··· 620 620 621 621 if (sk_filter(sk, skb)) 622 622 goto drop; 623 + if (unlikely(skb->len < sizeof(struct udphdr))) 624 + goto drop; 623 625 624 626 udp_csum_pull_header(skb); 625 627 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
+4 -2
net/netfilter/ipvs/ip_vs_sync.c
··· 1545 1545 /* 1546 1546 * Set up receiving multicast socket over UDP 1547 1547 */ 1548 - static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id) 1548 + static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id, 1549 + int ifindex) 1549 1550 { 1550 1551 /* multicast addr */ 1551 1552 union ipvs_sockaddr mcast_addr; ··· 1567 1566 set_sock_size(sock->sk, 0, result); 1568 1567 1569 1568 get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id); 1569 + sock->sk->sk_bound_dev_if = ifindex; 1570 1570 result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen); 1571 1571 if (result < 0) { 1572 1572 pr_err("Error binding to the multicast addr\n"); ··· 1870 1868 if (state == IP_VS_STATE_MASTER) 1871 1869 sock = make_send_sock(ipvs, id); 1872 1870 else 1873 - sock = make_receive_sock(ipvs, id); 1871 + sock = make_receive_sock(ipvs, id, dev->ifindex); 1874 1872 if (IS_ERR(sock)) { 1875 1873 result = PTR_ERR(sock); 1876 1874 goto outtinfo;
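Setting sk_bound_dev_if before bind() is the in-kernel equivalent of SO_BINDTODEVICE: the sync socket then only sees multicast traffic arriving on the configured interface instead of on any interface joined to the group. A hedged userspace equivalent (interface name is a placeholder; group and port follow the usual IPVS sync defaults, so treat them as assumptions):

#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	const char *ifname = "eth0";		/* placeholder interface */
	struct sockaddr_in mcast = {
		.sin_family = AF_INET,
		.sin_port = htons(8848),	/* assumed IPVS sync port */
	};
	struct ip_mreqn mreq = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	inet_pton(AF_INET, "224.0.0.81", &mcast.sin_addr); /* assumed sync group */

	/* Restrict the socket to one interface *before* binding, matching
	 * the sk_bound_dev_if assignment in the hunk above. */
	if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
		       ifname, strlen(ifname)) < 0)
		perror("SO_BINDTODEVICE (needs CAP_NET_RAW)");

	if (bind(fd, (struct sockaddr *)&mcast, sizeof(mcast)) < 0)
		perror("bind");

	mreq.imr_multiaddr = mcast.sin_addr;
	mreq.imr_ifindex = if_nametoindex(ifname);
	if (setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
		       &mreq, sizeof(mreq)) < 0)
		perror("IP_ADD_MEMBERSHIP");

	close(fd);
	return 0;
}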
+8
net/netfilter/nf_conntrack_core.c
··· 646 646 647 647 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); 648 648 if (l4proto->allow_clash && 649 + !nfct_nat(ct) && 649 650 !nf_ct_is_dying(ct) && 650 651 atomic_inc_not_zero(&ct->ct_general.use)) { 651 652 nf_ct_acct_merge(ct, ctinfo, (struct nf_conn *)skb->nfct); ··· 1602 1601 unsigned int nr_slots, i; 1603 1602 size_t sz; 1604 1603 1604 + if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head))) 1605 + return NULL; 1606 + 1605 1607 BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head)); 1606 1608 nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head)); 1609 + 1610 + if (nr_slots > (UINT_MAX / sizeof(struct hlist_nulls_head))) 1611 + return NULL; 1612 + 1607 1613 sz = nr_slots * sizeof(struct hlist_nulls_head); 1608 1614 hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 1609 1615 get_order(sz));
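The two new guards keep nr_slots * sizeof(struct hlist_nulls_head) from wrapping: the requested slot count is checked against UINT_MAX / element_size both before and after the roundup, so a huge sysctl value yields a clean failure rather than a tiny undersized hash. The same defensive pattern in plain C:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Allocate 'n' elements of 'elem' bytes, refusing any count whose total
 * size would overflow - the check the conntrack hunk adds before sizing
 * its __get_free_pages() request. */
static void *alloc_array(unsigned int n, size_t elem)
{
	if (n > UINT_MAX / elem)
		return NULL;		/* n * elem would wrap */
	return calloc(n, elem);
}

int main(void)
{
	void *ok = alloc_array(1024, 8);
	void *bad = alloc_array(UINT_MAX / 8 + 1, 8);

	printf("ok=%p bad=%p\n", ok, bad);	/* bad is NULL, not a tiny buffer */
	free(ok);
	return 0;
}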
+3 -1
net/netfilter/nf_tables_api.c
··· 1724 1724 1725 1725 err = nf_tables_newexpr(ctx, &info, expr); 1726 1726 if (err < 0) 1727 - goto err2; 1727 + goto err3; 1728 1728 1729 1729 return expr; 1730 + err3: 1731 + kfree(expr); 1730 1732 err2: 1731 1733 module_put(info.ops->type->owner); 1732 1734 err1:
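The leak here was an unwind-ordering bug: once nf_tables_newexpr() fails, both the expr allocation and the module reference have to be released, so a new err3 label is stacked above err2 to free the expr first. The idiom generalizes to any stacked-acquisition error path; a sketch with hypothetical resource names:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical resources standing in for the expr allocation and the
 * module reference in the hunk above. */
static int setup(void)
{
	FILE *module;
	char *expr;

	module = fopen("/dev/null", "r");	/* resource 1 */
	if (!module)
		goto err1;

	expr = malloc(64);			/* resource 2 */
	if (!expr)
		goto err2;

	if (1 /* pretend initialising expr failed */)
		goto err3;

	return 0;

err3:
	free(expr);		/* unwind in reverse acquisition order */
err2:
	fclose(module);
err1:
	return -1;
}

int main(void)
{
	printf("setup: %d\n", setup());
	return 0;
}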
+1 -5
net/netfilter/nft_ct.c
··· 54 54 const struct nf_conn_help *help; 55 55 const struct nf_conntrack_tuple *tuple; 56 56 const struct nf_conntrack_helper *helper; 57 - long diff; 58 57 unsigned int state; 59 58 60 59 ct = nf_ct_get(pkt->skb, &ctinfo); ··· 93 94 return; 94 95 #endif 95 96 case NFT_CT_EXPIRATION: 96 - diff = (long)jiffies - (long)ct->timeout.expires; 97 - if (diff < 0) 98 - diff = 0; 99 - *dest = jiffies_to_msecs(diff); 97 + *dest = jiffies_to_msecs(nf_ct_expires(ct)); 100 98 return; 101 99 case NFT_CT_HELPER: 102 100 if (ct->master == NULL)
+1 -1
net/netfilter/nft_meta.c
··· 227 227 skb->pkt_type = value; 228 228 break; 229 229 case NFT_META_NFTRACE: 230 - skb->nf_trace = 1; 230 + skb->nf_trace = !!value; 231 231 break; 232 232 default: 233 233 WARN_ON(1);
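The old code forced nf_trace on no matter what the rule's source register held, so a rule could never clear the flag. The fix folds any register value into the one-bit field with double negation; assigning the raw value instead would keep only bit 0 and silently drop values like 4. A tiny demonstration:

#include <assert.h>
#include <stdio.h>

struct meta {
	unsigned int nf_trace:1;	/* 1-bit field, like skb->nf_trace */
};

int main(void)
{
	struct meta m = { 0 };
	unsigned int value = 4;		/* any non-zero user value */

	m.nf_trace = value;		/* truncates to bit 0: 4 -> 0 */
	assert(m.nf_trace == 0);

	m.nf_trace = !!value;		/* folds any non-zero value to 1 */
	assert(m.nf_trace == 1);

	printf("ok\n");
	return 0;
}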
+4 -6
net/packet/af_packet.c
··· 1927 1927 goto out_unlock; 1928 1928 } 1929 1929 1930 - sockc.tsflags = 0; 1930 + sockc.tsflags = sk->sk_tsflags; 1931 1931 if (msg->msg_controllen) { 1932 1932 err = sock_cmsg_send(sk, msg, &sockc); 1933 - if (unlikely(err)) { 1934 - err = -EINVAL; 1933 + if (unlikely(err)) 1935 1934 goto out_unlock; 1936 - } 1937 1935 } 1938 1936 1939 1937 skb->protocol = proto; ··· 2676 2678 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); 2677 2679 } 2678 2680 2679 - sockc.tsflags = 0; 2681 + sockc.tsflags = po->sk.sk_tsflags; 2680 2682 if (msg->msg_controllen) { 2681 2683 err = sock_cmsg_send(&po->sk, msg, &sockc); 2682 2684 if (unlikely(err)) ··· 2879 2881 if (unlikely(!(dev->flags & IFF_UP))) 2880 2882 goto out_unlock; 2881 2883 2882 - sockc.tsflags = 0; 2884 + sockc.tsflags = sk->sk_tsflags; 2883 2885 sockc.mark = sk->sk_mark; 2884 2886 if (msg->msg_controllen) { 2885 2887 err = sock_cmsg_send(sk, msg, &sockc);
+2 -1
net/rose/rose_in.c
··· 164 164 rose_frames_acked(sk, nr); 165 165 if (ns == rose->vr) { 166 166 rose_start_idletimer(sk); 167 - if (sock_queue_rcv_skb(sk, skb) == 0) { 167 + if (sk_filter_trim_cap(sk, skb, ROSE_MIN_LEN) == 0 && 168 + __sock_queue_rcv_skb(sk, skb) == 0) { 168 169 rose->vr = (rose->vr + 1) % ROSE_MODULUS; 169 170 queued = 1; 170 171 } else {
+4 -2
net/sched/sch_htb.c
··· 1140 1140 1141 1141 if (!cl->level && cl->un.leaf.q) 1142 1142 qlen = cl->un.leaf.q->q.qlen; 1143 - cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens); 1144 - cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens); 1143 + cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens), 1144 + INT_MIN, INT_MAX); 1145 + cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens), 1146 + INT_MIN, INT_MAX); 1145 1147 1146 1148 if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 || 1147 1149 gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
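HTB keeps its token counts in 64-bit nanosecond-derived units, but the exported xstats fields are 32-bit; a plain assignment truncates and can report nonsense (huge or negative token values). clamp_t() saturates instead. A standalone sketch of the difference:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Saturating s64 -> s32 conversion, i.e. the
 * clamp_t(s64, v, INT_MIN, INT_MAX) in the hunk above. */
static int32_t clamp_s64_to_s32(int64_t v)
{
	if (v > INT_MAX)
		return INT_MAX;
	if (v < INT_MIN)
		return INT_MIN;
	return (int32_t)v;
}

int main(void)
{
	int64_t tokens = 5000000000LL;			/* > INT_MAX */

	printf("truncated: %d\n", (int32_t)tokens);	/* 705032704: garbage */
	printf("clamped:   %d\n", clamp_s64_to_s32(tokens)); /* 2147483647 */
	return 0;
}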
+1 -4
net/sctp/input.c
··· 112 112 struct sctp_ep_common *rcvr; 113 113 struct sctp_transport *transport = NULL; 114 114 struct sctp_chunk *chunk; 115 - struct sctphdr *sh; 116 115 union sctp_addr src; 117 116 union sctp_addr dest; 118 117 int family; ··· 125 126 126 127 if (skb_linearize(skb)) 127 128 goto discard_it; 128 - 129 - sh = sctp_hdr(skb); 130 129 131 130 /* Pull up the IP and SCTP headers. */ 132 131 __skb_pull(skb, skb_transport_offset(skb)); ··· 227 230 chunk->rcvr = rcvr; 228 231 229 232 /* Remember the SCTP header. */ 230 - chunk->sctp_hdr = sh; 233 + chunk->sctp_hdr = sctp_hdr(skb); 231 234 232 235 /* Set the source and destination addresses of the incoming chunk. */ 233 236 sctp_init_addrs(chunk, &src, &dest);
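The bug pattern here is caching a pointer into a buffer before a call that may reallocate it: skb_linearize() can move the packet data, leaving the previously saved sctphdr pointer dangling, so the hunk drops the early sh assignment and reads sctp_hdr(skb) only after linearization and the header pulls. The same hazard in userspace terms, with realloc():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *buf = malloc(16);
	char *hdr, *tmp;

	if (!buf)
		return 1;
	strcpy(buf, "SCTP");
	hdr = buf;		/* BAD: pointer cached too early... */

	tmp = realloc(buf, 1 << 20); /* ...may move the data, like skb_linearize() */
	if (!tmp) {
		free(buf);
		return 1;
	}
	buf = tmp;
	/* 'hdr' may now dangle; dereferencing it is undefined behaviour. */

	hdr = buf;		/* GOOD: re-derive after the reallocation */
	printf("%s\n", hdr);
	free(buf);
	return 0;
}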
+15
net/tipc/bearer.c
··· 330 330 return 0; 331 331 } 332 332 333 + /* tipc_bearer_reset_all - reset all links on all bearers 334 + */ 335 + void tipc_bearer_reset_all(struct net *net) 336 + { 337 + struct tipc_net *tn = tipc_net(net); 338 + struct tipc_bearer *b; 339 + int i; 340 + 341 + for (i = 0; i < MAX_BEARERS; i++) { 342 + b = rcu_dereference_rtnl(tn->bearer_list[i]); 343 + if (b) 344 + tipc_reset_bearer(net, b); 345 + } 346 + } 347 + 333 348 /** 334 349 * bearer_disable 335 350 *
+1
net/tipc/bearer.h
··· 198 198 void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest); 199 199 struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name); 200 200 struct tipc_media *tipc_media_find(const char *name); 201 + void tipc_bearer_reset_all(struct net *net); 201 202 int tipc_bearer_setup(void); 202 203 void tipc_bearer_cleanup(void); 203 204 void tipc_bearer_stop(struct net *net);
+8 -1
net/tipc/link.c
··· 349 349 u16 ack = snd_l->snd_nxt - 1; 350 350 351 351 snd_l->ackers--; 352 + rcv_l->bc_peer_is_up = true; 353 + rcv_l->state = LINK_ESTABLISHED; 352 354 tipc_link_bc_ack_rcv(rcv_l, ack, xmitq); 353 355 tipc_link_reset(rcv_l); 354 356 rcv_l->state = LINK_RESET; ··· 1561 1559 if (!msg_peer_node_is_up(hdr)) 1562 1560 return; 1563 1561 1564 - l->bc_peer_is_up = true; 1562 + /* Open when peer acknowledges our bcast init msg (pkt #1) */ 1563 + if (msg_ack(hdr)) 1564 + l->bc_peer_is_up = true; 1565 + 1566 + if (!l->bc_peer_is_up) 1567 + return; 1565 1568 1566 1569 /* Ignore if peers_snd_nxt goes beyond receive window */ 1567 1570 if (more(peers_snd_nxt, l->rcv_nxt + l->window))
+11 -4
net/tipc/node.c
··· 1297 1297 1298 1298 rc = tipc_bcast_rcv(net, be->link, skb); 1299 1299 1300 - /* Broadcast link reset may happen at reassembly failure */ 1301 - if (rc & TIPC_LINK_DOWN_EVT) 1302 - tipc_node_reset_links(n); 1303 - 1304 1300 /* Broadcast ACKs are sent on a unicast link */ 1305 1301 if (rc & TIPC_LINK_SND_BC_ACK) { 1306 1302 tipc_node_read_lock(n); ··· 1316 1320 spin_unlock_bh(&be->inputq2.lock); 1317 1321 tipc_sk_mcast_rcv(net, &be->arrvq, &be->inputq2); 1318 1322 } 1323 + 1324 + if (rc & TIPC_LINK_DOWN_EVT) { 1325 + /* Reception reassembly failure => reset all links to peer */ 1326 + if (!tipc_link_is_up(be->link)) 1327 + tipc_node_reset_links(n); 1328 + 1329 + /* Retransmission failure => reset all links to all peers */ 1330 + if (!tipc_link_is_up(tipc_bc_sndlink(net))) 1331 + tipc_bearer_reset_all(net); 1332 + } 1333 + 1319 1334 tipc_node_put(n); 1320 1335 } 1321 1336
+4 -4
net/wireless/nl80211.c
··· 3487 3487 params.smps_mode = NL80211_SMPS_OFF; 3488 3488 } 3489 3489 3490 + params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]); 3491 + if (params.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ]) 3492 + return -EOPNOTSUPP; 3493 + 3490 3494 if (info->attrs[NL80211_ATTR_ACL_POLICY]) { 3491 3495 params.acl = parse_acl_data(&rdev->wiphy, info); 3492 3496 if (IS_ERR(params.acl)) 3493 3497 return PTR_ERR(params.acl); 3494 3498 } 3495 - 3496 - params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]); 3497 - if (params.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ]) 3498 - return -EOPNOTSUPP; 3499 3499 3500 3500 wdev_lock(wdev); 3501 3501 err = rdev_start_ap(rdev, dev, &params);
+2
net/wireless/util.c
··· 721 721 * alignment since sizeof(struct ethhdr) is 14. 722 722 */ 723 723 frame = dev_alloc_skb(hlen + sizeof(struct ethhdr) + 2 + cur_len); 724 + if (!frame) 725 + return NULL; 724 726 725 727 skb_reserve(frame, hlen + sizeof(struct ethhdr) + 2); 726 728 skb_copy_bits(skb, offset, skb_put(frame, cur_len), cur_len);