Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) Fix double-free in batman-adv, from Sven Eckelmann.

2) Fix packet stats for fast-RX path, from Johannes Berg.

3) Netfilter's ip_route_me_harder() doesn't handle request sockets
properly, fix from Florian Westphal.

4) Fix sendmsg deadlock in rxrpc, from David Howells.

5) Add missing RCU locking to transport hashtable scan, from Xin Long.

6) Fix potential packet loss in mlxsw driver, from Ido Schimmel.

7) Fix race in NAPI handling between poll handlers and busy polling,
from Eric Dumazet.

8) TX path in vxlan and geneve need proper RCU locking, from Jakub
Kicinski.

9) SYN processing in DCCP and TCP need to disable BH, from Eric
Dumazet.

10) Properly handle net_enable_timestamp() being invoked from IRQ
context, also from Eric Dumazet.

11) Fix crash on device-tree systems in xgene driver, from Alban Bedel.

12) Do not call sk_free() on a locked socket, from Arnaldo Carvalho de
Melo.

13) Fix use-after-free in netvsc driver, from Dexuan Cui.

14) Fix max MTU setting in bonding driver, from WANG Cong.

15) xen-netback hash table can be allocated from softirq context, so use
GFP_ATOMIC. From Anoob Soman.

16) Fix MAC address change bug in bgmac driver, from Hari Vyas.

17) strparser needs to destroy strp_wq on module exit, from WANG Cong.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (69 commits)
strparser: destroy workqueue on module exit
sfc: fix IPID endianness in TSOv2
sfc: avoid max() in array size
rds: remove unnecessary returned value check
rxrpc: Fix potential NULL-pointer exception
nfp: correct DMA direction in XDP DMA sync
nfp: don't tell FW about the reserved buffer space
net: ethernet: bgmac: mac address change bug
net: ethernet: bgmac: init sequence bug
xen-netback: don't vfree() queues under spinlock
xen-netback: keep a local pointer for vif in backend_disconnect()
netfilter: nf_tables: don't call nfnetlink_set_err() if nfnetlink_send() fails
netfilter: nft_set_rbtree: incorrect assumption on lower interval lookups
netfilter: nf_conntrack_sip: fix wrong memory initialisation
can: flexcan: fix typo in comment
can: usb_8dev: Fix memory leak of priv->cmd_msg_buffer
can: gs_usb: fix coding style
can: gs_usb: Don't use stack memory for USB transfers
ixgbe: Limit use of 2K buffers on architectures with 256B or larger cache lines
ixgbe: update the rss key on h/w, when ethtool ask for it
...

+896 -369
+2 -3
MAINTAINERS
··· 6011 6011 F: include/uapi/linux/hsi/ 6012 6012 6013 6013 HSO 3G MODEM DRIVER 6014 - M: Jan Dumon <j.dumon@option.com> 6015 - W: http://www.pharscape.org 6016 - S: Maintained 6014 + L: linux-usb@vger.kernel.org 6015 + S: Orphan 6017 6016 F: drivers/net/usb/hso.c 6018 6017 6019 6018 HSR NETWORK PROTOCOL
+1
drivers/net/bonding/bond_main.c
··· 4179 4179 4180 4180 /* Initialize the device entry points */ 4181 4181 ether_setup(bond_dev); 4182 + bond_dev->max_mtu = ETH_MAX_MTU; 4182 4183 bond_dev->netdev_ops = &bond_netdev_ops; 4183 4184 bond_dev->ethtool_ops = &bond_ethtool_ops; 4184 4185
+1 -1
drivers/net/can/flexcan.c
··· 196 196 #define FLEXCAN_QUIRK_BROKEN_ERR_STATE BIT(1) /* [TR]WRN_INT not connected */ 197 197 #define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) /* Disable RX FIFO Global mask */ 198 198 #define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */ 199 - #define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disble Memory error detection */ 199 + #define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disable Memory error detection */ 200 200 #define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */ 201 201 202 202 /* Structure of the message buffer */
+34 -17
drivers/net/can/usb/gs_usb.c
··· 258 258 rc = usb_control_msg(interface_to_usbdev(intf), 259 259 usb_sndctrlpipe(interface_to_usbdev(intf), 0), 260 260 GS_USB_BREQ_MODE, 261 - USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, 261 + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 262 262 gsdev->channel, 263 263 0, 264 264 dm, ··· 432 432 rc = usb_control_msg(interface_to_usbdev(intf), 433 433 usb_sndctrlpipe(interface_to_usbdev(intf), 0), 434 434 GS_USB_BREQ_BITTIMING, 435 - USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, 435 + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 436 436 dev->channel, 437 437 0, 438 438 dbt, ··· 545 545 sizeof(*hf), 546 546 hf, 547 547 urb->transfer_dma); 548 - 549 548 550 549 if (rc == -ENODEV) { 551 550 netif_device_detach(netdev); ··· 803 804 rc = usb_control_msg(interface_to_usbdev(intf), 804 805 usb_rcvctrlpipe(interface_to_usbdev(intf), 0), 805 806 GS_USB_BREQ_BT_CONST, 806 - USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, 807 + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 807 808 channel, 808 809 0, 809 810 bt_const, ··· 907 908 struct gs_usb *dev; 908 909 int rc = -ENOMEM; 909 910 unsigned int icount, i; 910 - struct gs_host_config hconf = { 911 - .byte_order = 0x0000beef, 912 - }; 913 - struct gs_device_config dconf; 911 + struct gs_host_config *hconf; 912 + struct gs_device_config *dconf; 913 + 914 + hconf = kmalloc(sizeof(*hconf), GFP_KERNEL); 915 + if (!hconf) 916 + return -ENOMEM; 917 + 918 + hconf->byte_order = 0x0000beef; 914 919 915 920 /* send host config */ 916 921 rc = usb_control_msg(interface_to_usbdev(intf), 917 922 usb_sndctrlpipe(interface_to_usbdev(intf), 0), 918 923 GS_USB_BREQ_HOST_FORMAT, 919 - USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, 924 + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 920 925 1, 921 926 intf->altsetting[0].desc.bInterfaceNumber, 922 - &hconf, 923 - sizeof(hconf), 927 + hconf, 928 + sizeof(*hconf), 924 929 1000); 930 + 931 + kfree(hconf); 925 932 926 933 if (rc < 0) { 927 934 
dev_err(&intf->dev, "Couldn't send data format (err=%d)\n", ··· 935 930 return rc; 936 931 } 937 932 933 + dconf = kmalloc(sizeof(*dconf), GFP_KERNEL); 934 + if (!dconf) 935 + return -ENOMEM; 936 + 938 937 /* read device config */ 939 938 rc = usb_control_msg(interface_to_usbdev(intf), 940 939 usb_rcvctrlpipe(interface_to_usbdev(intf), 0), 941 940 GS_USB_BREQ_DEVICE_CONFIG, 942 - USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, 941 + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 943 942 1, 944 943 intf->altsetting[0].desc.bInterfaceNumber, 945 - &dconf, 946 - sizeof(dconf), 944 + dconf, 945 + sizeof(*dconf), 947 946 1000); 948 947 if (rc < 0) { 949 948 dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n", 950 949 rc); 950 + kfree(dconf); 951 951 return rc; 952 952 } 953 953 954 - icount = dconf.icount + 1; 954 + icount = dconf->icount + 1; 955 955 dev_info(&intf->dev, "Configuring for %d interfaces\n", icount); 956 956 957 957 if (icount > GS_MAX_INTF) { 958 958 dev_err(&intf->dev, 959 959 "Driver cannot handle more that %d CAN interfaces\n", 960 960 GS_MAX_INTF); 961 + kfree(dconf); 961 962 return -EINVAL; 962 963 } 963 964 964 965 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 965 - if (!dev) 966 + if (!dev) { 967 + kfree(dconf); 966 968 return -ENOMEM; 969 + } 970 + 967 971 init_usb_anchor(&dev->rx_submitted); 968 972 969 973 atomic_set(&dev->active_channels, 0); ··· 981 967 dev->udev = interface_to_usbdev(intf); 982 968 983 969 for (i = 0; i < icount; i++) { 984 - dev->canch[i] = gs_make_candev(i, intf, &dconf); 970 + dev->canch[i] = gs_make_candev(i, intf, dconf); 985 971 if (IS_ERR_OR_NULL(dev->canch[i])) { 986 972 /* save error code to return later */ 987 973 rc = PTR_ERR(dev->canch[i]); ··· 992 978 gs_destroy_candev(dev->canch[i]); 993 979 994 980 usb_kill_anchored_urbs(&dev->rx_submitted); 981 + kfree(dconf); 995 982 kfree(dev); 996 983 return rc; 997 984 } 998 985 dev->canch[i]->parent = dev; 999 986 } 987 + 988 + kfree(dconf); 1000 989 1001 990 
return 0; 1002 991 }
+3 -6
drivers/net/can/usb/usb_8dev.c
··· 951 951 for (i = 0; i < MAX_TX_URBS; i++) 952 952 priv->tx_contexts[i].echo_index = MAX_TX_URBS; 953 953 954 - priv->cmd_msg_buffer = kzalloc(sizeof(struct usb_8dev_cmd_msg), 955 - GFP_KERNEL); 954 + priv->cmd_msg_buffer = devm_kzalloc(&intf->dev, sizeof(struct usb_8dev_cmd_msg), 955 + GFP_KERNEL); 956 956 if (!priv->cmd_msg_buffer) 957 957 goto cleanup_candev; 958 958 ··· 966 966 if (err) { 967 967 netdev_err(netdev, 968 968 "couldn't register CAN device: %d\n", err); 969 - goto cleanup_cmd_msg_buffer; 969 + goto cleanup_candev; 970 970 } 971 971 972 972 err = usb_8dev_cmd_version(priv, &version); ··· 986 986 987 987 cleanup_unregister_candev: 988 988 unregister_netdev(priv->netdev); 989 - 990 - cleanup_cmd_msg_buffer: 991 - kfree(priv->cmd_msg_buffer); 992 989 993 990 cleanup_candev: 994 991 free_candev(netdev);
+15 -15
drivers/net/ethernet/amd/declance.c
··· 1276 1276 return ret; 1277 1277 } 1278 1278 1279 - static void __exit dec_lance_remove(struct device *bdev) 1280 - { 1281 - struct net_device *dev = dev_get_drvdata(bdev); 1282 - resource_size_t start, len; 1283 - 1284 - unregister_netdev(dev); 1285 - start = to_tc_dev(bdev)->resource.start; 1286 - len = to_tc_dev(bdev)->resource.end - start + 1; 1287 - release_mem_region(start, len); 1288 - free_netdev(dev); 1289 - } 1290 - 1291 1279 /* Find all the lance cards on the system and initialize them */ 1292 1280 static int __init dec_lance_platform_probe(void) 1293 1281 { ··· 1308 1320 1309 1321 #ifdef CONFIG_TC 1310 1322 static int dec_lance_tc_probe(struct device *dev); 1311 - static int __exit dec_lance_tc_remove(struct device *dev); 1323 + static int dec_lance_tc_remove(struct device *dev); 1312 1324 1313 1325 static const struct tc_device_id dec_lance_tc_table[] = { 1314 1326 { "DEC ", "PMAD-AA " }, ··· 1322 1334 .name = "declance", 1323 1335 .bus = &tc_bus_type, 1324 1336 .probe = dec_lance_tc_probe, 1325 - .remove = __exit_p(dec_lance_tc_remove), 1337 + .remove = dec_lance_tc_remove, 1326 1338 }, 1327 1339 }; 1328 1340 ··· 1334 1346 return status; 1335 1347 } 1336 1348 1337 - static int __exit dec_lance_tc_remove(struct device *dev) 1349 + static void dec_lance_remove(struct device *bdev) 1350 + { 1351 + struct net_device *dev = dev_get_drvdata(bdev); 1352 + resource_size_t start, len; 1353 + 1354 + unregister_netdev(dev); 1355 + start = to_tc_dev(bdev)->resource.start; 1356 + len = to_tc_dev(bdev)->resource.end - start + 1; 1357 + release_mem_region(start, len); 1358 + free_netdev(dev); 1359 + } 1360 + 1361 + static int dec_lance_tc_remove(struct device *dev) 1338 1362 { 1339 1363 put_device(dev); 1340 1364 dec_lance_remove(dev);
+1 -1
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
··· 1323 1323 static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port, 1324 1324 enum xgbe_mdio_mode mode) 1325 1325 { 1326 - unsigned int reg_val = 0; 1326 + unsigned int reg_val = XGMAC_IOREAD(pdata, MAC_MDIOCL22R); 1327 1327 1328 1328 switch (mode) { 1329 1329 case XGBE_MDIO_MODE_CL22:
+2 -2
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
··· 1131 1131 hw_if->disable_tx(pdata); 1132 1132 hw_if->disable_rx(pdata); 1133 1133 1134 + phy_if->phy_stop(pdata); 1135 + 1134 1136 xgbe_free_irqs(pdata); 1135 1137 1136 1138 xgbe_napi_disable(pdata, 1); 1137 - 1138 - phy_if->phy_stop(pdata); 1139 1139 1140 1140 hw_if->exit(pdata); 1141 1141
+24
drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
··· 716 716 pdata->phy.duplex = DUPLEX_UNKNOWN; 717 717 pdata->phy.autoneg = AUTONEG_ENABLE; 718 718 pdata->phy.advertising = pdata->phy.supported; 719 + 720 + return; 719 721 } 720 722 721 723 pdata->phy.advertising &= ~ADVERTISED_Autoneg; ··· 876 874 if ((phy_data->port_mode == XGBE_PORT_MODE_SFP) && 877 875 !phy_data->sfp_phy_avail) 878 876 return 0; 877 + 878 + /* Set the proper MDIO mode for the PHY */ 879 + ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->mdio_addr, 880 + phy_data->phydev_mode); 881 + if (ret) { 882 + netdev_err(pdata->netdev, 883 + "mdio port/clause not compatible (%u/%u)\n", 884 + phy_data->mdio_addr, phy_data->phydev_mode); 885 + return ret; 886 + } 879 887 880 888 /* Create and connect to the PHY device */ 881 889 phydev = get_phy_device(phy_data->mii, phy_data->mdio_addr, ··· 2733 2721 ret = pdata->i2c_if.i2c_start(pdata); 2734 2722 if (ret) 2735 2723 return ret; 2724 + 2725 + /* Set the proper MDIO mode for the re-driver */ 2726 + if (phy_data->redrv && !phy_data->redrv_if) { 2727 + ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->redrv_addr, 2728 + XGBE_MDIO_MODE_CL22); 2729 + if (ret) { 2730 + netdev_err(pdata->netdev, 2731 + "redriver mdio port not compatible (%u)\n", 2732 + phy_data->redrv_addr); 2733 + return ret; 2734 + } 2735 + } 2736 2736 2737 2737 /* Start in highest supported mode */ 2738 2738 xgbe_phy_set_mode(pdata, phy_data->start_mode);
+6
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
··· 1749 1749 1750 1750 pdata->clk = devm_clk_get(&pdev->dev, NULL); 1751 1751 if (IS_ERR(pdata->clk)) { 1752 + /* Abort if the clock is defined but couldn't be retrived. 1753 + * Always abort if the clock is missing on DT system as 1754 + * the driver can't cope with this case. 1755 + */ 1756 + if (PTR_ERR(pdata->clk) != -ENOENT || dev->of_node) 1757 + return PTR_ERR(pdata->clk); 1752 1758 /* Firmware may have set up the clock already. */ 1753 1759 dev_info(dev, "clocks have been setup already\n"); 1754 1760 }
+18 -9
drivers/net/ethernet/broadcom/bgmac-platform.c
··· 51 51 52 52 static bool platform_bgmac_clk_enabled(struct bgmac *bgmac) 53 53 { 54 - if ((bgmac_idm_read(bgmac, BCMA_IOCTL) & 55 - (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC)) != BCMA_IOCTL_CLK) 54 + if ((bgmac_idm_read(bgmac, BCMA_IOCTL) & BGMAC_CLK_EN) != BGMAC_CLK_EN) 56 55 return false; 57 56 if (bgmac_idm_read(bgmac, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET) 58 57 return false; ··· 60 61 61 62 static void platform_bgmac_clk_enable(struct bgmac *bgmac, u32 flags) 62 63 { 63 - bgmac_idm_write(bgmac, BCMA_IOCTL, 64 - (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC | flags)); 65 - bgmac_idm_read(bgmac, BCMA_IOCTL); 64 + u32 val; 66 65 67 - bgmac_idm_write(bgmac, BCMA_RESET_CTL, 0); 68 - bgmac_idm_read(bgmac, BCMA_RESET_CTL); 69 - udelay(1); 66 + /* The Reset Control register only contains a single bit to show if the 67 + * controller is currently in reset. Do a sanity check here, just in 68 + * case the bootloader happened to leave the device in reset. 69 + */ 70 + val = bgmac_idm_read(bgmac, BCMA_RESET_CTL); 71 + if (val) { 72 + bgmac_idm_write(bgmac, BCMA_RESET_CTL, 0); 73 + bgmac_idm_read(bgmac, BCMA_RESET_CTL); 74 + udelay(1); 75 + } 70 76 71 - bgmac_idm_write(bgmac, BCMA_IOCTL, (BCMA_IOCTL_CLK | flags)); 77 + val = bgmac_idm_read(bgmac, BCMA_IOCTL); 78 + /* Some bits of BCMA_IOCTL set by HW/ATF and should not change */ 79 + val |= flags & ~(BGMAC_AWCACHE | BGMAC_ARCACHE | BGMAC_AWUSER | 80 + BGMAC_ARUSER); 81 + val |= BGMAC_CLK_EN; 82 + bgmac_idm_write(bgmac, BCMA_IOCTL, val); 72 83 bgmac_idm_read(bgmac, BCMA_IOCTL); 73 84 udelay(1); 74 85 }
+5 -1
drivers/net/ethernet/broadcom/bgmac.c
··· 1223 1223 static int bgmac_set_mac_address(struct net_device *net_dev, void *addr) 1224 1224 { 1225 1225 struct bgmac *bgmac = netdev_priv(net_dev); 1226 + struct sockaddr *sa = addr; 1226 1227 int ret; 1227 1228 1228 1229 ret = eth_prepare_mac_addr_change(net_dev, addr); 1229 1230 if (ret < 0) 1230 1231 return ret; 1231 - bgmac_write_mac_address(bgmac, (u8 *)addr); 1232 + 1233 + ether_addr_copy(net_dev->dev_addr, sa->sa_data); 1234 + bgmac_write_mac_address(bgmac, net_dev->dev_addr); 1235 + 1232 1236 eth_commit_mac_addr_change(net_dev, addr); 1233 1237 return 0; 1234 1238 }
+16
drivers/net/ethernet/broadcom/bgmac.h
··· 213 213 /* BCMA GMAC core specific IO Control (BCMA_IOCTL) flags */ 214 214 #define BGMAC_BCMA_IOCTL_SW_CLKEN 0x00000004 /* PHY Clock Enable */ 215 215 #define BGMAC_BCMA_IOCTL_SW_RESET 0x00000008 /* PHY Reset */ 216 + /* The IOCTL values appear to be different in NS, NSP, and NS2, and do not match 217 + * the values directly above 218 + */ 219 + #define BGMAC_CLK_EN BIT(0) 220 + #define BGMAC_RESERVED_0 BIT(1) 221 + #define BGMAC_SOURCE_SYNC_MODE_EN BIT(2) 222 + #define BGMAC_DEST_SYNC_MODE_EN BIT(3) 223 + #define BGMAC_TX_CLK_OUT_INVERT_EN BIT(4) 224 + #define BGMAC_DIRECT_GMII_MODE BIT(5) 225 + #define BGMAC_CLK_250_SEL BIT(6) 226 + #define BGMAC_AWCACHE (0xf << 7) 227 + #define BGMAC_RESERVED_1 (0x1f << 11) 228 + #define BGMAC_ARCACHE (0xf << 16) 229 + #define BGMAC_AWUSER (0x3f << 20) 230 + #define BGMAC_ARUSER (0x3f << 26) 231 + #define BGMAC_RESERVED BIT(31) 216 232 217 233 /* BCMA GMAC core specific IO status (BCMA_IOST) flags */ 218 234 #define BGMAC_BCMA_IOST_ATTACHED 0x00000800
+2 -2
drivers/net/ethernet/broadcom/sb1250-mac.c
··· 2617 2617 return err; 2618 2618 } 2619 2619 2620 - static int __exit sbmac_remove(struct platform_device *pldev) 2620 + static int sbmac_remove(struct platform_device *pldev) 2621 2621 { 2622 2622 struct net_device *dev = platform_get_drvdata(pldev); 2623 2623 struct sbmac_softc *sc = netdev_priv(dev); ··· 2634 2634 2635 2635 static struct platform_driver sbmac_driver = { 2636 2636 .probe = sbmac_probe, 2637 - .remove = __exit_p(sbmac_remove), 2637 + .remove = sbmac_remove, 2638 2638 .driver = { 2639 2639 .name = sbmac_string, 2640 2640 },
+3 -3
drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
··· 37 37 38 38 #define T4FW_VERSION_MAJOR 0x01 39 39 #define T4FW_VERSION_MINOR 0x10 40 - #define T4FW_VERSION_MICRO 0x1A 40 + #define T4FW_VERSION_MICRO 0x21 41 41 #define T4FW_VERSION_BUILD 0x00 42 42 43 43 #define T4FW_MIN_VERSION_MAJOR 0x01 ··· 46 46 47 47 #define T5FW_VERSION_MAJOR 0x01 48 48 #define T5FW_VERSION_MINOR 0x10 49 - #define T5FW_VERSION_MICRO 0x1A 49 + #define T5FW_VERSION_MICRO 0x21 50 50 #define T5FW_VERSION_BUILD 0x00 51 51 52 52 #define T5FW_MIN_VERSION_MAJOR 0x00 ··· 55 55 56 56 #define T6FW_VERSION_MAJOR 0x01 57 57 #define T6FW_VERSION_MINOR 0x10 58 - #define T6FW_VERSION_MICRO 0x1A 58 + #define T6FW_VERSION_MICRO 0x21 59 59 #define T6FW_VERSION_BUILD 0x00 60 60 61 61 #define T6FW_MIN_VERSION_MAJOR 0x00
+2 -2
drivers/net/ethernet/faraday/ftgmac100.c
··· 1456 1456 return err; 1457 1457 } 1458 1458 1459 - static int __exit ftgmac100_remove(struct platform_device *pdev) 1459 + static int ftgmac100_remove(struct platform_device *pdev) 1460 1460 { 1461 1461 struct net_device *netdev; 1462 1462 struct ftgmac100 *priv; ··· 1483 1483 1484 1484 static struct platform_driver ftgmac100_driver = { 1485 1485 .probe = ftgmac100_probe, 1486 - .remove = __exit_p(ftgmac100_remove), 1486 + .remove = ftgmac100_remove, 1487 1487 .driver = { 1488 1488 .name = DRV_NAME, 1489 1489 .of_match_table = ftgmac100_of_match,
+2 -2
drivers/net/ethernet/faraday/ftmac100.c
··· 1156 1156 return err; 1157 1157 } 1158 1158 1159 - static int __exit ftmac100_remove(struct platform_device *pdev) 1159 + static int ftmac100_remove(struct platform_device *pdev) 1160 1160 { 1161 1161 struct net_device *netdev; 1162 1162 struct ftmac100 *priv; ··· 1176 1176 1177 1177 static struct platform_driver ftmac100_driver = { 1178 1178 .probe = ftmac100_probe, 1179 - .remove = __exit_p(ftmac100_remove), 1179 + .remove = ftmac100_remove, 1180 1180 .driver = { 1181 1181 .name = DRV_NAME, 1182 1182 },
+2 -1
drivers/net/ethernet/intel/ixgbe/ixgbe.h
··· 96 96 #define IXGBE_MAX_FRAME_BUILD_SKB \ 97 97 (SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K) - IXGBE_SKB_PAD) 98 98 #else 99 - #define IGB_MAX_FRAME_BUILD_SKB IXGBE_RXBUFFER_2K 99 + #define IXGBE_MAX_FRAME_BUILD_SKB IXGBE_RXBUFFER_2K 100 100 #endif 101 101 102 102 /* ··· 929 929 struct ixgbe_adapter *adapter, 930 930 struct ixgbe_ring *tx_ring); 931 931 u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter); 932 + void ixgbe_store_key(struct ixgbe_adapter *adapter); 932 933 void ixgbe_store_reta(struct ixgbe_adapter *adapter); 933 934 s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, 934 935 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
+3 -1
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
··· 2998 2998 } 2999 2999 3000 3000 /* Fill out the rss hash key */ 3001 - if (key) 3001 + if (key) { 3002 3002 memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev)); 3003 + ixgbe_store_key(adapter); 3004 + } 3003 3005 3004 3006 ixgbe_store_reta(adapter); 3005 3007
+18 -4
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 3474 3474 } 3475 3475 3476 3476 /** 3477 + * ixgbe_store_key - Write the RSS key to HW 3478 + * @adapter: device handle 3479 + * 3480 + * Write the RSS key stored in adapter.rss_key to HW. 3481 + */ 3482 + void ixgbe_store_key(struct ixgbe_adapter *adapter) 3483 + { 3484 + struct ixgbe_hw *hw = &adapter->hw; 3485 + int i; 3486 + 3487 + for (i = 0; i < 10; i++) 3488 + IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]); 3489 + } 3490 + 3491 + /** 3477 3492 * ixgbe_store_reta - Write the RETA table to HW 3478 3493 * @adapter: device handle 3479 3494 * ··· 3553 3538 3554 3539 static void ixgbe_setup_reta(struct ixgbe_adapter *adapter) 3555 3540 { 3556 - struct ixgbe_hw *hw = &adapter->hw; 3557 3541 u32 i, j; 3558 3542 u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter); 3559 3543 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; ··· 3565 3551 rss_i = 4; 3566 3552 3567 3553 /* Fill out hash function seeds */ 3568 - for (i = 0; i < 10; i++) 3569 - IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]); 3554 + ixgbe_store_key(adapter); 3570 3555 3571 3556 /* Fill out redirection table */ 3572 3557 memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl)); ··· 3972 3959 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) 3973 3960 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); 3974 3961 3975 - if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) 3962 + if ((max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) || 3963 + (max_frame > IXGBE_MAX_FRAME_BUILD_SKB)) 3976 3964 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); 3977 3965 #endif 3978 3966 }
+20 -10
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
··· 441 441 mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr, 442 442 struct mlxsw_sp_prefix_usage *req_prefix_usage) 443 443 { 444 - struct mlxsw_sp_lpm_tree *lpm_tree; 444 + struct mlxsw_sp_lpm_tree *lpm_tree = vr->lpm_tree; 445 + struct mlxsw_sp_lpm_tree *new_tree; 446 + int err; 445 447 446 - if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, 447 - &vr->lpm_tree->prefix_usage)) 448 + if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage)) 448 449 return 0; 449 450 450 - lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage, 451 + new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage, 451 452 vr->proto, false); 452 - if (IS_ERR(lpm_tree)) { 453 + if (IS_ERR(new_tree)) { 453 454 /* We failed to get a tree according to the required 454 455 * prefix usage. However, the current tree might be still good 455 456 * for us if our requirement is subset of the prefixes used 456 457 * in the tree. 457 458 */ 458 459 if (mlxsw_sp_prefix_usage_subset(req_prefix_usage, 459 - &vr->lpm_tree->prefix_usage)) 460 + &lpm_tree->prefix_usage)) 460 461 return 0; 461 - return PTR_ERR(lpm_tree); 462 + return PTR_ERR(new_tree); 462 463 } 463 464 464 - mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr); 465 - mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree); 465 + /* Prevent packet loss by overwriting existing binding */ 466 + vr->lpm_tree = new_tree; 467 + err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr); 468 + if (err) 469 + goto err_tree_bind; 470 + mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree); 471 + 472 + return 0; 473 + 474 + err_tree_bind: 466 475 vr->lpm_tree = lpm_tree; 467 - return mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr); 476 + mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree); 477 + return err; 468 478 } 469 479 470 480 static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp,
+4 -3
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
··· 1498 1498 txbuf->real_len = pkt_len; 1499 1499 1500 1500 dma_sync_single_for_device(&nn->pdev->dev, rxbuf->dma_addr + pkt_off, 1501 - pkt_len, DMA_TO_DEVICE); 1501 + pkt_len, DMA_BIDIRECTIONAL); 1502 1502 1503 1503 /* Build TX descriptor */ 1504 1504 txd = &tx_ring->txds[wr_idx]; ··· 1611 1611 1612 1612 dma_sync_single_for_cpu(&nn->pdev->dev, 1613 1613 rxbuf->dma_addr + pkt_off, 1614 - pkt_len, DMA_FROM_DEVICE); 1614 + pkt_len, DMA_BIDIRECTIONAL); 1615 1615 act = nfp_net_run_xdp(xdp_prog, rxbuf->frag + data_off, 1616 1616 pkt_len); 1617 1617 switch (act) { ··· 2198 2198 nfp_net_write_mac_addr(nn); 2199 2199 2200 2200 nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu); 2201 - nn_writel(nn, NFP_NET_CFG_FLBUFSZ, nn->fl_bufsz); 2201 + nn_writel(nn, NFP_NET_CFG_FLBUFSZ, 2202 + nn->fl_bufsz - NFP_NET_RX_BUF_NON_DATA); 2202 2203 2203 2204 /* Enable device */ 2204 2205 new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
+2 -2
drivers/net/ethernet/seeq/sgiseeq.c
··· 807 807 return err; 808 808 } 809 809 810 - static int __exit sgiseeq_remove(struct platform_device *pdev) 810 + static int sgiseeq_remove(struct platform_device *pdev) 811 811 { 812 812 struct net_device *dev = platform_get_drvdata(pdev); 813 813 struct sgiseeq_private *sp = netdev_priv(dev); ··· 822 822 823 823 static struct platform_driver sgiseeq_driver = { 824 824 .probe = sgiseeq_probe, 825 - .remove = __exit_p(sgiseeq_remove), 825 + .remove = sgiseeq_remove, 826 826 .driver = { 827 827 .name = "sgiseeq", 828 828 }
+6 -6
drivers/net/ethernet/sfc/ef10.c
··· 828 828 static int efx_ef10_link_piobufs(struct efx_nic *efx) 829 829 { 830 830 struct efx_ef10_nic_data *nic_data = efx->nic_data; 831 - _MCDI_DECLARE_BUF(inbuf, 832 - max(MC_CMD_LINK_PIOBUF_IN_LEN, 833 - MC_CMD_UNLINK_PIOBUF_IN_LEN)); 831 + MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_PIOBUF_IN_LEN); 834 832 struct efx_channel *channel; 835 833 struct efx_tx_queue *tx_queue; 836 834 unsigned int offset, index; ··· 836 838 837 839 BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0); 838 840 BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0); 839 - 840 - memset(inbuf, 0, sizeof(inbuf)); 841 841 842 842 /* Link a buffer to each VI in the write-combining mapping */ 843 843 for (index = 0; index < nic_data->n_piobufs; ++index) { ··· 916 920 return 0; 917 921 918 922 fail: 923 + /* inbuf was defined for MC_CMD_LINK_PIOBUF. We can use the same 924 + * buffer for MC_CMD_UNLINK_PIOBUF because it's shorter. 925 + */ 926 + BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_IN_LEN < MC_CMD_UNLINK_PIOBUF_IN_LEN); 919 927 while (index--) { 920 928 MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE, 921 929 nic_data->pio_write_vi_base + index); ··· 2183 2183 /* Modify IPv4 header if needed. */ 2184 2184 ip->tot_len = 0; 2185 2185 ip->check = 0; 2186 - ipv4_id = ip->id; 2186 + ipv4_id = ntohs(ip->id); 2187 2187 } else { 2188 2188 /* Modify IPv6 header if needed. */ 2189 2189 struct ipv6hdr *ipv6 = ipv6_hdr(skb);
+2 -2
drivers/net/ethernet/sgi/meth.c
··· 854 854 return 0; 855 855 } 856 856 857 - static int __exit meth_remove(struct platform_device *pdev) 857 + static int meth_remove(struct platform_device *pdev) 858 858 { 859 859 struct net_device *dev = platform_get_drvdata(pdev); 860 860 ··· 866 866 867 867 static struct platform_driver meth_driver = { 868 868 .probe = meth_probe, 869 - .remove = __exit_p(meth_remove), 869 + .remove = meth_remove, 870 870 .driver = { 871 871 .name = "meth", 872 872 }
+2
drivers/net/geneve.c
··· 881 881 info = &geneve->info; 882 882 } 883 883 884 + rcu_read_lock(); 884 885 #if IS_ENABLED(CONFIG_IPV6) 885 886 if (info->mode & IP_TUNNEL_INFO_IPV6) 886 887 err = geneve6_xmit_skb(skb, dev, geneve, info); 887 888 else 888 889 #endif 889 890 err = geneve_xmit_skb(skb, dev, geneve, info); 891 + rcu_read_unlock(); 890 892 891 893 if (likely(!err)) 892 894 return NETDEV_TX_OK;
+12 -5
drivers/net/hyperv/netvsc_drv.c
··· 859 859 if (ret) 860 860 goto out; 861 861 862 - ndevctx->start_remove = true; 863 - rndis_filter_device_remove(hdev, nvdev); 864 - 865 - ndev->mtu = mtu; 866 - 867 862 memset(&device_info, 0, sizeof(device_info)); 868 863 device_info.ring_size = ring_size; 869 864 device_info.num_chn = nvdev->num_chn; 870 865 device_info.max_num_vrss_chns = nvdev->num_chn; 866 + 867 + ndevctx->start_remove = true; 868 + rndis_filter_device_remove(hdev, nvdev); 869 + 870 + /* 'nvdev' has been freed in rndis_filter_device_remove() -> 871 + * netvsc_device_remove () -> free_netvsc_device(). 872 + * We mustn't access it before it's re-created in 873 + * rndis_filter_device_add() -> netvsc_device_add(). 874 + */ 875 + 876 + ndev->mtu = mtu; 877 + 871 878 rndis_filter_device_add(hdev, &device_info); 872 879 873 880 out:
+1 -1
drivers/net/usb/asix_devices.c
··· 346 346 if (ret < 0) 347 347 goto out; 348 348 349 - asix_write_medium_mode(dev, AX88772_MEDIUM_DEFAULT, 0); 349 + ret = asix_write_medium_mode(dev, AX88772_MEDIUM_DEFAULT, 0); 350 350 if (ret < 0) 351 351 goto out; 352 352
+1 -1
drivers/net/virtio_net.c
··· 51 51 * at once, the weight is chosen so that the EWMA will be insensitive to short- 52 52 * term, transient changes in packet size. 53 53 */ 54 - DECLARE_EWMA(pkt_len, 1, 64) 54 + DECLARE_EWMA(pkt_len, 0, 64) 55 55 56 56 /* With mergeable buffers we align buffer address and use the low bits to 57 57 * encode its true size. Buffer size is up to 1 page so we need to align to
+6 -2
drivers/net/vxlan.c
··· 2105 2105 src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, 2106 2106 vxlan->cfg.port_max, true); 2107 2107 2108 + rcu_read_lock(); 2108 2109 if (dst->sa.sa_family == AF_INET) { 2109 2110 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); 2110 2111 struct rtable *rt; ··· 2128 2127 dst_port, vni, &rt->dst, 2129 2128 rt->rt_flags); 2130 2129 if (err) 2131 - return; 2130 + goto out_unlock; 2132 2131 } else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) { 2133 2132 df = htons(IP_DF); 2134 2133 } ··· 2167 2166 dst_port, vni, ndst, 2168 2167 rt6i_flags); 2169 2168 if (err) 2170 - return; 2169 + goto out_unlock; 2171 2170 } 2172 2171 2173 2172 tos = ip_tunnel_ecn_encap(tos, old_iph, skb); ··· 2184 2183 label, src_port, dst_port, !udp_sum); 2185 2184 #endif 2186 2185 } 2186 + out_unlock: 2187 + rcu_read_unlock(); 2187 2188 return; 2188 2189 2189 2190 drop: ··· 2194 2191 return; 2195 2192 2196 2193 tx_error: 2194 + rcu_read_unlock(); 2197 2195 if (err == -ELOOP) 2198 2196 dev->stats.collisions++; 2199 2197 else if (err == -ENETUNREACH)
+98 -3
drivers/net/wireless/ath/ath10k/core.c
··· 18 18 #include <linux/module.h> 19 19 #include <linux/firmware.h> 20 20 #include <linux/of.h> 21 + #include <linux/dmi.h> 22 + #include <linux/ctype.h> 21 23 #include <asm/byteorder.h> 22 24 23 25 #include "core.h" ··· 713 711 return 0; 714 712 } 715 713 714 + static void ath10k_core_check_bdfext(const struct dmi_header *hdr, void *data) 715 + { 716 + struct ath10k *ar = data; 717 + const char *bdf_ext; 718 + const char *magic = ATH10K_SMBIOS_BDF_EXT_MAGIC; 719 + u8 bdf_enabled; 720 + int i; 721 + 722 + if (hdr->type != ATH10K_SMBIOS_BDF_EXT_TYPE) 723 + return; 724 + 725 + if (hdr->length != ATH10K_SMBIOS_BDF_EXT_LENGTH) { 726 + ath10k_dbg(ar, ATH10K_DBG_BOOT, 727 + "wrong smbios bdf ext type length (%d).\n", 728 + hdr->length); 729 + return; 730 + } 731 + 732 + bdf_enabled = *((u8 *)hdr + ATH10K_SMBIOS_BDF_EXT_OFFSET); 733 + if (!bdf_enabled) { 734 + ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant name not found.\n"); 735 + return; 736 + } 737 + 738 + /* Only one string exists (per spec) */ 739 + bdf_ext = (char *)hdr + hdr->length; 740 + 741 + if (memcmp(bdf_ext, magic, strlen(magic)) != 0) { 742 + ath10k_dbg(ar, ATH10K_DBG_BOOT, 743 + "bdf variant magic does not match.\n"); 744 + return; 745 + } 746 + 747 + for (i = 0; i < strlen(bdf_ext); i++) { 748 + if (!isascii(bdf_ext[i]) || !isprint(bdf_ext[i])) { 749 + ath10k_dbg(ar, ATH10K_DBG_BOOT, 750 + "bdf variant name contains non ascii chars.\n"); 751 + return; 752 + } 753 + } 754 + 755 + /* Copy extension name without magic suffix */ 756 + if (strscpy(ar->id.bdf_ext, bdf_ext + strlen(magic), 757 + sizeof(ar->id.bdf_ext)) < 0) { 758 + ath10k_dbg(ar, ATH10K_DBG_BOOT, 759 + "bdf variant string is longer than the buffer can accommodate (variant: %s)\n", 760 + bdf_ext); 761 + return; 762 + } 763 + 764 + ath10k_dbg(ar, ATH10K_DBG_BOOT, 765 + "found and validated bdf variant smbios_type 0x%x bdf %s\n", 766 + ATH10K_SMBIOS_BDF_EXT_TYPE, bdf_ext); 767 + } 768 + 769 + static int ath10k_core_check_smbios(struct ath10k *ar) 
770 + { 771 + ar->id.bdf_ext[0] = '\0'; 772 + dmi_walk(ath10k_core_check_bdfext, ar); 773 + 774 + if (ar->id.bdf_ext[0] == '\0') 775 + return -ENODATA; 776 + 777 + return 0; 778 + } 779 + 716 780 static int ath10k_download_and_run_otp(struct ath10k *ar) 717 781 { 718 782 u32 result, address = ar->hw_params.patch_load_addr; ··· 1088 1020 case ATH10K_BD_IE_BOARD: 1089 1021 ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len, 1090 1022 boardname); 1023 + if (ret == -ENOENT && ar->id.bdf_ext[0] != '\0') { 1024 + /* try default bdf if variant was not found */ 1025 + char *s, *v = ",variant="; 1026 + char boardname2[100]; 1027 + 1028 + strlcpy(boardname2, boardname, 1029 + sizeof(boardname2)); 1030 + 1031 + s = strstr(boardname2, v); 1032 + if (s) 1033 + *s = '\0'; /* strip ",variant=%s" */ 1034 + 1035 + ret = ath10k_core_parse_bd_ie_board(ar, data, 1036 + ie_len, 1037 + boardname2); 1038 + } 1039 + 1091 1040 if (ret == -ENOENT) 1092 1041 /* no match found, continue */ 1093 1042 break; ··· 1142 1057 static int ath10k_core_create_board_name(struct ath10k *ar, char *name, 1143 1058 size_t name_len) 1144 1059 { 1060 + /* strlen(',variant=') + strlen(ar->id.bdf_ext) */ 1061 + char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = { 0 }; 1062 + 1145 1063 if (ar->id.bmi_ids_valid) { 1146 1064 scnprintf(name, name_len, 1147 1065 "bus=%s,bmi-chip-id=%d,bmi-board-id=%d", ··· 1154 1066 goto out; 1155 1067 } 1156 1068 1069 + if (ar->id.bdf_ext[0] != '\0') 1070 + scnprintf(variant, sizeof(variant), ",variant=%s", 1071 + ar->id.bdf_ext); 1072 + 1157 1073 scnprintf(name, name_len, 1158 - "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x", 1074 + "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x%s", 1159 1075 ath10k_bus_str(ar->hif.bus), 1160 1076 ar->id.vendor, ar->id.device, 1161 - ar->id.subsystem_vendor, ar->id.subsystem_device); 1162 - 1077 + ar->id.subsystem_vendor, ar->id.subsystem_device, variant); 1163 1078 out: 1164 1079 
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using board name '%s'\n", name); 1165 1080 ··· 2218 2127 ret); 2219 2128 goto err_free_firmware_files; 2220 2129 } 2130 + 2131 + ret = ath10k_core_check_smbios(ar); 2132 + if (ret) 2133 + ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant name not set.\n"); 2221 2134 2222 2135 ret = ath10k_core_fetch_board_file(ar); 2223 2136 if (ret) {
+19
drivers/net/wireless/ath/ath10k/core.h
··· 69 69 #define ATH10K_NAPI_BUDGET 64 70 70 #define ATH10K_NAPI_QUOTA_LIMIT 60 71 71 72 + /* SMBIOS type containing Board Data File Name Extension */ 73 + #define ATH10K_SMBIOS_BDF_EXT_TYPE 0xF8 74 + 75 + /* SMBIOS type structure length (excluding strings-set) */ 76 + #define ATH10K_SMBIOS_BDF_EXT_LENGTH 0x9 77 + 78 + /* Offset pointing to Board Data File Name Extension */ 79 + #define ATH10K_SMBIOS_BDF_EXT_OFFSET 0x8 80 + 81 + /* Board Data File Name Extension string length. 82 + * String format: BDF_<Customer ID>_<Extension>\0 83 + */ 84 + #define ATH10K_SMBIOS_BDF_EXT_STR_LENGTH 0x20 85 + 86 + /* The magic used by QCA spec */ 87 + #define ATH10K_SMBIOS_BDF_EXT_MAGIC "BDF_" 88 + 72 89 struct ath10k; 73 90 74 91 enum ath10k_bus { ··· 815 798 bool bmi_ids_valid; 816 799 u8 bmi_board_id; 817 800 u8 bmi_chip_id; 801 + 802 + char bdf_ext[ATH10K_SMBIOS_BDF_EXT_STR_LENGTH]; 818 803 } id; 819 804 820 805 int fw_api;
+1 -1
drivers/net/wireless/ath/ath5k/ath5k.h
··· 1252 1252 #define ATH5K_TXQ_LEN_MAX (ATH_TXBUF / 4) /* bufs per queue */ 1253 1253 #define ATH5K_TXQ_LEN_LOW (ATH5K_TXQ_LEN_MAX / 2) /* low mark */ 1254 1254 1255 - DECLARE_EWMA(beacon_rssi, 1024, 8) 1255 + DECLARE_EWMA(beacon_rssi, 10, 8) 1256 1256 1257 1257 /* Driver state associated with an instance of a device */ 1258 1258 struct ath5k_hw {
+21 -7
drivers/net/wireless/mac80211_hwsim.c
··· 3056 3056 static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) 3057 3057 { 3058 3058 struct hwsim_new_radio_params param = { 0 }; 3059 + const char *hwname = NULL; 3059 3060 3060 3061 param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG]; 3061 3062 param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE]; ··· 3070 3069 if (info->attrs[HWSIM_ATTR_NO_VIF]) 3071 3070 param.no_vif = true; 3072 3071 3073 - if (info->attrs[HWSIM_ATTR_RADIO_NAME]) 3074 - param.hwname = nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]); 3072 + if (info->attrs[HWSIM_ATTR_RADIO_NAME]) { 3073 + hwname = kasprintf(GFP_KERNEL, "%.*s", 3074 + nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]), 3075 + (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME])); 3076 + if (!hwname) 3077 + return -ENOMEM; 3078 + param.hwname = hwname; 3079 + } 3075 3080 3076 3081 if (info->attrs[HWSIM_ATTR_USE_CHANCTX]) 3077 3082 param.use_chanctx = true; ··· 3105 3098 s64 idx = -1; 3106 3099 const char *hwname = NULL; 3107 3100 3108 - if (info->attrs[HWSIM_ATTR_RADIO_ID]) 3101 + if (info->attrs[HWSIM_ATTR_RADIO_ID]) { 3109 3102 idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]); 3110 - else if (info->attrs[HWSIM_ATTR_RADIO_NAME]) 3111 - hwname = (void *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]); 3112 - else 3103 + } else if (info->attrs[HWSIM_ATTR_RADIO_NAME]) { 3104 + hwname = kasprintf(GFP_KERNEL, "%.*s", 3105 + nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]), 3106 + (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME])); 3107 + if (!hwname) 3108 + return -ENOMEM; 3109 + } else 3113 3110 return -EINVAL; 3114 3111 3115 3112 spin_lock_bh(&hwsim_radio_lock); ··· 3122 3111 if (data->idx != idx) 3123 3112 continue; 3124 3113 } else { 3125 - if (strcmp(hwname, wiphy_name(data->hw->wiphy))) 3114 + if (!hwname || 3115 + strcmp(hwname, wiphy_name(data->hw->wiphy))) 3126 3116 continue; 3127 3117 } 3128 3118 ··· 3134 3122 spin_unlock_bh(&hwsim_radio_lock); 3135 3123 mac80211_hwsim_del_radio(data, 
wiphy_name(data->hw->wiphy), 3136 3124 info); 3125 + kfree(hwname); 3137 3126 return 0; 3138 3127 } 3139 3128 spin_unlock_bh(&hwsim_radio_lock); 3140 3129 3130 + kfree(hwname); 3141 3131 return -ENODEV; 3142 3132 } 3143 3133
+1 -1
drivers/net/wireless/ralink/rt2x00/rt2x00.h
··· 257 257 int tx_failed; 258 258 }; 259 259 260 - DECLARE_EWMA(rssi, 1024, 8) 260 + DECLARE_EWMA(rssi, 10, 8) 261 261 262 262 /* 263 263 * Antenna settings about the currently active link.
+1 -1
drivers/net/xen-netback/hash.c
··· 39 39 unsigned long flags; 40 40 bool found; 41 41 42 - new = kmalloc(sizeof(*entry), GFP_KERNEL); 42 + new = kmalloc(sizeof(*entry), GFP_ATOMIC); 43 43 if (!new) 44 44 return; 45 45
+19 -12
drivers/net/xen-netback/xenbus.c
··· 492 492 493 493 static void backend_disconnect(struct backend_info *be) 494 494 { 495 - if (be->vif) { 495 + struct xenvif *vif = be->vif; 496 + 497 + if (vif) { 496 498 unsigned int queue_index; 499 + struct xenvif_queue *queues; 497 500 498 - xen_unregister_watchers(be->vif); 501 + xen_unregister_watchers(vif); 499 502 #ifdef CONFIG_DEBUG_FS 500 - xenvif_debugfs_delif(be->vif); 503 + xenvif_debugfs_delif(vif); 501 504 #endif /* CONFIG_DEBUG_FS */ 502 - xenvif_disconnect_data(be->vif); 503 - for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index) 504 - xenvif_deinit_queue(&be->vif->queues[queue_index]); 505 + xenvif_disconnect_data(vif); 506 + for (queue_index = 0; 507 + queue_index < vif->num_queues; 508 + ++queue_index) 509 + xenvif_deinit_queue(&vif->queues[queue_index]); 505 510 506 - spin_lock(&be->vif->lock); 507 - vfree(be->vif->queues); 508 - be->vif->num_queues = 0; 509 - be->vif->queues = NULL; 510 - spin_unlock(&be->vif->lock); 511 + spin_lock(&vif->lock); 512 + queues = vif->queues; 513 + vif->num_queues = 0; 514 + vif->queues = NULL; 515 + spin_unlock(&vif->lock); 511 516 512 - xenvif_disconnect_ctrl(be->vif); 517 + vfree(queues); 518 + 519 + xenvif_disconnect_ctrl(vif); 513 520 } 514 521 } 515 522
+41 -20
include/linux/average.h
··· 1 1 #ifndef _LINUX_AVERAGE_H 2 2 #define _LINUX_AVERAGE_H 3 3 4 - /* Exponentially weighted moving average (EWMA) */ 4 + /* 5 + * Exponentially weighted moving average (EWMA) 6 + * 7 + * This implements a fixed-precision EWMA algorithm, with both the 8 + * precision and fall-off coefficient determined at compile-time 9 + * and built into the generated helper funtions. 10 + * 11 + * The first argument to the macro is the name that will be used 12 + * for the struct and helper functions. 13 + * 14 + * The second argument, the precision, expresses how many bits are 15 + * used for the fractional part of the fixed-precision values. 16 + * 17 + * The third argument, the weight reciprocal, determines how the 18 + * new values will be weighed vs. the old state, new values will 19 + * get weight 1/weight_rcp and old values 1-1/weight_rcp. Note 20 + * that this parameter must be a power of two for efficiency. 21 + */ 5 22 6 - #define DECLARE_EWMA(name, _factor, _weight) \ 23 + #define DECLARE_EWMA(name, _precision, _weight_rcp) \ 7 24 struct ewma_##name { \ 8 25 unsigned long internal; \ 9 26 }; \ 10 27 static inline void ewma_##name##_init(struct ewma_##name *e) \ 11 28 { \ 12 - BUILD_BUG_ON(!__builtin_constant_p(_factor)); \ 13 - BUILD_BUG_ON(!__builtin_constant_p(_weight)); \ 14 - BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \ 15 - BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \ 29 + BUILD_BUG_ON(!__builtin_constant_p(_precision)); \ 30 + BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \ 31 + /* \ 32 + * Even if you want to feed it just 0/1 you should have \ 33 + * some bits for the non-fractional part... 
\ 34 + */ \ 35 + BUILD_BUG_ON((_precision) > 30); \ 36 + BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \ 16 37 e->internal = 0; \ 17 38 } \ 18 39 static inline unsigned long \ 19 40 ewma_##name##_read(struct ewma_##name *e) \ 20 41 { \ 21 - BUILD_BUG_ON(!__builtin_constant_p(_factor)); \ 22 - BUILD_BUG_ON(!__builtin_constant_p(_weight)); \ 23 - BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \ 24 - BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \ 25 - return e->internal >> ilog2(_factor); \ 42 + BUILD_BUG_ON(!__builtin_constant_p(_precision)); \ 43 + BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \ 44 + BUILD_BUG_ON((_precision) > 30); \ 45 + BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \ 46 + return e->internal >> (_precision); \ 26 47 } \ 27 48 static inline void ewma_##name##_add(struct ewma_##name *e, \ 28 49 unsigned long val) \ 29 50 { \ 30 51 unsigned long internal = ACCESS_ONCE(e->internal); \ 31 - unsigned long weight = ilog2(_weight); \ 32 - unsigned long factor = ilog2(_factor); \ 52 + unsigned long weight_rcp = ilog2(_weight_rcp); \ 53 + unsigned long precision = _precision; \ 33 54 \ 34 - BUILD_BUG_ON(!__builtin_constant_p(_factor)); \ 35 - BUILD_BUG_ON(!__builtin_constant_p(_weight)); \ 36 - BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \ 37 - BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \ 55 + BUILD_BUG_ON(!__builtin_constant_p(_precision)); \ 56 + BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \ 57 + BUILD_BUG_ON((_precision) > 30); \ 58 + BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \ 38 59 \ 39 60 ACCESS_ONCE(e->internal) = internal ? \ 40 - (((internal << weight) - internal) + \ 41 - (val << factor)) >> weight : \ 42 - (val << factor); \ 61 + (((internal << weight_rcp) - internal) + \ 62 + (val << precision)) >> weight_rcp : \ 63 + (val << precision); \ 43 64 } 44 65 45 66 #endif /* _LINUX_AVERAGE_H */
+1 -1
include/linux/mlx4/driver.h
··· 109 109 int i; 110 110 111 111 for (i = ETH_ALEN; i > 0; i--) { 112 - addr[i - 1] = mac && 0xFF; 112 + addr[i - 1] = mac & 0xFF; 113 113 mac >>= 8; 114 114 } 115 115 }
+9 -20
include/linux/netdevice.h
··· 330 330 331 331 enum { 332 332 NAPI_STATE_SCHED, /* Poll is scheduled */ 333 + NAPI_STATE_MISSED, /* reschedule a napi */ 333 334 NAPI_STATE_DISABLE, /* Disable pending */ 334 335 NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ 335 336 NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */ ··· 339 338 }; 340 339 341 340 enum { 342 - NAPIF_STATE_SCHED = (1UL << NAPI_STATE_SCHED), 343 - NAPIF_STATE_DISABLE = (1UL << NAPI_STATE_DISABLE), 344 - NAPIF_STATE_NPSVC = (1UL << NAPI_STATE_NPSVC), 345 - NAPIF_STATE_HASHED = (1UL << NAPI_STATE_HASHED), 346 - NAPIF_STATE_NO_BUSY_POLL = (1UL << NAPI_STATE_NO_BUSY_POLL), 347 - NAPIF_STATE_IN_BUSY_POLL = (1UL << NAPI_STATE_IN_BUSY_POLL), 341 + NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED), 342 + NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED), 343 + NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE), 344 + NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC), 345 + NAPIF_STATE_HASHED = BIT(NAPI_STATE_HASHED), 346 + NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL), 347 + NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL), 348 348 }; 349 349 350 350 enum gro_result { ··· 416 414 return test_bit(NAPI_STATE_DISABLE, &n->state); 417 415 } 418 416 419 - /** 420 - * napi_schedule_prep - check if NAPI can be scheduled 421 - * @n: NAPI context 422 - * 423 - * Test if NAPI routine is already running, and if not mark 424 - * it as running. This is used as a condition variable to 425 - * insure only one NAPI poll instance runs. We also make 426 - * sure there is no pending NAPI disable. 427 - */ 428 - static inline bool napi_schedule_prep(struct napi_struct *n) 429 - { 430 - return !napi_disable_pending(n) && 431 - !test_and_set_bit(NAPI_STATE_SCHED, &n->state); 432 - } 417 + bool napi_schedule_prep(struct napi_struct *n); 433 418 434 419 /** 435 420 * napi_schedule - schedule NAPI poll
+3 -3
include/net/netfilter/nf_tables.h
··· 988 988 const struct nlattr *nla, u32 objtype, 989 989 u8 genmask); 990 990 991 - int nft_obj_notify(struct net *net, struct nft_table *table, 992 - struct nft_object *obj, u32 portid, u32 seq, 993 - int event, int family, int report, gfp_t gfp); 991 + void nft_obj_notify(struct net *net, struct nft_table *table, 992 + struct nft_object *obj, u32 portid, u32 seq, 993 + int event, int family, int report, gfp_t gfp); 994 994 995 995 /** 996 996 * struct nft_object_type - stateful object type
+1
include/net/sock.h
··· 1526 1526 void sk_free(struct sock *sk); 1527 1527 void sk_destruct(struct sock *sk); 1528 1528 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority); 1529 + void sk_free_unlock_clone(struct sock *sk); 1529 1530 1530 1531 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, 1531 1532 gfp_t priority);
+2
include/trace/events/rxrpc.h
··· 119 119 rxrpc_recvmsg_full, 120 120 rxrpc_recvmsg_hole, 121 121 rxrpc_recvmsg_next, 122 + rxrpc_recvmsg_requeue, 122 123 rxrpc_recvmsg_return, 123 124 rxrpc_recvmsg_terminal, 124 125 rxrpc_recvmsg_to_be_accepted, ··· 278 277 EM(rxrpc_recvmsg_full, "FULL") \ 279 278 EM(rxrpc_recvmsg_hole, "HOLE") \ 280 279 EM(rxrpc_recvmsg_next, "NEXT") \ 280 + EM(rxrpc_recvmsg_requeue, "REQU") \ 281 281 EM(rxrpc_recvmsg_return, "RETN") \ 282 282 EM(rxrpc_recvmsg_terminal, "TERM") \ 283 283 EM(rxrpc_recvmsg_to_be_accepted, "TBAC") \
+1 -1
kernel/bpf/verifier.c
··· 33 33 * - out of bounds or malformed jumps 34 34 * The second pass is all possible path descent from the 1st insn. 35 35 * Since it's analyzing all pathes through the program, the length of the 36 - * analysis is limited to 32k insn, which may be hit even if total number of 36 + * analysis is limited to 64k insn, which may be hit even if total number of 37 37 * insn is less then 4K, but there are too many branches that change stack/regs. 38 38 * Number of 'branches to be analyzed' is limited to 1k 39 39 *
+11 -9
net/batman-adv/fragmentation.c
··· 239 239 spin_unlock_bh(&chain->lock); 240 240 241 241 err: 242 - if (!ret) 242 + if (!ret) { 243 243 kfree(frag_entry_new); 244 + kfree_skb(skb); 245 + } 244 246 245 247 return ret; 246 248 } ··· 315 313 * 316 314 * There are three possible outcomes: 1) Packet is merged: Return true and 317 315 * set *skb to merged packet; 2) Packet is buffered: Return true and set *skb 318 - * to NULL; 3) Error: Return false and leave skb as is. 316 + * to NULL; 3) Error: Return false and free skb. 319 317 * 320 318 * Return: true when packet is merged or buffered, false when skb is not not 321 319 * used. ··· 340 338 goto out_err; 341 339 342 340 out: 343 - *skb = skb_out; 344 341 ret = true; 345 342 out_err: 343 + *skb = skb_out; 346 344 return ret; 347 345 } 348 346 ··· 501 499 502 500 /* Eat and send fragments from the tail of skb */ 503 501 while (skb->len > max_fragment_size) { 502 + /* The initial check in this function should cover this case */ 503 + if (unlikely(frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1)) { 504 + ret = -EINVAL; 505 + goto put_primary_if; 506 + } 507 + 504 508 skb_fragment = batadv_frag_create(skb, &frag_header, mtu); 505 509 if (!skb_fragment) { 506 510 ret = -ENOMEM; ··· 523 515 } 524 516 525 517 frag_header.no++; 526 - 527 - /* The initial check in this function should cover this case */ 528 - if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) { 529 - ret = -EINVAL; 530 - goto put_primary_if; 531 - } 532 518 } 533 519 534 520 /* Make room for the fragment header. */
+1 -1
net/batman-adv/types.h
··· 402 402 struct rcu_head rcu; 403 403 }; 404 404 405 - DECLARE_EWMA(throughput, 1024, 8) 405 + DECLARE_EWMA(throughput, 10, 8) 406 406 407 407 /** 408 408 * struct batadv_hardif_neigh_node_bat_v - B.A.T.M.A.N. V private neighbor
+2 -1
net/bridge/br_forward.c
··· 186 186 /* Do not flood unicast traffic to ports that turn it off */ 187 187 if (pkt_type == BR_PKT_UNICAST && !(p->flags & BR_FLOOD)) 188 188 continue; 189 + /* Do not flood if mc off, except for traffic we originate */ 189 190 if (pkt_type == BR_PKT_MULTICAST && 190 - !(p->flags & BR_MCAST_FLOOD)) 191 + !(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev) 191 192 continue; 192 193 193 194 /* Do not flood to ports that enable proxy ARP */
+1 -1
net/bridge/br_vlan.c
··· 997 997 RCU_INIT_POINTER(p->vlgrp, NULL); 998 998 synchronize_rcu(); 999 999 vlan_tunnel_deinit(vg); 1000 - err_vlan_enabled: 1001 1000 err_tunnel_init: 1002 1001 rhashtable_destroy(&vg->vlan_hash); 1003 1002 err_rhtbl: 1003 + err_vlan_enabled: 1004 1004 kfree(vg); 1005 1005 1006 1006 goto out;
+103 -8
net/core/dev.c
··· 1698 1698 static struct static_key netstamp_needed __read_mostly; 1699 1699 #ifdef HAVE_JUMP_LABEL 1700 1700 static atomic_t netstamp_needed_deferred; 1701 + static atomic_t netstamp_wanted; 1701 1702 static void netstamp_clear(struct work_struct *work) 1702 1703 { 1703 1704 int deferred = atomic_xchg(&netstamp_needed_deferred, 0); 1705 + int wanted; 1704 1706 1705 - while (deferred--) 1706 - static_key_slow_dec(&netstamp_needed); 1707 + wanted = atomic_add_return(deferred, &netstamp_wanted); 1708 + if (wanted > 0) 1709 + static_key_enable(&netstamp_needed); 1710 + else 1711 + static_key_disable(&netstamp_needed); 1707 1712 } 1708 1713 static DECLARE_WORK(netstamp_work, netstamp_clear); 1709 1714 #endif 1710 1715 1711 1716 void net_enable_timestamp(void) 1712 1717 { 1718 + #ifdef HAVE_JUMP_LABEL 1719 + int wanted; 1720 + 1721 + while (1) { 1722 + wanted = atomic_read(&netstamp_wanted); 1723 + if (wanted <= 0) 1724 + break; 1725 + if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted) 1726 + return; 1727 + } 1728 + atomic_inc(&netstamp_needed_deferred); 1729 + schedule_work(&netstamp_work); 1730 + #else 1713 1731 static_key_slow_inc(&netstamp_needed); 1732 + #endif 1714 1733 } 1715 1734 EXPORT_SYMBOL(net_enable_timestamp); 1716 1735 1717 1736 void net_disable_timestamp(void) 1718 1737 { 1719 1738 #ifdef HAVE_JUMP_LABEL 1720 - /* net_disable_timestamp() can be called from non process context */ 1721 - atomic_inc(&netstamp_needed_deferred); 1739 + int wanted; 1740 + 1741 + while (1) { 1742 + wanted = atomic_read(&netstamp_wanted); 1743 + if (wanted <= 1) 1744 + break; 1745 + if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted) 1746 + return; 1747 + } 1748 + atomic_dec(&netstamp_needed_deferred); 1722 1749 schedule_work(&netstamp_work); 1723 1750 #else 1724 1751 static_key_slow_dec(&netstamp_needed); ··· 4911 4884 EXPORT_SYMBOL(__napi_schedule); 4912 4885 4913 4886 /** 4887 + * napi_schedule_prep - check if napi can be scheduled 4888 + * 
@n: napi context 4889 + * 4890 + * Test if NAPI routine is already running, and if not mark 4891 + * it as running. This is used as a condition variable 4892 + * insure only one NAPI poll instance runs. We also make 4893 + * sure there is no pending NAPI disable. 4894 + */ 4895 + bool napi_schedule_prep(struct napi_struct *n) 4896 + { 4897 + unsigned long val, new; 4898 + 4899 + do { 4900 + val = READ_ONCE(n->state); 4901 + if (unlikely(val & NAPIF_STATE_DISABLE)) 4902 + return false; 4903 + new = val | NAPIF_STATE_SCHED; 4904 + 4905 + /* Sets STATE_MISSED bit if STATE_SCHED was already set 4906 + * This was suggested by Alexander Duyck, as compiler 4907 + * emits better code than : 4908 + * if (val & NAPIF_STATE_SCHED) 4909 + * new |= NAPIF_STATE_MISSED; 4910 + */ 4911 + new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED * 4912 + NAPIF_STATE_MISSED; 4913 + } while (cmpxchg(&n->state, val, new) != val); 4914 + 4915 + return !(val & NAPIF_STATE_SCHED); 4916 + } 4917 + EXPORT_SYMBOL(napi_schedule_prep); 4918 + 4919 + /** 4914 4920 * __napi_schedule_irqoff - schedule for receive 4915 4921 * @n: entry to schedule 4916 4922 * ··· 4957 4897 4958 4898 bool napi_complete_done(struct napi_struct *n, int work_done) 4959 4899 { 4960 - unsigned long flags; 4900 + unsigned long flags, val, new; 4961 4901 4962 4902 /* 4963 4903 * 1) Don't let napi dequeue from the cpu poll list ··· 4987 4927 list_del_init(&n->poll_list); 4988 4928 local_irq_restore(flags); 4989 4929 } 4990 - WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state)); 4930 + 4931 + do { 4932 + val = READ_ONCE(n->state); 4933 + 4934 + WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED)); 4935 + 4936 + new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED); 4937 + 4938 + /* If STATE_MISSED was set, leave STATE_SCHED set, 4939 + * because we will call napi->poll() one more time. 4940 + * This C code was suggested by Alexander Duyck to help gcc. 
4941 + */ 4942 + new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED * 4943 + NAPIF_STATE_SCHED; 4944 + } while (cmpxchg(&n->state, val, new) != val); 4945 + 4946 + if (unlikely(val & NAPIF_STATE_MISSED)) { 4947 + __napi_schedule(n); 4948 + return false; 4949 + } 4950 + 4991 4951 return true; 4992 4952 } 4993 4953 EXPORT_SYMBOL(napi_complete_done); ··· 5033 4953 { 5034 4954 int rc; 5035 4955 4956 + /* Busy polling means there is a high chance device driver hard irq 4957 + * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was 4958 + * set in napi_schedule_prep(). 4959 + * Since we are about to call napi->poll() once more, we can safely 4960 + * clear NAPI_STATE_MISSED. 4961 + * 4962 + * Note: x86 could use a single "lock and ..." instruction 4963 + * to perform these two clear_bit() 4964 + */ 4965 + clear_bit(NAPI_STATE_MISSED, &napi->state); 5036 4966 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state); 5037 4967 5038 4968 local_bh_disable(); ··· 5178 5088 struct napi_struct *napi; 5179 5089 5180 5090 napi = container_of(timer, struct napi_struct, timer); 5181 - if (napi->gro_list) 5182 - napi_schedule_irqoff(napi); 5091 + 5092 + /* Note : we use a relaxed variant of napi_schedule_prep() not setting 5093 + * NAPI_STATE_MISSED, since we do not react to a device IRQ. 5094 + */ 5095 + if (napi->gro_list && !napi_disable_pending(napi) && 5096 + !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) 5097 + __napi_schedule_irqoff(napi); 5183 5098 5184 5099 return HRTIMER_NORESTART; 5185 5100 }
+11 -5
net/core/sock.c
··· 1539 1539 is_charged = sk_filter_charge(newsk, filter); 1540 1540 1541 1541 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) { 1542 - /* It is still raw copy of parent, so invalidate 1543 - * destructor and make plain sk_free() */ 1544 - newsk->sk_destruct = NULL; 1545 - bh_unlock_sock(newsk); 1546 - sk_free(newsk); 1542 + sk_free_unlock_clone(newsk); 1547 1543 newsk = NULL; 1548 1544 goto out; 1549 1545 } ··· 1587 1591 return newsk; 1588 1592 } 1589 1593 EXPORT_SYMBOL_GPL(sk_clone_lock); 1594 + 1595 + void sk_free_unlock_clone(struct sock *sk) 1596 + { 1597 + /* It is still raw copy of parent, so invalidate 1598 + * destructor and make plain sk_free() */ 1599 + sk->sk_destruct = NULL; 1600 + bh_unlock_sock(sk); 1601 + sk_free(sk); 1602 + } 1603 + EXPORT_SYMBOL_GPL(sk_free_unlock_clone); 1590 1604 1591 1605 void sk_setup_caps(struct sock *sk, struct dst_entry *dst) 1592 1606 {
+8 -2
net/dccp/input.c
··· 577 577 struct dccp_sock *dp = dccp_sk(sk); 578 578 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); 579 579 const int old_state = sk->sk_state; 580 + bool acceptable; 580 581 int queued = 0; 581 582 582 583 /* ··· 604 603 */ 605 604 if (sk->sk_state == DCCP_LISTEN) { 606 605 if (dh->dccph_type == DCCP_PKT_REQUEST) { 607 - if (inet_csk(sk)->icsk_af_ops->conn_request(sk, 608 - skb) < 0) 606 + /* It is possible that we process SYN packets from backlog, 607 + * so we need to make sure to disable BH right there. 608 + */ 609 + local_bh_disable(); 610 + acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0; 611 + local_bh_enable(); 612 + if (!acceptable) 609 613 return 1; 610 614 consume_skb(skb); 611 615 return 0;
+1 -4
net/dccp/minisocks.c
··· 119 119 * Activate features: initialise CCIDs, sequence windows etc. 120 120 */ 121 121 if (dccp_feat_activate_values(newsk, &dreq->dreq_featneg)) { 122 - /* It is still raw copy of parent, so invalidate 123 - * destructor and make plain sk_free() */ 124 - newsk->sk_destruct = NULL; 125 - sk_free(newsk); 122 + sk_free_unlock_clone(newsk); 126 123 return NULL; 127 124 } 128 125 dccp_init_xmit_timers(newsk);
+1
net/ipv4/fib_frontend.c
··· 622 622 [RTA_ENCAP_TYPE] = { .type = NLA_U16 }, 623 623 [RTA_ENCAP] = { .type = NLA_NESTED }, 624 624 [RTA_UID] = { .type = NLA_U32 }, 625 + [RTA_MARK] = { .type = NLA_U32 }, 625 626 }; 626 627 627 628 static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
+4 -3
net/ipv4/netfilter.c
··· 23 23 struct rtable *rt; 24 24 struct flowi4 fl4 = {}; 25 25 __be32 saddr = iph->saddr; 26 - __u8 flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0; 26 + const struct sock *sk = skb_to_full_sk(skb); 27 + __u8 flags = sk ? inet_sk_flowi_flags(sk) : 0; 27 28 struct net_device *dev = skb_dst(skb)->dev; 28 29 unsigned int hh_len; 29 30 ··· 41 40 fl4.daddr = iph->daddr; 42 41 fl4.saddr = saddr; 43 42 fl4.flowi4_tos = RT_TOS(iph->tos); 44 - fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0; 43 + fl4.flowi4_oif = sk ? sk->sk_bound_dev_if : 0; 45 44 if (!fl4.flowi4_oif) 46 45 fl4.flowi4_oif = l3mdev_master_ifindex(dev); 47 46 fl4.flowi4_mark = skb->mark; ··· 62 61 xfrm_decode_session(skb, flowi4_to_flowi(&fl4), AF_INET) == 0) { 63 62 struct dst_entry *dst = skb_dst(skb); 64 63 skb_dst_set(skb, NULL); 65 - dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), skb->sk, 0); 64 + dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), sk, 0); 66 65 if (IS_ERR(dst)) 67 66 return PTR_ERR(dst); 68 67 skb_dst_set(skb, dst);
+12 -3
net/ipv4/tcp.c
··· 1110 1110 flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0; 1111 1111 err = __inet_stream_connect(sk->sk_socket, msg->msg_name, 1112 1112 msg->msg_namelen, flags, 1); 1113 - inet->defer_connect = 0; 1114 - *copied = tp->fastopen_req->copied; 1115 - tcp_free_fastopen_req(tp); 1113 + /* fastopen_req could already be freed in __inet_stream_connect 1114 + * if the connection times out or gets rst 1115 + */ 1116 + if (tp->fastopen_req) { 1117 + *copied = tp->fastopen_req->copied; 1118 + tcp_free_fastopen_req(tp); 1119 + inet->defer_connect = 0; 1120 + } 1116 1121 return err; 1117 1122 } 1118 1123 ··· 2322 2317 tcp_init_send_head(sk); 2323 2318 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); 2324 2319 __sk_dst_reset(sk); 2320 + 2321 + /* Clean up fastopen related fields */ 2322 + tcp_free_fastopen_req(tp); 2323 + inet->defer_connect = 0; 2325 2324 2326 2325 WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); 2327 2326
+8 -2
net/ipv4/tcp_input.c
··· 5886 5886 if (th->syn) { 5887 5887 if (th->fin) 5888 5888 goto discard; 5889 - if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) 5890 - return 1; 5889 + /* It is possible that we process SYN packets from backlog, 5890 + * so we need to make sure to disable BH right there. 5891 + */ 5892 + local_bh_disable(); 5893 + acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0; 5894 + local_bh_enable(); 5891 5895 5896 + if (!acceptable) 5897 + return 1; 5892 5898 consume_skb(skb); 5893 5899 return 0; 5894 5900 }
+15 -7
net/ipv6/addrconf.c
··· 5693 5693 struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1; 5694 5694 struct net *net = (struct net *)ctl->extra2; 5695 5695 5696 + if (!rtnl_trylock()) 5697 + return restart_syscall(); 5698 + 5696 5699 ret = proc_dointvec(ctl, write, buffer, lenp, ppos); 5697 5700 5698 5701 if (write) { 5699 5702 new_val = *((int *)ctl->data); 5700 5703 5701 - if (check_addr_gen_mode(new_val) < 0) 5702 - return -EINVAL; 5704 + if (check_addr_gen_mode(new_val) < 0) { 5705 + ret = -EINVAL; 5706 + goto out; 5707 + } 5703 5708 5704 5709 /* request for default */ 5705 5710 if (&net->ipv6.devconf_dflt->addr_gen_mode == ctl->data) { ··· 5713 5708 /* request for individual net device */ 5714 5709 } else { 5715 5710 if (!idev) 5716 - return ret; 5711 + goto out; 5717 5712 5718 - if (check_stable_privacy(idev, net, new_val) < 0) 5719 - return -EINVAL; 5713 + if (check_stable_privacy(idev, net, new_val) < 0) { 5714 + ret = -EINVAL; 5715 + goto out; 5716 + } 5720 5717 5721 5718 if (idev->cnf.addr_gen_mode != new_val) { 5722 5719 idev->cnf.addr_gen_mode = new_val; 5723 - rtnl_lock(); 5724 5720 addrconf_dev_config(idev->dev); 5725 - rtnl_unlock(); 5726 5721 } 5727 5722 } 5728 5723 } 5724 + 5725 + out: 5726 + rtnl_unlock(); 5729 5727 5730 5728 return ret; 5731 5729 }
+1
net/ipv6/netfilter/nf_conntrack_reasm.c
··· 589 589 hdr = ipv6_hdr(skb); 590 590 fhdr = (struct frag_hdr *)skb_transport_header(skb); 591 591 592 + skb_orphan(skb); 592 593 fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr, 593 594 skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr)); 594 595 if (fq == NULL) {
+16 -5
net/ipv6/route.c
··· 2169 2169 static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg) 2170 2170 { 2171 2171 struct nl_info *info = &cfg->fc_nlinfo; 2172 + struct net *net = info->nl_net; 2172 2173 struct sk_buff *skb = NULL; 2173 2174 struct fib6_table *table; 2174 - int err; 2175 + int err = -ENOENT; 2175 2176 2177 + if (rt == net->ipv6.ip6_null_entry) 2178 + goto out_put; 2176 2179 table = rt->rt6i_table; 2177 2180 write_lock_bh(&table->tb6_lock); 2178 2181 ··· 2187 2184 if (skb) { 2188 2185 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0; 2189 2186 2190 - if (rt6_fill_node(info->nl_net, skb, rt, 2187 + if (rt6_fill_node(net, skb, rt, 2191 2188 NULL, NULL, 0, RTM_DELROUTE, 2192 2189 info->portid, seq, 0) < 0) { 2193 2190 kfree_skb(skb); ··· 2201 2198 rt6i_siblings) { 2202 2199 err = fib6_del(sibling, info); 2203 2200 if (err) 2204 - goto out; 2201 + goto out_unlock; 2205 2202 } 2206 2203 } 2207 2204 2208 2205 err = fib6_del(rt, info); 2209 - out: 2206 + out_unlock: 2210 2207 write_unlock_bh(&table->tb6_lock); 2208 + out_put: 2211 2209 ip6_rt_put(rt); 2212 2210 2213 2211 if (skb) { 2214 - rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_IPV6_ROUTE, 2212 + rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE, 2215 2213 info->nlh, gfp_any()); 2216 2214 } 2217 2215 return err; ··· 2895 2891 [RTA_ENCAP] = { .type = NLA_NESTED }, 2896 2892 [RTA_EXPIRES] = { .type = NLA_U32 }, 2897 2893 [RTA_UID] = { .type = NLA_U32 }, 2894 + [RTA_MARK] = { .type = NLA_U32 }, 2898 2895 }; 2899 2896 2900 2897 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, ··· 3630 3625 fl6.flowi6_oif = oif; 3631 3626 3632 3627 rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6); 3628 + } 3629 + 3630 + if (rt == net->ipv6.ip6_null_entry) { 3631 + err = rt->dst.error; 3632 + ip6_rt_put(rt); 3633 + goto errout; 3633 3634 } 3634 3635 3635 3636 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+2 -1
net/mac80211/agg-rx.c
··· 85 85 ht_dbg(sta->sdata, 86 86 "Rx BA session stop requested for %pM tid %u %s reason: %d\n", 87 87 sta->sta.addr, tid, 88 - initiator == WLAN_BACK_RECIPIENT ? "recipient" : "inititator", 88 + initiator == WLAN_BACK_RECIPIENT ? "recipient" : "initiator", 89 89 (int)reason); 90 90 91 91 if (drv_ampdu_action(local, sta->sdata, &params)) ··· 398 398 tid_agg_rx->timeout = timeout; 399 399 tid_agg_rx->stored_mpdu_num = 0; 400 400 tid_agg_rx->auto_seq = auto_seq; 401 + tid_agg_rx->started = false; 401 402 tid_agg_rx->reorder_buf_filtered = 0; 402 403 status = WLAN_STATUS_SUCCESS; 403 404
+1 -1
net/mac80211/ieee80211_i.h
··· 428 428 bool downgraded; 429 429 }; 430 430 431 - DECLARE_EWMA(beacon_signal, 16, 4) 431 + DECLARE_EWMA(beacon_signal, 4, 4) 432 432 433 433 struct ieee80211_if_managed { 434 434 struct timer_list timer;
+1
net/mac80211/pm.c
··· 168 168 break; 169 169 } 170 170 171 + flush_delayed_work(&sdata->dec_tailroom_needed_wk); 171 172 drv_remove_interface(local, sdata); 172 173 } 173 174
+23 -8
net/mac80211/rx.c
··· 4 4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 5 5 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net> 6 6 * Copyright 2013-2014 Intel Mobile Communications GmbH 7 - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH 7 + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH 8 8 * 9 9 * This program is free software; you can redistribute it and/or modify 10 10 * it under the terms of the GNU General Public License version 2 as ··· 1033 1033 1034 1034 buf_size = tid_agg_rx->buf_size; 1035 1035 head_seq_num = tid_agg_rx->head_seq_num; 1036 + 1037 + /* 1038 + * If the current MPDU's SN is smaller than the SSN, it shouldn't 1039 + * be reordered. 1040 + */ 1041 + if (unlikely(!tid_agg_rx->started)) { 1042 + if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) { 1043 + ret = false; 1044 + goto out; 1045 + } 1046 + tid_agg_rx->started = true; 1047 + } 1036 1048 1037 1049 /* frame with out of date sequence number */ 1038 1050 if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) { ··· 3892 3880 stats->last_rate = sta_stats_encode_rate(status); 3893 3881 3894 3882 stats->fragments++; 3883 + stats->packets++; 3895 3884 3896 3885 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { 3897 3886 stats->last_signal = status->signal; ··· 4086 4073 ieee80211_is_beacon(hdr->frame_control))) 4087 4074 ieee80211_scan_rx(local, skb); 4088 4075 4089 - if (pubsta) { 4090 - rx.sta = container_of(pubsta, struct sta_info, sta); 4091 - rx.sdata = rx.sta->sdata; 4092 - if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 4093 - return; 4094 - goto out; 4095 - } else if (ieee80211_is_data(fc)) { 4076 + if (ieee80211_is_data(fc)) { 4096 4077 struct sta_info *sta, *prev_sta; 4078 + 4079 + if (pubsta) { 4080 + rx.sta = container_of(pubsta, struct sta_info, sta); 4081 + rx.sdata = rx.sta->sdata; 4082 + if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 4083 + return; 4084 + goto out; 4085 + } 4097 4086 4098 4087 prev_sta = NULL; 4099 4088
+2 -2
net/mac80211/sta_info.c
··· 688 688 } 689 689 690 690 /* No need to do anything if the driver does all */ 691 - if (ieee80211_hw_check(&local->hw, AP_LINK_PS)) 691 + if (ieee80211_hw_check(&local->hw, AP_LINK_PS) && !local->ops->set_tim) 692 692 return; 693 693 694 694 if (sta->dead) ··· 1264 1264 sta_info_recalc_tim(sta); 1265 1265 1266 1266 ps_dbg(sdata, 1267 - "STA %pM aid %d sending %d filtered/%d PS frames since STA not sleeping anymore\n", 1267 + "STA %pM aid %d sending %d filtered/%d PS frames since STA woke up\n", 1268 1268 sta->sta.addr, sta->sta.aid, filtered, buffered); 1269 1269 1270 1270 ieee80211_check_fast_xmit(sta);
+5 -3
net/mac80211/sta_info.h
··· 189 189 * @auto_seq: used for offloaded BA sessions to automatically pick head_seq_and 190 190 * and ssn. 191 191 * @removed: this session is removed (but might have been found due to RCU) 192 + * @started: this session has started (head ssn or higher was received) 192 193 * 193 194 * This structure's lifetime is managed by RCU, assignments to 194 195 * the array holding it must hold the aggregation mutex. ··· 213 212 u16 ssn; 214 213 u16 buf_size; 215 214 u16 timeout; 216 - bool auto_seq; 217 - bool removed; 215 + u8 auto_seq:1, 216 + removed:1, 217 + started:1; 218 218 }; 219 219 220 220 /** ··· 372 370 unsigned int fail_avg; 373 371 }; 374 372 375 - DECLARE_EWMA(signal, 1024, 8) 373 + DECLARE_EWMA(signal, 10, 8) 376 374 377 375 struct ieee80211_sta_rx_stats { 378 376 unsigned long packets;
+2 -1
net/mac80211/status.c
··· 51 51 struct ieee80211_hdr *hdr = (void *)skb->data; 52 52 int ac; 53 53 54 - if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER) { 54 + if (info->flags & (IEEE80211_TX_CTL_NO_PS_BUFFER | 55 + IEEE80211_TX_CTL_AMPDU)) { 55 56 ieee80211_free_txskb(&local->hw, skb); 56 57 return; 57 58 }
-2
net/netfilter/nf_conntrack_sip.c
··· 1628 1628 ports[ports_c++] = SIP_PORT; 1629 1629 1630 1630 for (i = 0; i < ports_c; i++) { 1631 - memset(&sip[i], 0, sizeof(sip[i])); 1632 - 1633 1631 nf_ct_helper_init(&sip[4 * i], AF_INET, IPPROTO_UDP, "sip", 1634 1632 SIP_PORT, ports[i], i, sip_exp_policy, 1635 1633 SIP_EXPECT_MAX,
+55 -78
net/netfilter/nf_tables_api.c
··· 461 461 return -1; 462 462 } 463 463 464 - static int nf_tables_table_notify(const struct nft_ctx *ctx, int event) 464 + static void nf_tables_table_notify(const struct nft_ctx *ctx, int event) 465 465 { 466 466 struct sk_buff *skb; 467 467 int err; 468 468 469 469 if (!ctx->report && 470 470 !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES)) 471 - return 0; 471 + return; 472 472 473 - err = -ENOBUFS; 474 473 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 475 474 if (skb == NULL) 476 475 goto err; ··· 481 482 goto err; 482 483 } 483 484 484 - err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES, 485 - ctx->report, GFP_KERNEL); 485 + nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES, 486 + ctx->report, GFP_KERNEL); 487 + return; 486 488 err: 487 - if (err < 0) { 488 - nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, 489 - err); 490 - } 491 - return err; 489 + nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS); 492 490 } 493 491 494 492 static int nf_tables_dump_tables(struct sk_buff *skb, ··· 1046 1050 return -1; 1047 1051 } 1048 1052 1049 - static int nf_tables_chain_notify(const struct nft_ctx *ctx, int event) 1053 + static void nf_tables_chain_notify(const struct nft_ctx *ctx, int event) 1050 1054 { 1051 1055 struct sk_buff *skb; 1052 1056 int err; 1053 1057 1054 1058 if (!ctx->report && 1055 1059 !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES)) 1056 - return 0; 1060 + return; 1057 1061 1058 - err = -ENOBUFS; 1059 1062 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 1060 1063 if (skb == NULL) 1061 1064 goto err; ··· 1067 1072 goto err; 1068 1073 } 1069 1074 1070 - err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES, 1071 - ctx->report, GFP_KERNEL); 1075 + nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES, 1076 + ctx->report, GFP_KERNEL); 1077 + return; 1072 1078 err: 1073 - if (err < 0) { 1074 - nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, 1075 - err); 1076 - } 1077 - 
return err; 1079 + nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS); 1078 1080 } 1079 1081 1080 1082 static int nf_tables_dump_chains(struct sk_buff *skb, ··· 1926 1934 return -1; 1927 1935 } 1928 1936 1929 - static int nf_tables_rule_notify(const struct nft_ctx *ctx, 1930 - const struct nft_rule *rule, 1931 - int event) 1937 + static void nf_tables_rule_notify(const struct nft_ctx *ctx, 1938 + const struct nft_rule *rule, int event) 1932 1939 { 1933 1940 struct sk_buff *skb; 1934 1941 int err; 1935 1942 1936 1943 if (!ctx->report && 1937 1944 !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES)) 1938 - return 0; 1945 + return; 1939 1946 1940 - err = -ENOBUFS; 1941 1947 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 1942 1948 if (skb == NULL) 1943 1949 goto err; ··· 1948 1958 goto err; 1949 1959 } 1950 1960 1951 - err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES, 1952 - ctx->report, GFP_KERNEL); 1961 + nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES, 1962 + ctx->report, GFP_KERNEL); 1963 + return; 1953 1964 err: 1954 - if (err < 0) { 1955 - nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, 1956 - err); 1957 - } 1958 - return err; 1965 + nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS); 1959 1966 } 1960 1967 1961 1968 struct nft_rule_dump_ctx { ··· 2683 2696 return -1; 2684 2697 } 2685 2698 2686 - static int nf_tables_set_notify(const struct nft_ctx *ctx, 2687 - const struct nft_set *set, 2688 - int event, gfp_t gfp_flags) 2699 + static void nf_tables_set_notify(const struct nft_ctx *ctx, 2700 + const struct nft_set *set, int event, 2701 + gfp_t gfp_flags) 2689 2702 { 2690 2703 struct sk_buff *skb; 2691 2704 u32 portid = ctx->portid; ··· 2693 2706 2694 2707 if (!ctx->report && 2695 2708 !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES)) 2696 - return 0; 2709 + return; 2697 2710 2698 - err = -ENOBUFS; 2699 2711 skb = nlmsg_new(NLMSG_GOODSIZE, gfp_flags); 2700 2712 if (skb == NULL) 2701 
2713 goto err; ··· 2705 2719 goto err; 2706 2720 } 2707 2721 2708 - err = nfnetlink_send(skb, ctx->net, portid, NFNLGRP_NFTABLES, 2709 - ctx->report, gfp_flags); 2722 + nfnetlink_send(skb, ctx->net, portid, NFNLGRP_NFTABLES, ctx->report, 2723 + gfp_flags); 2724 + return; 2710 2725 err: 2711 - if (err < 0) 2712 - nfnetlink_set_err(ctx->net, portid, NFNLGRP_NFTABLES, err); 2713 - return err; 2726 + nfnetlink_set_err(ctx->net, portid, NFNLGRP_NFTABLES, -ENOBUFS); 2714 2727 } 2715 2728 2716 2729 static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb) ··· 3489 3504 return -1; 3490 3505 } 3491 3506 3492 - static int nf_tables_setelem_notify(const struct nft_ctx *ctx, 3493 - const struct nft_set *set, 3494 - const struct nft_set_elem *elem, 3495 - int event, u16 flags) 3507 + static void nf_tables_setelem_notify(const struct nft_ctx *ctx, 3508 + const struct nft_set *set, 3509 + const struct nft_set_elem *elem, 3510 + int event, u16 flags) 3496 3511 { 3497 3512 struct net *net = ctx->net; 3498 3513 u32 portid = ctx->portid; ··· 3500 3515 int err; 3501 3516 3502 3517 if (!ctx->report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES)) 3503 - return 0; 3518 + return; 3504 3519 3505 - err = -ENOBUFS; 3506 3520 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 3507 3521 if (skb == NULL) 3508 3522 goto err; ··· 3513 3529 goto err; 3514 3530 } 3515 3531 3516 - err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, ctx->report, 3517 - GFP_KERNEL); 3532 + nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, ctx->report, 3533 + GFP_KERNEL); 3534 + return; 3518 3535 err: 3519 - if (err < 0) 3520 - nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err); 3521 - return err; 3536 + nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, -ENOBUFS); 3522 3537 } 3523 3538 3524 3539 static struct nft_trans *nft_trans_elem_alloc(struct nft_ctx *ctx, ··· 4459 4476 return nft_delobj(&ctx, obj); 4460 4477 } 4461 4478 4462 - int nft_obj_notify(struct net *net, struct nft_table 
*table, 4463 - struct nft_object *obj, u32 portid, u32 seq, int event, 4464 - int family, int report, gfp_t gfp) 4479 + void nft_obj_notify(struct net *net, struct nft_table *table, 4480 + struct nft_object *obj, u32 portid, u32 seq, int event, 4481 + int family, int report, gfp_t gfp) 4465 4482 { 4466 4483 struct sk_buff *skb; 4467 4484 int err; 4468 4485 4469 4486 if (!report && 4470 4487 !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES)) 4471 - return 0; 4488 + return; 4472 4489 4473 - err = -ENOBUFS; 4474 4490 skb = nlmsg_new(NLMSG_GOODSIZE, gfp); 4475 4491 if (skb == NULL) 4476 4492 goto err; ··· 4481 4499 goto err; 4482 4500 } 4483 4501 4484 - err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report, gfp); 4502 + nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report, gfp); 4503 + return; 4485 4504 err: 4486 - if (err < 0) { 4487 - nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err); 4488 - } 4489 - return err; 4505 + nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, -ENOBUFS); 4490 4506 } 4491 4507 EXPORT_SYMBOL_GPL(nft_obj_notify); 4492 4508 4493 - static int nf_tables_obj_notify(const struct nft_ctx *ctx, 4494 - struct nft_object *obj, int event) 4509 + static void nf_tables_obj_notify(const struct nft_ctx *ctx, 4510 + struct nft_object *obj, int event) 4495 4511 { 4496 - return nft_obj_notify(ctx->net, ctx->table, obj, ctx->portid, 4497 - ctx->seq, event, ctx->afi->family, ctx->report, 4498 - GFP_KERNEL); 4512 + nft_obj_notify(ctx->net, ctx->table, obj, ctx->portid, ctx->seq, event, 4513 + ctx->afi->family, ctx->report, GFP_KERNEL); 4499 4514 } 4500 4515 4501 4516 static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net, ··· 4522 4543 return -EMSGSIZE; 4523 4544 } 4524 4545 4525 - static int nf_tables_gen_notify(struct net *net, struct sk_buff *skb, int event) 4546 + static void nf_tables_gen_notify(struct net *net, struct sk_buff *skb, 4547 + int event) 4526 4548 { 4527 4549 struct nlmsghdr *nlh = nlmsg_hdr(skb); 4528 4550 struct 
sk_buff *skb2; ··· 4531 4551 4532 4552 if (nlmsg_report(nlh) && 4533 4553 !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES)) 4534 - return 0; 4554 + return; 4535 4555 4536 - err = -ENOBUFS; 4537 4556 skb2 = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 4538 4557 if (skb2 == NULL) 4539 4558 goto err; ··· 4544 4565 goto err; 4545 4566 } 4546 4567 4547 - err = nfnetlink_send(skb2, net, NETLINK_CB(skb).portid, 4548 - NFNLGRP_NFTABLES, nlmsg_report(nlh), GFP_KERNEL); 4568 + nfnetlink_send(skb2, net, NETLINK_CB(skb).portid, NFNLGRP_NFTABLES, 4569 + nlmsg_report(nlh), GFP_KERNEL); 4570 + return; 4549 4571 err: 4550 - if (err < 0) { 4551 - nfnetlink_set_err(net, NETLINK_CB(skb).portid, NFNLGRP_NFTABLES, 4552 - err); 4553 - } 4554 - return err; 4572 + nfnetlink_set_err(net, NETLINK_CB(skb).portid, NFNLGRP_NFTABLES, 4573 + -ENOBUFS); 4555 4574 } 4556 4575 4557 4576 static int nf_tables_getgen(struct net *net, struct sock *nlsk,
+4 -5
net/netfilter/nft_set_rbtree.c
··· 60 60 d = memcmp(this, key, set->klen); 61 61 if (d < 0) { 62 62 parent = parent->rb_left; 63 - /* In case of adjacent ranges, we always see the high 64 - * part of the range in first place, before the low one. 65 - * So don't update interval if the keys are equal. 66 - */ 67 - if (interval && nft_rbtree_equal(set, this, interval)) 63 + if (interval && 64 + nft_rbtree_equal(set, this, interval) && 65 + nft_rbtree_interval_end(this) && 66 + !nft_rbtree_interval_end(interval)) 68 67 continue; 69 68 interval = rbe; 70 69 } else if (d > 0)
+1 -2
net/openvswitch/actions.c
··· 796 796 unsigned long orig_dst; 797 797 struct rt6_info ovs_rt; 798 798 799 - if (!v6ops) { 799 + if (!v6ops) 800 800 goto err; 801 - } 802 801 803 802 prepare_frag(vport, skb, orig_network_offset, 804 803 ovs_key_mac_proto(key));
-1
net/openvswitch/conntrack.c
··· 485 485 } else if (key->eth.type == htons(ETH_P_IPV6)) { 486 486 enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone; 487 487 488 - skb_orphan(skb); 489 488 memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm)); 490 489 err = nf_ct_frag6_gather(net, skb, user); 491 490 if (err) {
+6 -2
net/packet/af_packet.c
··· 3103 3103 int addr_len) 3104 3104 { 3105 3105 struct sock *sk = sock->sk; 3106 - char name[15]; 3106 + char name[sizeof(uaddr->sa_data) + 1]; 3107 3107 3108 3108 /* 3109 3109 * Check legality ··· 3111 3111 3112 3112 if (addr_len != sizeof(struct sockaddr)) 3113 3113 return -EINVAL; 3114 - strlcpy(name, uaddr->sa_data, sizeof(name)); 3114 + /* uaddr->sa_data comes from the userspace, it's not guaranteed to be 3115 + * zero-terminated. 3116 + */ 3117 + memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data)); 3118 + name[sizeof(uaddr->sa_data)] = 0; 3115 3119 3116 3120 return packet_do_bind(sk, name, 0, pkt_sk(sk)->num); 3117 3121 }
+3 -7
net/rds/ib.c
··· 45 45 #include "ib.h" 46 46 #include "ib_mr.h" 47 47 48 - unsigned int rds_ib_mr_1m_pool_size = RDS_MR_1M_POOL_SIZE; 49 - unsigned int rds_ib_mr_8k_pool_size = RDS_MR_8K_POOL_SIZE; 48 + static unsigned int rds_ib_mr_1m_pool_size = RDS_MR_1M_POOL_SIZE; 49 + static unsigned int rds_ib_mr_8k_pool_size = RDS_MR_8K_POOL_SIZE; 50 50 unsigned int rds_ib_retry_count = RDS_IB_DEFAULT_RETRY_COUNT; 51 51 52 52 module_param(rds_ib_mr_1m_pool_size, int, 0444); ··· 438 438 if (ret) 439 439 goto out_sysctl; 440 440 441 - ret = rds_trans_register(&rds_ib_transport); 442 - if (ret) 443 - goto out_recv; 441 + rds_trans_register(&rds_ib_transport); 444 442 445 443 rds_info_register_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info); 446 444 447 445 goto out; 448 446 449 - out_recv: 450 - rds_ib_recv_exit(); 451 447 out_sysctl: 452 448 rds_ib_sysctl_exit(); 453 449 out_ibreg:
-2
net/rds/ib_mr.h
··· 107 107 }; 108 108 109 109 extern struct workqueue_struct *rds_ib_mr_wq; 110 - extern unsigned int rds_ib_mr_1m_pool_size; 111 - extern unsigned int rds_ib_mr_8k_pool_size; 112 110 extern bool prefer_frmr; 113 111 114 112 struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_dev,
+1 -1
net/rds/rds.h
··· 903 903 void rds_connect_complete(struct rds_connection *conn); 904 904 905 905 /* transport.c */ 906 - int rds_trans_register(struct rds_transport *trans); 906 + void rds_trans_register(struct rds_transport *trans); 907 907 void rds_trans_unregister(struct rds_transport *trans); 908 908 struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr); 909 909 void rds_trans_put(struct rds_transport *trans);
+1 -5
net/rds/tcp.c
··· 652 652 if (ret) 653 653 goto out_pernet; 654 654 655 - ret = rds_trans_register(&rds_tcp_transport); 656 - if (ret) 657 - goto out_recv; 655 + rds_trans_register(&rds_tcp_transport); 658 656 659 657 rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info); 660 658 661 659 goto out; 662 660 663 - out_recv: 664 - rds_tcp_recv_exit(); 665 661 out_pernet: 666 662 unregister_pernet_subsys(&rds_tcp_net_ops); 667 663 out_notifier:
+1 -3
net/rds/transport.c
··· 40 40 static struct rds_transport *transports[RDS_TRANS_COUNT]; 41 41 static DECLARE_RWSEM(rds_trans_sem); 42 42 43 - int rds_trans_register(struct rds_transport *trans) 43 + void rds_trans_register(struct rds_transport *trans) 44 44 { 45 45 BUG_ON(strlen(trans->t_name) + 1 > TRANSNAMSIZ); 46 46 ··· 55 55 } 56 56 57 57 up_write(&rds_trans_sem); 58 - 59 - return 0; 60 58 } 61 59 EXPORT_SYMBOL_GPL(rds_trans_register); 62 60
+9 -3
net/rxrpc/af_rxrpc.c
··· 290 290 cp.exclusive = false; 291 291 cp.service_id = srx->srx_service; 292 292 call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, gfp); 293 + /* The socket has been unlocked. */ 293 294 if (!IS_ERR(call)) 294 295 call->notify_rx = notify_rx; 295 296 296 - release_sock(&rx->sk); 297 + mutex_unlock(&call->user_mutex); 297 298 _leave(" = %p", call); 298 299 return call; 299 300 } ··· 311 310 void rxrpc_kernel_end_call(struct socket *sock, struct rxrpc_call *call) 312 311 { 313 312 _enter("%d{%d}", call->debug_id, atomic_read(&call->usage)); 313 + 314 + mutex_lock(&call->user_mutex); 314 315 rxrpc_release_call(rxrpc_sk(sock->sk), call); 316 + mutex_unlock(&call->user_mutex); 315 317 rxrpc_put_call(call, rxrpc_call_put_kernel); 316 318 } 317 319 EXPORT_SYMBOL(rxrpc_kernel_end_call); ··· 454 450 case RXRPC_SERVER_BOUND: 455 451 case RXRPC_SERVER_LISTENING: 456 452 ret = rxrpc_do_sendmsg(rx, m, len); 457 - break; 453 + /* The socket has been unlocked */ 454 + goto out; 458 455 default: 459 456 ret = -EINVAL; 460 - break; 457 + goto error_unlock; 461 458 } 462 459 463 460 error_unlock: 464 461 release_sock(&rx->sk); 462 + out: 465 463 _leave(" = %d", ret); 466 464 return ret; 467 465 }
+1
net/rxrpc/ar-internal.h
··· 467 467 struct rxrpc_connection *conn; /* connection carrying call */ 468 468 struct rxrpc_peer *peer; /* Peer record for remote address */ 469 469 struct rxrpc_sock __rcu *socket; /* socket responsible */ 470 + struct mutex user_mutex; /* User access mutex */ 470 471 ktime_t ack_at; /* When deferred ACK needs to happen */ 471 472 ktime_t resend_at; /* When next resend needs to happen */ 472 473 ktime_t ping_at; /* When next to send a ping */
+48
net/rxrpc/call_accept.c
··· 323 323 * 324 324 * If we want to report an error, we mark the skb with the packet type and 325 325 * abort code and return NULL. 326 + * 327 + * The call is returned with the user access mutex held. 326 328 */ 327 329 struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, 328 330 struct rxrpc_connection *conn, ··· 372 370 373 371 trace_rxrpc_receive(call, rxrpc_receive_incoming, 374 372 sp->hdr.serial, sp->hdr.seq); 373 + 374 + /* Lock the call to prevent rxrpc_kernel_send/recv_data() and 375 + * sendmsg()/recvmsg() inconveniently stealing the mutex once the 376 + * notification is generated. 377 + * 378 + * The BUG should never happen because the kernel should be well 379 + * behaved enough not to access the call before the first notification 380 + * event and userspace is prevented from doing so until the state is 381 + * appropriate. 382 + */ 383 + if (!mutex_trylock(&call->user_mutex)) 384 + BUG(); 375 385 376 386 /* Make the call live. */ 377 387 rxrpc_incoming_call(rx, call, skb); ··· 443 429 /* 444 430 * handle acceptance of a call by userspace 445 431 * - assign the user call ID to the call at the front of the queue 432 + * - called with the socket locked. 446 433 */ 447 434 struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx, 448 435 unsigned long user_call_ID, 449 436 rxrpc_notify_rx_t notify_rx) 437 + __releases(&rx->sk.sk_lock.slock) 450 438 { 451 439 struct rxrpc_call *call; 452 440 struct rb_node *parent, **pp; ··· 462 446 463 447 if (list_empty(&rx->to_be_accepted)) { 464 448 write_unlock(&rx->call_lock); 449 + release_sock(&rx->sk); 465 450 kleave(" = -ENODATA [empty]"); 466 451 return ERR_PTR(-ENODATA); 467 452 } ··· 487 470 */ 488 471 call = list_entry(rx->to_be_accepted.next, 489 472 struct rxrpc_call, accept_link); 473 + write_unlock(&rx->call_lock); 474 + 475 + /* We need to gain the mutex from the interrupt handler without 476 + * upsetting lockdep, so we have to release it there and take it here. 
477 + * We are, however, still holding the socket lock, so other accepts 478 + * must wait for us and no one can add the user ID behind our backs. 479 + */ 480 + if (mutex_lock_interruptible(&call->user_mutex) < 0) { 481 + release_sock(&rx->sk); 482 + kleave(" = -ERESTARTSYS"); 483 + return ERR_PTR(-ERESTARTSYS); 484 + } 485 + 486 + write_lock(&rx->call_lock); 490 487 list_del_init(&call->accept_link); 491 488 sk_acceptq_removed(&rx->sk); 492 489 rxrpc_see_call(call); 490 + 491 + /* Find the user ID insertion point. */ 492 + pp = &rx->calls.rb_node; 493 + parent = NULL; 494 + while (*pp) { 495 + parent = *pp; 496 + call = rb_entry(parent, struct rxrpc_call, sock_node); 497 + 498 + if (user_call_ID < call->user_call_ID) 499 + pp = &(*pp)->rb_left; 500 + else if (user_call_ID > call->user_call_ID) 501 + pp = &(*pp)->rb_right; 502 + else 503 + BUG(); 504 + } 493 505 494 506 write_lock_bh(&call->state_lock); 495 507 switch (call->state) { ··· 545 499 write_unlock(&rx->call_lock); 546 500 rxrpc_notify_socket(call); 547 501 rxrpc_service_prealloc(rx, GFP_KERNEL); 502 + release_sock(&rx->sk); 548 503 _leave(" = %p{%d}", call, call->debug_id); 549 504 return call; 550 505 ··· 562 515 write_unlock(&rx->call_lock); 563 516 out: 564 517 rxrpc_service_prealloc(rx, GFP_KERNEL); 518 + release_sock(&rx->sk); 565 519 _leave(" = %d", ret); 566 520 return ERR_PTR(ret); 567 521 }
+16 -2
net/rxrpc/call_object.c
··· 115 115 if (!call->rxtx_annotations) 116 116 goto nomem_2; 117 117 118 + mutex_init(&call->user_mutex); 118 119 setup_timer(&call->timer, rxrpc_call_timer_expired, 119 120 (unsigned long)call); 120 121 INIT_WORK(&call->processor, &rxrpc_process_call); ··· 195 194 } 196 195 197 196 /* 198 - * set up a call for the given data 199 - * - called in process context with IRQs enabled 197 + * Set up a call for the given parameters. 198 + * - Called with the socket lock held, which it must release. 199 + * - If it returns a call, the call's lock will need releasing by the caller. 200 200 */ 201 201 struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, 202 202 struct rxrpc_conn_parameters *cp, 203 203 struct sockaddr_rxrpc *srx, 204 204 unsigned long user_call_ID, 205 205 gfp_t gfp) 206 + __releases(&rx->sk.sk_lock.slock) 206 207 { 207 208 struct rxrpc_call *call, *xcall; 208 209 struct rb_node *parent, **pp; ··· 215 212 216 213 call = rxrpc_alloc_client_call(srx, gfp); 217 214 if (IS_ERR(call)) { 215 + release_sock(&rx->sk); 218 216 _leave(" = %ld", PTR_ERR(call)); 219 217 return call; 220 218 } 221 219 222 220 trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage), 223 221 here, (const void *)user_call_ID); 222 + 223 + /* We need to protect a partially set up call against the user as we 224 + * will be acting outside the socket lock. 225 + */ 226 + mutex_lock(&call->user_mutex); 224 227 225 228 /* Publish the call, even though it is incompletely set up as yet */ 226 229 write_lock(&rx->call_lock); ··· 259 250 list_add_tail(&call->link, &rxrpc_calls); 260 251 write_unlock(&rxrpc_call_lock); 261 252 253 + /* From this point on, the call is protected by its own lock. */ 254 + release_sock(&rx->sk); 255 + 262 256 /* Set up or get a connection record and set the protocol parameters, 263 257 * including channel number and call ID. 
264 258 */ ··· 291 279 */ 292 280 error_dup_user_ID: 293 281 write_unlock(&rx->call_lock); 282 + release_sock(&rx->sk); 294 283 ret = -EEXIST; 295 284 296 285 error: ··· 300 287 trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage), 301 288 here, ERR_PTR(ret)); 302 289 rxrpc_release_call(rx, call); 290 + mutex_unlock(&call->user_mutex); 303 291 rxrpc_put_call(call, rxrpc_call_put); 304 292 _leave(" = %d", ret); 305 293 return ERR_PTR(ret);
+1
net/rxrpc/input.c
··· 1194 1194 goto reject_packet; 1195 1195 } 1196 1196 rxrpc_send_ping(call, skb, skew); 1197 + mutex_unlock(&call->user_mutex); 1197 1198 } 1198 1199 1199 1200 rxrpc_input_call_packet(call, skb, skew);
+33 -6
net/rxrpc/recvmsg.c
··· 489 489 490 490 trace_rxrpc_recvmsg(call, rxrpc_recvmsg_dequeue, 0, 0, 0, 0); 491 491 492 + /* We're going to drop the socket lock, so we need to lock the call 493 + * against interference by sendmsg. 494 + */ 495 + if (!mutex_trylock(&call->user_mutex)) { 496 + ret = -EWOULDBLOCK; 497 + if (flags & MSG_DONTWAIT) 498 + goto error_requeue_call; 499 + ret = -ERESTARTSYS; 500 + if (mutex_lock_interruptible(&call->user_mutex) < 0) 501 + goto error_requeue_call; 502 + } 503 + 504 + release_sock(&rx->sk); 505 + 492 506 if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) 493 507 BUG(); 494 508 ··· 518 504 &call->user_call_ID); 519 505 } 520 506 if (ret < 0) 521 - goto error; 507 + goto error_unlock_call; 522 508 } 523 509 524 510 if (msg->msg_name) { ··· 549 535 } 550 536 551 537 if (ret < 0) 552 - goto error; 538 + goto error_unlock_call; 553 539 554 540 if (call->state == RXRPC_CALL_COMPLETE) { 555 541 ret = rxrpc_recvmsg_term(call, msg); 556 542 if (ret < 0) 557 - goto error; 543 + goto error_unlock_call; 558 544 if (!(flags & MSG_PEEK)) 559 545 rxrpc_release_call(rx, call); 560 546 msg->msg_flags |= MSG_EOR; ··· 567 553 msg->msg_flags &= ~MSG_MORE; 568 554 ret = copied; 569 555 570 - error: 556 + error_unlock_call: 557 + mutex_unlock(&call->user_mutex); 571 558 rxrpc_put_call(call, rxrpc_call_put); 559 + trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret); 560 + return ret; 561 + 562 + error_requeue_call: 563 + if (!(flags & MSG_PEEK)) { 564 + write_lock_bh(&rx->recvmsg_lock); 565 + list_add(&call->recvmsg_link, &rx->recvmsg_q); 566 + write_unlock_bh(&rx->recvmsg_lock); 567 + trace_rxrpc_recvmsg(call, rxrpc_recvmsg_requeue, 0, 0, 0, 0); 568 + } else { 569 + rxrpc_put_call(call, rxrpc_call_put); 570 + } 572 571 error_no_call: 573 572 release_sock(&rx->sk); 574 573 trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret); ··· 638 611 iov.iov_len = size - *_offset; 639 612 iov_iter_kvec(&iter, ITER_KVEC | READ, &iov, 1, size - *_offset); 640 613 641 - 
lock_sock(sock->sk); 614 + mutex_lock(&call->user_mutex); 642 615 643 616 switch (call->state) { 644 617 case RXRPC_CALL_CLIENT_RECV_REPLY: ··· 677 650 read_phase_complete: 678 651 ret = 1; 679 652 out: 680 - release_sock(sock->sk); 653 + mutex_unlock(&call->user_mutex); 681 654 _leave(" = %d [%zu,%d]", ret, *_offset, *_abort); 682 655 return ret; 683 656
+47 -11
net/rxrpc/sendmsg.c
··· 61 61 } 62 62 63 63 trace_rxrpc_transmit(call, rxrpc_transmit_wait); 64 - release_sock(&rx->sk); 64 + mutex_unlock(&call->user_mutex); 65 65 *timeo = schedule_timeout(*timeo); 66 - lock_sock(&rx->sk); 66 + if (mutex_lock_interruptible(&call->user_mutex) < 0) { 67 + ret = sock_intr_errno(*timeo); 68 + break; 69 + } 67 70 } 68 71 69 72 remove_wait_queue(&call->waitq, &myself); ··· 176 173 /* 177 174 * send data through a socket 178 175 * - must be called in process context 179 - * - caller holds the socket locked 176 + * - The caller holds the call user access mutex, but not the socket lock. 180 177 */ 181 178 static int rxrpc_send_data(struct rxrpc_sock *rx, 182 179 struct rxrpc_call *call, ··· 442 439 443 440 /* 444 441 * Create a new client call for sendmsg(). 442 + * - Called with the socket lock held, which it must release. 443 + * - If it returns a call, the call's lock will need releasing by the caller. 445 444 */ 446 445 static struct rxrpc_call * 447 446 rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, 448 447 unsigned long user_call_ID, bool exclusive) 448 + __releases(&rx->sk.sk_lock.slock) 449 449 { 450 450 struct rxrpc_conn_parameters cp; 451 451 struct rxrpc_call *call; ··· 458 452 459 453 _enter(""); 460 454 461 - if (!msg->msg_name) 455 + if (!msg->msg_name) { 456 + release_sock(&rx->sk); 462 457 return ERR_PTR(-EDESTADDRREQ); 458 + } 463 459 464 460 key = rx->key; 465 461 if (key && !rx->key->payload.data[0]) ··· 474 466 cp.exclusive = rx->exclusive | exclusive; 475 467 cp.service_id = srx->srx_service; 476 468 call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, GFP_KERNEL); 469 + /* The socket is now unlocked */ 477 470 478 471 _leave(" = %p\n", call); 479 472 return call; ··· 486 477 * - the socket may be either a client socket or a server socket 487 478 */ 488 479 int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) 480 + __releases(&rx->sk.sk_lock.slock) 489 481 { 490 482 enum 
rxrpc_command cmd; 491 483 struct rxrpc_call *call; ··· 500 490 ret = rxrpc_sendmsg_cmsg(msg, &user_call_ID, &cmd, &abort_code, 501 491 &exclusive); 502 492 if (ret < 0) 503 - return ret; 493 + goto error_release_sock; 504 494 505 495 if (cmd == RXRPC_CMD_ACCEPT) { 496 + ret = -EINVAL; 506 497 if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) 507 - return -EINVAL; 498 + goto error_release_sock; 508 499 call = rxrpc_accept_call(rx, user_call_ID, NULL); 500 + /* The socket is now unlocked. */ 509 501 if (IS_ERR(call)) 510 502 return PTR_ERR(call); 511 503 rxrpc_put_call(call, rxrpc_call_put); ··· 516 504 517 505 call = rxrpc_find_call_by_user_ID(rx, user_call_ID); 518 506 if (!call) { 507 + ret = -EBADSLT; 519 508 if (cmd != RXRPC_CMD_SEND_DATA) 520 - return -EBADSLT; 509 + goto error_release_sock; 521 510 call = rxrpc_new_client_call_for_sendmsg(rx, msg, user_call_ID, 522 511 exclusive); 512 + /* The socket is now unlocked... */ 523 513 if (IS_ERR(call)) 524 514 return PTR_ERR(call); 515 + /* ... and we have the call lock. 
*/ 516 + } else { 517 + ret = -EBUSY; 518 + if (call->state == RXRPC_CALL_UNINITIALISED || 519 + call->state == RXRPC_CALL_CLIENT_AWAIT_CONN || 520 + call->state == RXRPC_CALL_SERVER_PREALLOC || 521 + call->state == RXRPC_CALL_SERVER_SECURING || 522 + call->state == RXRPC_CALL_SERVER_ACCEPTING) 523 + goto error_release_sock; 524 + 525 + ret = mutex_lock_interruptible(&call->user_mutex); 526 + release_sock(&rx->sk); 527 + if (ret < 0) { 528 + ret = -ERESTARTSYS; 529 + goto error_put; 530 + } 525 531 } 526 532 527 533 _debug("CALL %d USR %lx ST %d on CONN %p", ··· 567 537 ret = rxrpc_send_data(rx, call, msg, len); 568 538 } 569 539 540 + mutex_unlock(&call->user_mutex); 541 + error_put: 570 542 rxrpc_put_call(call, rxrpc_call_put); 571 543 _leave(" = %d", ret); 544 + return ret; 545 + 546 + error_release_sock: 547 + release_sock(&rx->sk); 572 548 return ret; 573 549 } 574 550 ··· 600 564 ASSERTCMP(msg->msg_name, ==, NULL); 601 565 ASSERTCMP(msg->msg_control, ==, NULL); 602 566 603 - lock_sock(sock->sk); 567 + mutex_lock(&call->user_mutex); 604 568 605 569 _debug("CALL %d USR %lx ST %d on CONN %p", 606 570 call->debug_id, call->user_call_ID, call->state, call->conn); ··· 615 579 ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len); 616 580 } 617 581 618 - release_sock(sock->sk); 582 + mutex_unlock(&call->user_mutex); 619 583 _leave(" = %d", ret); 620 584 return ret; 621 585 } ··· 636 600 { 637 601 _enter("{%d},%d,%d,%s", call->debug_id, abort_code, error, why); 638 602 639 - lock_sock(sock->sk); 603 + mutex_lock(&call->user_mutex); 640 604 641 605 if (rxrpc_abort_call(why, call, 0, abort_code, error)) 642 606 rxrpc_send_abort_packet(call); 643 607 644 - release_sock(sock->sk); 608 + mutex_unlock(&call->user_mutex); 645 609 _leave(""); 646 610 } 647 611
+3
net/sctp/input.c
··· 884 884 arg.paddr = &t->ipaddr; 885 885 arg.lport = htons(t->asoc->base.bind_addr.port); 886 886 887 + rcu_read_lock(); 887 888 list = rhltable_lookup(&sctp_transport_hashtable, &arg, 888 889 sctp_hash_params); 889 890 890 891 rhl_for_each_entry_rcu(transport, tmp, list, node) 891 892 if (transport->asoc->ep == t->asoc->ep) { 893 + rcu_read_unlock(); 892 894 err = -EEXIST; 893 895 goto out; 894 896 } 897 + rcu_read_unlock(); 895 898 896 899 err = rhltable_insert_key(&sctp_transport_hashtable, &arg, 897 900 &t->node, sctp_hash_params);
+1
net/strparser/strparser.c
··· 504 504 505 505 static void __exit strp_mod_exit(void) 506 506 { 507 + destroy_workqueue(strp_wq); 507 508 } 508 509 module_init(strp_mod_init); 509 510 module_exit(strp_mod_exit);