Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from David Miller:

1) Fix memory leak in vti6, from Torsten Hilbrich.

2) Fix double free in xfrm_policy_timer, from YueHaibing.

3) NL80211_ATTR_CHANNEL_WIDTH attribute is put with wrong type, from
Johannes Berg.

4) Wrong allocation failure check in qlcnic driver, from Xu Wang.

5) Get ks8851-ml IO operations right, for real this time, from Marek
Vasut.

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (22 commits)
r8169: fix PHY driver check on platforms w/o module softdeps
net: ks8851-ml: Fix IO operations, again
mlxsw: spectrum_mr: Fix list iteration in error path
qlcnic: Fix bad kzalloc null test
mac80211: set IEEE80211_TX_CTRL_PORT_CTRL_PROTO for nl80211 TX
mac80211: mark station unauthorized before key removal
mac80211: Check port authorization in the ieee80211_tx_dequeue() case
cfg80211: Do not warn on same channel at the end of CSA
mac80211: drop data frames without key on encrypted links
ieee80211: fix HE SPR size calculation
nl80211: fix NL80211_ATTR_CHANNEL_WIDTH attribute type
xfrm: policy: Fix doulbe free in xfrm_policy_timer
bpf: Explicitly memset some bpf info structures declared on the stack
bpf: Explicitly memset the bpf_attr structure
bpf: Sanitize the bpf_struct_ops tcp-cc name
vti6: Fix memory leak of skb if input policy check fails
esp: remove the skb from the chain when it's enqueued in cryptd_wq
ipv6: xfrm6_tunnel.c: Use built-in RCU list checking
xfrm: add the missing verify_sec_ctx_len check in xfrm_add_acquire
xfrm: fix uctx len check in verify_sec_ctx_len
...

+220 -79
+4 -4
drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
··· 637 637 return 0; 638 638 639 639 err_erif_unresolve: 640 - list_for_each_entry_from_reverse(erve, &mr_vif->route_evif_list, 641 - vif_node) 640 + list_for_each_entry_continue_reverse(erve, &mr_vif->route_evif_list, 641 + vif_node) 642 642 mlxsw_sp_mr_route_evif_unresolve(mr_table, erve); 643 643 err_irif_unresolve: 644 - list_for_each_entry_from_reverse(irve, &mr_vif->route_ivif_list, 645 - vif_node) 644 + list_for_each_entry_continue_reverse(irve, &mr_vif->route_ivif_list, 645 + vif_node) 646 646 mlxsw_sp_mr_route_ivif_unresolve(mr_table, irve); 647 647 mr_vif->rif = NULL; 648 648 return err;
+52 -4
drivers/net/ethernet/micrel/ks8851_mll.c
··· 157 157 */ 158 158 159 159 /** 160 + * ks_check_endian - Check whether endianness of the bus is correct 161 + * @ks : The chip information 162 + * 163 + * The KS8851-16MLL EESK pin allows selecting the endianness of the 16bit 164 + * bus. To maintain optimum performance, the bus endianness should be set 165 + * such that it matches the endianness of the CPU. 166 + */ 167 + 168 + static int ks_check_endian(struct ks_net *ks) 169 + { 170 + u16 cider; 171 + 172 + /* 173 + * Read CIDER register first, however read it the "wrong" way around. 174 + * If the endian strap on the KS8851-16MLL in incorrect and the chip 175 + * is operating in different endianness than the CPU, then the meaning 176 + * of BE[3:0] byte-enable bits is also swapped such that: 177 + * BE[3,2,1,0] becomes BE[1,0,3,2] 178 + * 179 + * Luckily for us, the byte-enable bits are the top four MSbits of 180 + * the address register and the CIDER register is at offset 0xc0. 181 + * Hence, by reading address 0xc0c0, which is not impacted by endian 182 + * swapping, we assert either BE[3:2] or BE[1:0] while reading the 183 + * CIDER register. 184 + * 185 + * If the bus configuration is correct, reading 0xc0c0 asserts 186 + * BE[3:2] and this read returns 0x0000, because to read register 187 + * with bottom two LSbits of address set to 0, BE[1:0] must be 188 + * asserted. 189 + * 190 + * If the bus configuration is NOT correct, reading 0xc0c0 asserts 191 + * BE[1:0] and this read returns non-zero 0x8872 value. 192 + */ 193 + iowrite16(BE3 | BE2 | KS_CIDER, ks->hw_addr_cmd); 194 + cider = ioread16(ks->hw_addr); 195 + if (!cider) 196 + return 0; 197 + 198 + netdev_err(ks->netdev, "incorrect EESK endian strap setting\n"); 199 + 200 + return -EINVAL; 201 + } 202 + 203 + /** 160 204 * ks_rdreg16 - read 16 bit register from device 161 205 * @ks : The chip information 162 206 * @offset: The register address ··· 210 166 211 167 static u16 ks_rdreg16(struct ks_net *ks, int offset) 212 168 { 213 - ks->cmd_reg_cache = (u16)offset | ((BE3 | BE2) >> (offset & 0x02)); 169 + ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02)); 214 170 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd); 215 171 return ioread16(ks->hw_addr); 216 172 } ··· 225 181 226 182 static void ks_wrreg16(struct ks_net *ks, int offset, u16 value) 227 183 { 228 - ks->cmd_reg_cache = (u16)offset | ((BE3 | BE2) >> (offset & 0x02)); 184 + ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02)); 229 185 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd); 230 186 iowrite16(value, ks->hw_addr); 231 187 } ··· 241 197 { 242 198 len >>= 1; 243 199 while (len--) 244 - *wptr++ = be16_to_cpu(ioread16(ks->hw_addr)); 200 + *wptr++ = (u16)ioread16(ks->hw_addr); 245 201 } 246 202 247 203 /** ··· 255 211 { 256 212 len >>= 1; 257 213 while (len--) 258 - iowrite16(cpu_to_be16(*wptr++), ks->hw_addr); 214 + iowrite16(*wptr++, ks->hw_addr); 259 215 } 260 216 261 217 static void ks_disable_int(struct ks_net *ks) ··· 1261 1217 err = PTR_ERR(ks->hw_addr_cmd); 1262 1218 goto err_free; 1263 1219 } 1220 + 1221 + err = ks_check_endian(ks); 1222 + if (err) 1223 + goto err_free; 1264 1224 1265 1225 netdev->irq = platform_get_irq(pdev, 0); 1266 1226
+1 -1
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
··· 1720 1720 1721 1721 ahw->reset.seq_error = 0; 1722 1722 ahw->reset.buff = kzalloc(QLC_83XX_RESTART_TEMPLATE_SIZE, GFP_KERNEL); 1723 - if (p_dev->ahw->reset.buff == NULL) 1723 + if (ahw->reset.buff == NULL) 1724 1724 return -ENOMEM; 1725 1725 1726 1726 p_buff = p_dev->ahw->reset.buff;
+7 -9
drivers/net/ethernet/realtek/r8169_main.c
··· 5285 5285 if (!tp->phydev) { 5286 5286 mdiobus_unregister(new_bus); 5287 5287 return -ENODEV; 5288 + } else if (!tp->phydev->drv) { 5289 + /* Most chip versions fail with the genphy driver. 5290 + * Therefore ensure that the dedicated PHY driver is loaded. 5291 + */ 5292 + dev_err(&pdev->dev, "realtek.ko not loaded, maybe it needs to be added to initramfs?\n"); 5293 + mdiobus_unregister(new_bus); 5294 + return -EUNATCH; 5288 5295 } 5289 5296 5290 5297 /* PHY will be woken up in rtl_open() */ ··· 5452 5445 struct net_device *dev; 5453 5446 int chipset, region; 5454 5447 int jumbo_max, rc; 5455 - 5456 - /* Some tools for creating an initramfs don't consider softdeps, then 5457 - * r8169.ko may be in initramfs, but realtek.ko not. Then the generic 5458 - * PHY driver is used that doesn't work with most chip versions. 5459 - */ 5460 - if (!driver_find("RTL8201CP Ethernet", &mdio_bus_type)) { 5461 - dev_err(&pdev->dev, "realtek.ko not loaded, maybe it needs to be added to initramfs?\n"); 5462 - return -ENOENT; 5463 - } 5464 5448 5465 5449 dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp)); 5466 5450 if (!dev)
+1
include/linux/bpf.h
··· 160 160 } 161 161 void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, 162 162 bool lock_src); 163 + int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size); 163 164 164 165 struct bpf_offload_dev; 165 166 struct bpf_offloaded_map;
+2 -2
include/linux/ieee80211.h
··· 2102 2102 { 2103 2103 struct ieee80211_he_spr *he_spr = (void *)he_spr_ie; 2104 2104 u8 spr_len = sizeof(struct ieee80211_he_spr); 2105 - u32 he_spr_params; 2105 + u8 he_spr_params; 2106 2106 2107 2107 /* Make sure the input is not NULL */ 2108 2108 if (!he_spr_ie) 2109 2109 return 0; 2110 2110 2111 2111 /* Calc required length */ 2112 - he_spr_params = le32_to_cpu(he_spr->he_sr_control); 2112 + he_spr_params = he_spr->he_sr_control; 2113 2113 if (he_spr_params & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT) 2114 2114 spr_len++; 2115 2115 if (he_spr_params & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT)
+2 -1
kernel/bpf/btf.c
··· 4564 4564 union bpf_attr __user *uattr) 4565 4565 { 4566 4566 struct bpf_btf_info __user *uinfo; 4567 - struct bpf_btf_info info = {}; 4567 + struct bpf_btf_info info; 4568 4568 u32 info_copy, btf_copy; 4569 4569 void __user *ubtf; 4570 4570 u32 uinfo_len; ··· 4573 4573 uinfo_len = attr->info.info_len; 4574 4574 4575 4575 info_copy = min_t(u32, uinfo_len, sizeof(info)); 4576 + memset(&info, 0, sizeof(info)); 4576 4577 if (copy_from_user(&info, uinfo, info_copy)) 4577 4578 return -EFAULT; 4578 4579
+20 -14
kernel/bpf/syscall.c
··· 696 696 offsetof(union bpf_attr, CMD##_LAST_FIELD) - \ 697 697 sizeof(attr->CMD##_LAST_FIELD)) != NULL 698 698 699 - /* dst and src must have at least BPF_OBJ_NAME_LEN number of bytes. 700 - * Return 0 on success and < 0 on error. 699 + /* dst and src must have at least "size" number of bytes. 700 + * Return strlen on success and < 0 on error. 701 701 */ 702 - static int bpf_obj_name_cpy(char *dst, const char *src) 702 + int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size) 703 703 { 704 - const char *end = src + BPF_OBJ_NAME_LEN; 704 + const char *end = src + size; 705 + const char *orig_src = src; 705 706 706 - memset(dst, 0, BPF_OBJ_NAME_LEN); 707 + memset(dst, 0, size); 707 708 /* Copy all isalnum(), '_' and '.' chars. */ 708 709 while (src < end && *src) { 709 710 if (!isalnum(*src) && ··· 713 712 *dst++ = *src++; 714 713 } 715 714 716 - /* No '\0' found in BPF_OBJ_NAME_LEN number of bytes */ 715 + /* No '\0' found in "size" number of bytes */ 717 716 if (src == end) 718 717 return -EINVAL; 719 718 720 - return 0; 719 + return src - orig_src; 721 720 } 722 721 723 722 int map_check_no_btf(const struct bpf_map *map, ··· 811 810 if (IS_ERR(map)) 812 811 return PTR_ERR(map); 813 812 814 - err = bpf_obj_name_cpy(map->name, attr->map_name); 815 - if (err) 813 + err = bpf_obj_name_cpy(map->name, attr->map_name, 814 + sizeof(attr->map_name)); 815 + if (err < 0) 816 816 goto free_map; 817 817 818 818 atomic64_set(&map->refcnt, 1); ··· 2100 2098 goto free_prog; 2101 2099 2102 2100 prog->aux->load_time = ktime_get_boottime_ns(); 2103 - err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name); 2104 - if (err) 2101 + err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name, 2102 + sizeof(attr->prog_name)); 2103 + if (err < 0) 2105 2104 goto free_prog; 2106 2105 2107 2106 /* run eBPF verifier */ ··· 2795 2792 union bpf_attr __user *uattr) 2796 2793 { 2797 2794 struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info); 2798 - struct bpf_prog_info info = {}; 2795 + struct bpf_prog_info info; 2799 2796 u32 info_len = attr->info.info_len; 2800 2797 struct bpf_prog_stats stats; 2801 2798 char __user *uinsns; ··· 2807 2804 return err; 2808 2805 info_len = min_t(u32, sizeof(info), info_len); 2809 2806 2807 + memset(&info, 0, sizeof(info)); 2810 2808 if (copy_from_user(&info, uinfo, info_len)) 2811 2809 return -EFAULT; 2812 2810 ··· 3071 3067 union bpf_attr __user *uattr) 3072 3068 { 3073 3069 struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info); 3074 - struct bpf_map_info info = {}; 3070 + struct bpf_map_info info; 3075 3071 u32 info_len = attr->info.info_len; 3076 3072 int err; 3077 3073 ··· 3080 3076 return err; 3081 3077 info_len = min_t(u32, sizeof(info), info_len); 3082 3078 3079 + memset(&info, 0, sizeof(info)); 3083 3080 info.type = map->map_type; 3084 3081 info.id = map->id; 3085 3082 info.key_size = map->key_size; ··· 3364 3359 3365 3360 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size) 3366 3361 { 3367 - union bpf_attr attr = {}; 3362 + union bpf_attr attr; 3368 3363 int err; 3369 3364 3370 3365 if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN)) ··· 3376 3371 size = min_t(u32, size, sizeof(attr)); 3377 3372 3378 3373 /* copy attributes from user space, may be less than sizeof(bpf_attr) */ 3374 + memset(&attr, 0, sizeof(attr)); 3379 3375 if (copy_from_user(&attr, uattr, size) != 0) 3380 3376 return -EFAULT; 3381 3377
+1
net/ipv4/Kconfig
··· 303 303 304 304 config NET_IPVTI 305 305 tristate "Virtual (secure) IP: tunneling" 306 + depends on IPV6 || IPV6=n 306 307 select INET_TUNNEL 307 308 select NET_IP_TUNNEL 308 309 select XFRM
+2 -5
net/ipv4/bpf_tcp_ca.c
··· 184 184 { 185 185 const struct tcp_congestion_ops *utcp_ca; 186 186 struct tcp_congestion_ops *tcp_ca; 187 - size_t tcp_ca_name_len; 188 187 int prog_fd; 189 188 u32 moff; 190 189 ··· 198 199 tcp_ca->flags = utcp_ca->flags; 199 200 return 1; 200 201 case offsetof(struct tcp_congestion_ops, name): 201 - tcp_ca_name_len = strnlen(utcp_ca->name, sizeof(utcp_ca->name)); 202 - if (!tcp_ca_name_len || 203 - tcp_ca_name_len == sizeof(utcp_ca->name)) 202 + if (bpf_obj_name_cpy(tcp_ca->name, utcp_ca->name, 203 + sizeof(tcp_ca->name)) <= 0) 204 204 return -EINVAL; 205 205 if (tcp_ca_find(utcp_ca->name)) 206 206 return -EEXIST; 207 - memcpy(tcp_ca->name, utcp_ca->name, sizeof(tcp_ca->name)); 208 207 return 1; 209 208 } 210 209
+29 -7
net/ipv4/ip_vti.c
··· 187 187 int mtu; 188 188 189 189 if (!dst) { 190 - struct rtable *rt; 190 + switch (skb->protocol) { 191 + case htons(ETH_P_IP): { 192 + struct rtable *rt; 191 193 192 - fl->u.ip4.flowi4_oif = dev->ifindex; 193 - fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC; 194 - rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4); 195 - if (IS_ERR(rt)) { 194 + fl->u.ip4.flowi4_oif = dev->ifindex; 195 + fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC; 196 + rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4); 197 + if (IS_ERR(rt)) { 198 + dev->stats.tx_carrier_errors++; 199 + goto tx_error_icmp; 200 + } 201 + dst = &rt->dst; 202 + skb_dst_set(skb, dst); 203 + break; 204 + } 205 + #if IS_ENABLED(CONFIG_IPV6) 206 + case htons(ETH_P_IPV6): 207 + fl->u.ip6.flowi6_oif = dev->ifindex; 208 + fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC; 209 + dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6); 210 + if (dst->error) { 211 + dst_release(dst); 212 + dst = NULL; 213 + dev->stats.tx_carrier_errors++; 214 + goto tx_error_icmp; 215 + } 216 + skb_dst_set(skb, dst); 217 + break; 218 + #endif 219 + default: 196 220 dev->stats.tx_carrier_errors++; 197 221 goto tx_error_icmp; 198 222 } 199 - dst = &rt->dst; 200 - skb_dst_set(skb, dst); 201 223 } 202 224 203 225 dst_hold(dst);
+26 -8
net/ipv6/ip6_vti.c
··· 311 311 312 312 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { 313 313 rcu_read_unlock(); 314 - return 0; 314 + goto discard; 315 315 } 316 316 317 317 ipv6h = ipv6_hdr(skb); ··· 450 450 int mtu; 451 451 452 452 if (!dst) { 453 - fl->u.ip6.flowi6_oif = dev->ifindex; 454 - fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC; 455 - dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6); 456 - if (dst->error) { 457 - dst_release(dst); 458 - dst = NULL; 453 + switch (skb->protocol) { 454 + case htons(ETH_P_IP): { 455 + struct rtable *rt; 456 + 457 + fl->u.ip4.flowi4_oif = dev->ifindex; 458 + fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC; 459 + rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4); 460 + if (IS_ERR(rt)) 461 + goto tx_err_link_failure; 462 + dst = &rt->dst; 463 + skb_dst_set(skb, dst); 464 + break; 465 + } 466 + case htons(ETH_P_IPV6): 467 + fl->u.ip6.flowi6_oif = dev->ifindex; 468 + fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC; 469 + dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6); 470 + if (dst->error) { 471 + dst_release(dst); 472 + dst = NULL; 473 + goto tx_err_link_failure; 474 + } 475 + skb_dst_set(skb, dst); 476 + break; 477 + default: 459 478 goto tx_err_link_failure; 460 479 } 461 - skb_dst_set(skb, dst); 462 480 } 463 481 464 482 dst_hold(dst);
+1 -1
net/ipv6/xfrm6_tunnel.c
··· 78 78 79 79 hlist_for_each_entry_rcu(x6spi, 80 80 &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], 81 - list_byaddr) { 81 + list_byaddr, lockdep_is_held(&xfrm6_tunnel_spi_lock)) { 82 82 if (xfrm6_addr_equal(&x6spi->addr, saddr)) 83 83 return x6spi; 84 84 }
+2 -1
net/mac80211/debugfs_sta.c
··· 5 5 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> 6 6 * Copyright 2013-2014 Intel Mobile Communications GmbH 7 7 * Copyright(c) 2016 Intel Deutschland GmbH 8 - * Copyright (C) 2018 - 2019 Intel Corporation 8 + * Copyright (C) 2018 - 2020 Intel Corporation 9 9 */ 10 10 11 11 #include <linux/debugfs.h> ··· 78 78 FLAG(MPSP_OWNER), 79 79 FLAG(MPSP_RECIPIENT), 80 80 FLAG(PS_DELIVER), 81 + FLAG(USES_ENCRYPTION), 81 82 #undef FLAG 82 83 }; 83 84
+12 -8
net/mac80211/key.c
··· 6 6 * Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net> 7 7 * Copyright 2013-2014 Intel Mobile Communications GmbH 8 8 * Copyright 2015-2017 Intel Deutschland GmbH 9 - * Copyright 2018-2019 Intel Corporation 9 + * Copyright 2018-2020 Intel Corporation 10 10 */ 11 11 12 12 #include <linux/if_ether.h> ··· 262 262 sta ? sta->sta.addr : bcast_addr, ret); 263 263 } 264 264 265 - int ieee80211_set_tx_key(struct ieee80211_key *key) 265 + static int _ieee80211_set_tx_key(struct ieee80211_key *key, bool force) 266 266 { 267 267 struct sta_info *sta = key->sta; 268 268 struct ieee80211_local *local = key->local; 269 269 270 270 assert_key_lock(local); 271 271 272 + set_sta_flag(sta, WLAN_STA_USES_ENCRYPTION); 273 + 272 274 sta->ptk_idx = key->conf.keyidx; 273 275 274 - if (!ieee80211_hw_check(&local->hw, AMPDU_KEYBORDER_SUPPORT)) 276 + if (force || !ieee80211_hw_check(&local->hw, AMPDU_KEYBORDER_SUPPORT)) 275 277 clear_sta_flag(sta, WLAN_STA_BLOCK_BA); 276 278 ieee80211_check_fast_xmit(sta); 277 279 278 280 return 0; 281 + } 282 + 283 + int ieee80211_set_tx_key(struct ieee80211_key *key) 284 + { 285 + return _ieee80211_set_tx_key(key, false); 279 286 } 280 287 281 288 static void ieee80211_pairwise_rekey(struct ieee80211_key *old, ··· 448 441 if (pairwise) { 449 442 rcu_assign_pointer(sta->ptk[idx], new); 450 443 if (new && 451 - !(new->conf.flags & IEEE80211_KEY_FLAG_NO_AUTO_TX)) { 452 - sta->ptk_idx = idx; 453 - clear_sta_flag(sta, WLAN_STA_BLOCK_BA); 454 - ieee80211_check_fast_xmit(sta); 455 - } 444 + !(new->conf.flags & IEEE80211_KEY_FLAG_NO_AUTO_TX)) 445 + _ieee80211_set_tx_key(new, true); 456 446 } else { 457 447 rcu_assign_pointer(sta->gtk[idx], new); 458 448 }
+6 -1
net/mac80211/sta_info.c
··· 4 4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 5 5 * Copyright 2013-2014 Intel Mobile Communications GmbH 6 6 * Copyright (C) 2015 - 2017 Intel Deutschland GmbH 7 - * Copyright (C) 2018-2019 Intel Corporation 7 + * Copyright (C) 2018-2020 Intel Corporation 8 8 */ 9 9 10 10 #include <linux/module.h> ··· 1048 1048 1049 1049 might_sleep(); 1050 1050 lockdep_assert_held(&local->sta_mtx); 1051 + 1052 + while (sta->sta_state == IEEE80211_STA_AUTHORIZED) { 1053 + ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC); 1054 + WARN_ON_ONCE(ret); 1055 + } 1051 1056 1052 1057 /* now keys can no longer be reached */ 1053 1058 ieee80211_free_sta_keys(local, sta);
+1
net/mac80211/sta_info.h
··· 98 98 WLAN_STA_MPSP_OWNER, 99 99 WLAN_STA_MPSP_RECIPIENT, 100 100 WLAN_STA_PS_DELIVER, 101 + WLAN_STA_USES_ENCRYPTION, 101 102 102 103 NUM_WLAN_STA_FLAGS, 103 104 };
+33 -6
net/mac80211/tx.c
··· 5 5 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 6 6 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> 7 7 * Copyright 2013-2014 Intel Mobile Communications GmbH 8 - * Copyright (C) 2018 Intel Corporation 8 + * Copyright (C) 2018, 2020 Intel Corporation 9 9 * 10 10 * Transmit and frame generation functions. 11 11 */ ··· 590 590 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 591 591 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; 592 592 593 - if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) 593 + if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) { 594 594 tx->key = NULL; 595 - else if (tx->sta && 596 - (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx]))) 595 + return TX_CONTINUE; 596 + } 597 + 598 + if (tx->sta && 599 + (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx]))) 597 600 tx->key = key; 598 601 else if (ieee80211_is_group_privacy_action(tx->skb) && 599 602 (key = rcu_dereference(tx->sdata->default_multicast_key))) ··· 657 654 if (!skip_hw && tx->key && 658 655 tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) 659 656 info->control.hw_key = &tx->key->conf; 657 + } else if (!ieee80211_is_mgmt(hdr->frame_control) && tx->sta && 658 + test_sta_flag(tx->sta, WLAN_STA_USES_ENCRYPTION)) { 659 + return TX_DROP; 660 660 } 661 661 662 662 return TX_CONTINUE; ··· 3604 3598 tx.skb = skb; 3605 3599 tx.sdata = vif_to_sdata(info->control.vif); 3606 3600 3607 - if (txq->sta) 3601 + if (txq->sta) { 3608 3602 tx.sta = container_of(txq->sta, struct sta_info, sta); 3603 + /* 3604 + * Drop unicast frames to unauthorised stations unless they are 3605 + * EAPOL frames from the local station. 3606 + */ 3607 + if (unlikely(!ieee80211_vif_is_mesh(&tx.sdata->vif) && 3608 + tx.sdata->vif.type != NL80211_IFTYPE_OCB && 3609 + !is_multicast_ether_addr(hdr->addr1) && 3610 + !test_sta_flag(tx.sta, WLAN_STA_AUTHORIZED) && 3611 + (!(info->control.flags & 3612 + IEEE80211_TX_CTRL_PORT_CTRL_PROTO) || 3613 + !ether_addr_equal(tx.sdata->vif.addr, 3614 + hdr->addr2)))) { 3615 + I802_DEBUG_INC(local->tx_handlers_drop_unauth_port); 3616 + ieee80211_free_txskb(&local->hw, skb); 3617 + goto begin; 3618 + } 3619 + } 3609 3620 3610 3621 /* 3611 3622 * The key can be removed while the packet was queued, so need to call ··· 5149 5126 struct ieee80211_local *local = sdata->local; 5150 5127 struct sk_buff *skb; 5151 5128 struct ethhdr *ehdr; 5129 + u32 ctrl_flags = 0; 5152 5130 u32 flags; 5153 5131 5154 5132 /* Only accept CONTROL_PORT_PROTOCOL configured in CONNECT/ASSOCIATE ··· 5158 5134 if (proto != sdata->control_port_protocol && 5159 5135 proto != cpu_to_be16(ETH_P_PREAUTH)) 5160 5136 return -EINVAL; 5137 + 5138 + if (proto == sdata->control_port_protocol) 5139 + ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO; 5161 5140 5162 5141 if (unencrypted) 5163 5142 flags = IEEE80211_TX_INTFL_DONT_ENCRYPT; ··· 5187 5160 skb_reset_mac_header(skb); 5188 5161 5189 5162 local_bh_disable(); 5190 - __ieee80211_subif_start_xmit(skb, skb->dev, flags, 0); 5163 + __ieee80211_subif_start_xmit(skb, skb->dev, flags, ctrl_flags); 5191 5164 local_bh_enable(); 5192 5165 5193 5166 return 0;
+1 -1
net/wireless/nl80211.c
··· 16416 16416 goto nla_put_failure; 16417 16417 16418 16418 if ((sta_opmode->changed & STA_OPMODE_MAX_BW_CHANGED) && 16419 - nla_put_u8(msg, NL80211_ATTR_CHANNEL_WIDTH, sta_opmode->bw)) 16419 + nla_put_u32(msg, NL80211_ATTR_CHANNEL_WIDTH, sta_opmode->bw)) 16420 16420 goto nla_put_failure; 16421 16421 16422 16422 if ((sta_opmode->changed & STA_OPMODE_N_SS_CHANGED) &&
+5 -1
net/wireless/scan.c
··· 2022 2022 2023 2023 spin_lock_bh(&rdev->bss_lock); 2024 2024 2025 - if (WARN_ON(cbss->pub.channel == chan)) 2025 + /* 2026 + * Some APs use CSA also for bandwidth changes, i.e., without actually 2027 + * changing the control channel, so no need to update in such a case. 2028 + */ 2029 + if (cbss->pub.channel == chan) 2026 2030 goto done; 2027 2031 2028 2032 /* use transmitting bss */
+5 -4
net/xfrm/xfrm_device.c
··· 78 78 int err; 79 79 unsigned long flags; 80 80 struct xfrm_state *x; 81 - struct sk_buff *skb2, *nskb; 82 81 struct softnet_data *sd; 82 + struct sk_buff *skb2, *nskb, *pskb = NULL; 83 83 netdev_features_t esp_features = features; 84 84 struct xfrm_offload *xo = xfrm_offload(skb); 85 85 struct sec_path *sp; ··· 168 168 } else { 169 169 if (skb == skb2) 170 170 skb = nskb; 171 - 172 - if (!skb) 173 - return NULL; 171 + else 172 + pskb->next = nskb; 174 173 175 174 continue; 176 175 } 177 176 178 177 skb_push(skb2, skb2->data - skb_mac_header(skb2)); 178 + pskb = skb2; 179 179 } 180 180 181 181 return skb; ··· 383 383 return xfrm_dev_feat_change(dev); 384 384 385 385 case NETDEV_DOWN: 386 + case NETDEV_UNREGISTER: 386 387 return xfrm_dev_down(dev); 387 388 } 388 389 return NOTIFY_DONE;
+2
net/xfrm/xfrm_policy.c
··· 434 434 435 435 static void xfrm_policy_kill(struct xfrm_policy *policy) 436 436 { 437 + write_lock_bh(&policy->lock); 437 438 policy->walk.dead = 1; 439 + write_unlock_bh(&policy->lock); 438 440 439 441 atomic_inc(&policy->genid); 440 442
+5 -1
net/xfrm/xfrm_user.c
··· 110 110 return 0; 111 111 112 112 uctx = nla_data(rt); 113 - if (uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len)) 113 + if (uctx->len > nla_len(rt) || 114 + uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len)) 114 115 return -EINVAL; 115 116 116 117 return 0; ··· 2274 2273 xfrm_mark_get(attrs, &mark); 2275 2274 2276 2275 err = verify_newpolicy_info(&ua->policy); 2276 + if (err) 2277 + goto free_state; 2278 + err = verify_sec_ctx_len(attrs); 2277 2279 if (err) 2278 2280 goto free_state; 2279 2281