Linux kernel mirror (for testing) — git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1 fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'wireless-next-2026-03-26' of https://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless-next

Johannes Berg says:

====================
A fairly big set of changes all over, notably with:
- cfg80211: new APIs for NAN (Neighbor Aware Networking,
aka Wi-Fi Aware) so less work must be in firmware
- mt76:
- mt7996/mt7925 MLO fixes/improvements
- mt7996 NPU support (HW eth/wifi traffic offload)
- iwlwifi: UNII-9 and continuing UHR work

* tag 'wireless-next-2026-03-26' of https://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless-next: (230 commits)
wifi: mac80211: ignore reserved bits in reconfiguration status
wifi: cfg80211: allow protected action frame TX for NAN
wifi: ieee80211: Add some missing NAN definitions
wifi: nl80211: Add a notification to notify NAN channel evacuation
wifi: nl80211: add NL80211_CMD_NAN_ULW_UPDATE notification
wifi: nl80211: allow reporting spurious NAN Data frames
wifi: cfg80211: allow ToDS=0/FromDS=0 data frames on NAN data interfaces
wifi: nl80211: define an API for configuring the NAN peer's schedule
wifi: nl80211: add support for NAN stations
wifi: cfg80211: separately store HT, VHT and HE capabilities for NAN
wifi: cfg80211: add support for NAN data interface
wifi: cfg80211: make sure NAN chandefs are valid
wifi: cfg80211: Add an API to configure local NAN schedule
wifi: mac80211: cleanup error path of ieee80211_do_open
wifi: mac80211: extract channel logic from link logic
wifi: iwlwifi: mld: set RX_FLAG_RADIOTAP_TLV_AT_END generically
wifi: iwlwifi: reduce the number of prints upon firmware crash
wifi: iwlwifi: fix the description of SESSION_PROTECTION_CMD
wifi: iwlwifi: mld: introduce iwl_mld_vif_fw_id_valid
wifi: iwlwifi: mld: block EMLSR during TDLS connections
...
====================

Link: https://patch.msgid.link/20260326152021.305959-3-johannes@sipsolutions.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+7131 -2321
+2 -6
drivers/net/wireless/ath/ath10k/usb.c
··· 1016 1016 1017 1017 netif_napi_add(ar->napi_dev, &ar->napi, ath10k_usb_napi_poll); 1018 1018 1019 - usb_get_dev(dev); 1020 1019 vendor_id = le16_to_cpu(dev->descriptor.idVendor); 1021 1020 product_id = le16_to_cpu(dev->descriptor.idProduct); 1022 1021 ··· 1054 1055 err: 1055 1056 ath10k_core_destroy(ar); 1056 1057 1057 - usb_put_dev(dev); 1058 - 1059 1058 return ret; 1060 1059 } 1061 1060 1062 - static void ath10k_usb_remove(struct usb_interface *interface) 1061 + static void ath10k_usb_disconnect(struct usb_interface *interface) 1063 1062 { 1064 1063 struct ath10k_usb *ar_usb; 1065 1064 ··· 1068 1071 ath10k_core_unregister(ar_usb->ar); 1069 1072 netif_napi_del(&ar_usb->ar->napi); 1070 1073 ath10k_usb_destroy(ar_usb->ar); 1071 - usb_put_dev(interface_to_usbdev(interface)); 1072 1074 ath10k_core_destroy(ar_usb->ar); 1073 1075 } 1074 1076 ··· 1113 1117 .probe = ath10k_usb_probe, 1114 1118 .suspend = ath10k_usb_pm_suspend, 1115 1119 .resume = ath10k_usb_pm_resume, 1116 - .disconnect = ath10k_usb_remove, 1120 + .disconnect = ath10k_usb_disconnect, 1117 1121 .id_table = ath10k_usb_ids, 1118 1122 .supports_autosuspend = true, 1119 1123 .disable_hub_initiated_lpm = 1,
+2 -2
drivers/net/wireless/ath/ath12k/ahb.h
··· 21 21 #define ATH12K_ROOTPD_READY_TIMEOUT (5 * HZ) 22 22 #define ATH12K_RPROC_AFTER_POWERUP QCOM_SSR_AFTER_POWERUP 23 23 #define ATH12K_AHB_FW_PREFIX "q6_fw" 24 - #define ATH12K_AHB_FW_SUFFIX ".mdt" 25 - #define ATH12K_AHB_FW2 "iu_fw.mdt" 24 + #define ATH12K_AHB_FW_SUFFIX ".mbn" 25 + #define ATH12K_AHB_FW2 "iu_fw.mbn" 26 26 #define ATH12K_AHB_UPD_SWID 0x12 27 27 #define ATH12K_USERPD_SPAWN_TIMEOUT (5 * HZ) 28 28 #define ATH12K_USERPD_READY_TIMEOUT (10 * HZ)
+1 -1
drivers/net/wireless/ath/ath12k/core.h
··· 523 523 u16 links_map; 524 524 u8 assoc_link_id; 525 525 u16 ml_peer_id; 526 - u8 num_peer; 526 + u16 free_logical_link_idx_map; 527 527 528 528 enum ieee80211_sta_state state; 529 529 };
+13 -11
drivers/net/wireless/ath/ath12k/dp_htt.c
··· 205 205 if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE))) 206 206 return; 207 207 208 - if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON)) { 208 + if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON)) 209 209 is_ampdu = 210 210 HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags); 211 - tx_retry_failed = 212 - __le16_to_cpu(usr_stats->cmpltn_cmn.mpdu_tried) - 213 - __le16_to_cpu(usr_stats->cmpltn_cmn.mpdu_success); 214 - tx_retry_count = 215 - HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) + 216 - HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags); 217 - } 218 211 219 212 if (usr_stats->tlv_flags & 220 213 BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) { ··· 216 223 HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M); 217 224 tid = le32_get_bits(usr_stats->ack_ba.info, 218 225 HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM); 219 - } 220 226 221 - if (common->fes_duration_us) 222 - tx_duration = le32_to_cpu(common->fes_duration_us); 227 + if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON)) { 228 + tx_retry_failed = 229 + __le16_to_cpu(usr_stats->cmpltn_cmn.mpdu_tried) - 230 + __le16_to_cpu(usr_stats->cmpltn_cmn.mpdu_success); 231 + tx_retry_count = 232 + HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) + 233 + HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags); 234 + } 235 + 236 + if (common->fes_duration_us) 237 + tx_duration = le32_to_cpu(common->fes_duration_us); 238 + } 223 239 224 240 user_rate = &usr_stats->rate; 225 241 flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
+19 -12
drivers/net/wireless/ath/ath12k/hal.h
··· 268 268 }; 269 269 270 270 enum hal_rx_legacy_rate { 271 - HAL_RX_LEGACY_RATE_1_MBPS, 272 - HAL_RX_LEGACY_RATE_2_MBPS, 273 - HAL_RX_LEGACY_RATE_5_5_MBPS, 274 - HAL_RX_LEGACY_RATE_6_MBPS, 275 - HAL_RX_LEGACY_RATE_9_MBPS, 276 - HAL_RX_LEGACY_RATE_11_MBPS, 277 - HAL_RX_LEGACY_RATE_12_MBPS, 278 - HAL_RX_LEGACY_RATE_18_MBPS, 279 - HAL_RX_LEGACY_RATE_24_MBPS, 280 - HAL_RX_LEGACY_RATE_36_MBPS, 281 - HAL_RX_LEGACY_RATE_48_MBPS, 282 - HAL_RX_LEGACY_RATE_54_MBPS, 271 + HAL_RX_LEGACY_RATE_LP_1_MBPS, 272 + HAL_RX_LEGACY_RATE_LP_2_MBPS, 273 + HAL_RX_LEGACY_RATE_LP_5_5_MBPS, 274 + HAL_RX_LEGACY_RATE_LP_11_MBPS, 275 + HAL_RX_LEGACY_RATE_SP_2_MBPS, 276 + HAL_RX_LEGACY_RATE_SP_5_5_MBPS, 277 + HAL_RX_LEGACY_RATE_SP_11_MBPS, 283 278 HAL_RX_LEGACY_RATE_INVALID, 279 + }; 280 + 281 + enum hal_rx_legacy_rates_ofdm { 282 + HAL_RX_LEGACY_RATE_OFDM_48_MBPS, 283 + HAL_RX_LEGACY_RATE_OFDM_24_MBPS, 284 + HAL_RX_LEGACY_RATE_OFDM_12_MBPS, 285 + HAL_RX_LEGACY_RATE_OFDM_6_MBPS, 286 + HAL_RX_LEGACY_RATE_OFDM_54_MBPS, 287 + HAL_RX_LEGACY_RATE_OFDM_36_MBPS, 288 + HAL_RX_LEGACY_RATE_OFDM_18_MBPS, 289 + HAL_RX_LEGACY_RATE_OFDM_9_MBPS, 290 + HAL_RX_LEGACY_RATE_OFDM_INVALID, 284 291 }; 285 292 286 293 enum hal_ring_type {
+43 -24
drivers/net/wireless/ath/ath12k/mac.c
··· 164 164 CHAN6G(233, 7115, 0), 165 165 }; 166 166 167 + #define ATH12K_MAC_RATE_A_M(bps, code) \ 168 + { .bitrate = (bps), .hw_value = (code),\ 169 + .flags = IEEE80211_RATE_MANDATORY_A } 170 + 171 + #define ATH12K_MAC_RATE_B(bps, code, code_short) \ 172 + { .bitrate = (bps), .hw_value = (code), .hw_value_short = (code_short),\ 173 + .flags = IEEE80211_RATE_SHORT_PREAMBLE } 174 + 167 175 static struct ieee80211_rate ath12k_legacy_rates[] = { 168 176 { .bitrate = 10, 169 177 .hw_value = ATH12K_HW_RATE_CCK_LP_1M }, 170 - { .bitrate = 20, 171 - .hw_value = ATH12K_HW_RATE_CCK_LP_2M, 172 - .hw_value_short = ATH12K_HW_RATE_CCK_SP_2M, 173 - .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 174 - { .bitrate = 55, 175 - .hw_value = ATH12K_HW_RATE_CCK_LP_5_5M, 176 - .hw_value_short = ATH12K_HW_RATE_CCK_SP_5_5M, 177 - .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 178 - { .bitrate = 110, 179 - .hw_value = ATH12K_HW_RATE_CCK_LP_11M, 180 - .hw_value_short = ATH12K_HW_RATE_CCK_SP_11M, 181 - .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 182 - 183 - { .bitrate = 60, .hw_value = ATH12K_HW_RATE_OFDM_6M }, 184 - { .bitrate = 90, .hw_value = ATH12K_HW_RATE_OFDM_9M }, 185 - { .bitrate = 120, .hw_value = ATH12K_HW_RATE_OFDM_12M }, 186 - { .bitrate = 180, .hw_value = ATH12K_HW_RATE_OFDM_18M }, 187 - { .bitrate = 240, .hw_value = ATH12K_HW_RATE_OFDM_24M }, 188 - { .bitrate = 360, .hw_value = ATH12K_HW_RATE_OFDM_36M }, 189 - { .bitrate = 480, .hw_value = ATH12K_HW_RATE_OFDM_48M }, 190 - { .bitrate = 540, .hw_value = ATH12K_HW_RATE_OFDM_54M }, 178 + ATH12K_MAC_RATE_B(20, ATH12K_HW_RATE_CCK_LP_2M, 179 + ATH12K_HW_RATE_CCK_SP_2M), 180 + ATH12K_MAC_RATE_B(55, ATH12K_HW_RATE_CCK_LP_5_5M, 181 + ATH12K_HW_RATE_CCK_SP_5_5M), 182 + ATH12K_MAC_RATE_B(110, ATH12K_HW_RATE_CCK_LP_11M, 183 + ATH12K_HW_RATE_CCK_SP_11M), 184 + ATH12K_MAC_RATE_A_M(60, ATH12K_HW_RATE_OFDM_6M), 185 + ATH12K_MAC_RATE_A_M(90, ATH12K_HW_RATE_OFDM_9M), 186 + ATH12K_MAC_RATE_A_M(120, ATH12K_HW_RATE_OFDM_12M), 187 + ATH12K_MAC_RATE_A_M(180, 
ATH12K_HW_RATE_OFDM_18M), 188 + ATH12K_MAC_RATE_A_M(240, ATH12K_HW_RATE_OFDM_24M), 189 + ATH12K_MAC_RATE_A_M(360, ATH12K_HW_RATE_OFDM_36M), 190 + ATH12K_MAC_RATE_A_M(480, ATH12K_HW_RATE_OFDM_48M), 191 + ATH12K_MAC_RATE_A_M(540, ATH12K_HW_RATE_OFDM_54M), 191 192 }; 192 193 193 194 static const int ··· 733 732 if (ath12k_mac_bitrate_is_cck(rate->bitrate) != cck) 734 733 continue; 735 734 736 - if (rate->hw_value == hw_rate) 735 + /* To handle 802.11a PPDU type */ 736 + if ((!cck) && (rate->hw_value == hw_rate) && 737 + (rate->flags & IEEE80211_RATE_MANDATORY_A)) 737 738 return i; 739 + /* To handle 802.11b short PPDU type */ 738 740 else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE && 739 741 rate->hw_value_short == hw_rate) 742 + return i; 743 + /* To handle 802.11b long PPDU type */ 744 + else if (rate->hw_value == hw_rate) 740 745 return i; 741 746 } 742 747 ··· 6793 6786 return; 6794 6787 6795 6788 ahsta->links_map &= ~BIT(link_id); 6789 + ahsta->free_logical_link_idx_map |= BIT(arsta->link_idx); 6790 + 6796 6791 rcu_assign_pointer(ahsta->link[link_id], NULL); 6797 6792 synchronize_rcu(); 6798 6793 ··· 7113 7104 struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta); 7114 7105 struct ieee80211_link_sta *link_sta; 7115 7106 struct ath12k_link_vif *arvif; 7107 + int link_idx; 7116 7108 7117 7109 lockdep_assert_wiphy(ah->hw->wiphy); 7118 7110 ··· 7132 7122 7133 7123 ether_addr_copy(arsta->addr, link_sta->addr); 7134 7124 7135 - /* logical index of the link sta in order of creation */ 7136 - arsta->link_idx = ahsta->num_peer++; 7125 + if (!ahsta->free_logical_link_idx_map) 7126 + return -ENOSPC; 7127 + 7128 + /* 7129 + * Allocate a logical link index by selecting the first available bit 7130 + * from the free logical index map 7131 + */ 7132 + link_idx = __ffs(ahsta->free_logical_link_idx_map); 7133 + ahsta->free_logical_link_idx_map &= ~BIT(link_idx); 7134 + arsta->link_idx = link_idx; 7137 7135 7138 7136 arsta->link_id = link_id; 7139 7137 ahsta->links_map |= 
BIT(arsta->link_id); ··· 7650 7632 if (old_state == IEEE80211_STA_NOTEXIST && 7651 7633 new_state == IEEE80211_STA_NONE) { 7652 7634 memset(ahsta, 0, sizeof(*ahsta)); 7635 + ahsta->free_logical_link_idx_map = U16_MAX; 7653 7636 7654 7637 arsta = &ahsta->deflink; 7655 7638
+60 -16
drivers/net/wireless/ath/ath12k/wifi7/dp_mon.c
··· 405 405 } 406 406 } 407 407 408 + static __always_inline u8 409 + ath12k_wifi7_hal_mon_map_legacy_rate_to_hw_rate(u8 rate) 410 + { 411 + u8 ath12k_rate; 412 + 413 + /* Map hal_rx_legacy_rate to ath12k_hw_rate_cck */ 414 + switch (rate) { 415 + case HAL_RX_LEGACY_RATE_LP_1_MBPS: 416 + ath12k_rate = ATH12K_HW_RATE_CCK_LP_1M; 417 + break; 418 + case HAL_RX_LEGACY_RATE_LP_2_MBPS: 419 + ath12k_rate = ATH12K_HW_RATE_CCK_LP_2M; 420 + break; 421 + case HAL_RX_LEGACY_RATE_LP_5_5_MBPS: 422 + ath12k_rate = ATH12K_HW_RATE_CCK_LP_5_5M; 423 + break; 424 + case HAL_RX_LEGACY_RATE_LP_11_MBPS: 425 + ath12k_rate = ATH12K_HW_RATE_CCK_LP_11M; 426 + break; 427 + case HAL_RX_LEGACY_RATE_SP_2_MBPS: 428 + ath12k_rate = ATH12K_HW_RATE_CCK_SP_2M; 429 + break; 430 + case HAL_RX_LEGACY_RATE_SP_5_5_MBPS: 431 + ath12k_rate = ATH12K_HW_RATE_CCK_SP_5_5M; 432 + break; 433 + case HAL_RX_LEGACY_RATE_SP_11_MBPS: 434 + ath12k_rate = ATH12K_HW_RATE_CCK_SP_11M; 435 + break; 436 + default: 437 + ath12k_rate = rate; 438 + break; 439 + } 440 + 441 + return ath12k_rate; 442 + } 443 + 408 444 static void 409 445 ath12k_wifi7_dp_mon_parse_l_sig_b(const struct hal_rx_lsig_b_info *lsigb, 410 446 struct hal_rx_mon_ppdu_info *ppdu_info) ··· 451 415 rate = u32_get_bits(info0, HAL_RX_LSIG_B_INFO_INFO0_RATE); 452 416 switch (rate) { 453 417 case 1: 454 - rate = HAL_RX_LEGACY_RATE_1_MBPS; 418 + rate = HAL_RX_LEGACY_RATE_LP_1_MBPS; 455 419 break; 456 420 case 2: 457 - case 5: 458 - rate = HAL_RX_LEGACY_RATE_2_MBPS; 421 + rate = HAL_RX_LEGACY_RATE_LP_2_MBPS; 459 422 break; 460 423 case 3: 461 - case 6: 462 - rate = HAL_RX_LEGACY_RATE_5_5_MBPS; 424 + rate = HAL_RX_LEGACY_RATE_LP_5_5_MBPS; 463 425 break; 464 426 case 4: 427 + rate = HAL_RX_LEGACY_RATE_LP_11_MBPS; 428 + break; 429 + case 5: 430 + rate = HAL_RX_LEGACY_RATE_SP_2_MBPS; 431 + break; 432 + case 6: 433 + rate = HAL_RX_LEGACY_RATE_SP_5_5_MBPS; 434 + break; 465 435 case 7: 466 - rate = HAL_RX_LEGACY_RATE_11_MBPS; 436 + rate = HAL_RX_LEGACY_RATE_SP_11_MBPS; 
467 437 break; 468 438 default: 469 439 rate = HAL_RX_LEGACY_RATE_INVALID; 440 + break; 470 441 } 471 442 472 - ppdu_info->rate = rate; 443 + ppdu_info->rate = ath12k_wifi7_hal_mon_map_legacy_rate_to_hw_rate(rate); 473 444 ppdu_info->cck_flag = 1; 474 445 } 475 446 ··· 490 447 rate = u32_get_bits(info0, HAL_RX_LSIG_A_INFO_INFO0_RATE); 491 448 switch (rate) { 492 449 case 8: 493 - rate = HAL_RX_LEGACY_RATE_48_MBPS; 450 + rate = HAL_RX_LEGACY_RATE_OFDM_48_MBPS; 494 451 break; 495 452 case 9: 496 - rate = HAL_RX_LEGACY_RATE_24_MBPS; 453 + rate = HAL_RX_LEGACY_RATE_OFDM_24_MBPS; 497 454 break; 498 455 case 10: 499 - rate = HAL_RX_LEGACY_RATE_12_MBPS; 456 + rate = HAL_RX_LEGACY_RATE_OFDM_12_MBPS; 500 457 break; 501 458 case 11: 502 - rate = HAL_RX_LEGACY_RATE_6_MBPS; 459 + rate = HAL_RX_LEGACY_RATE_OFDM_6_MBPS; 503 460 break; 504 461 case 12: 505 - rate = HAL_RX_LEGACY_RATE_54_MBPS; 462 + rate = HAL_RX_LEGACY_RATE_OFDM_54_MBPS; 506 463 break; 507 464 case 13: 508 - rate = HAL_RX_LEGACY_RATE_36_MBPS; 465 + rate = HAL_RX_LEGACY_RATE_OFDM_36_MBPS; 509 466 break; 510 467 case 14: 511 - rate = HAL_RX_LEGACY_RATE_18_MBPS; 468 + rate = HAL_RX_LEGACY_RATE_OFDM_18_MBPS; 512 469 break; 513 470 case 15: 514 - rate = HAL_RX_LEGACY_RATE_9_MBPS; 471 + rate = HAL_RX_LEGACY_RATE_OFDM_9_MBPS; 515 472 break; 516 473 default: 517 - rate = HAL_RX_LEGACY_RATE_INVALID; 474 + rate = HAL_RX_LEGACY_RATE_OFDM_INVALID; 475 + break; 518 476 } 519 477 520 478 ppdu_info->rate = rate;
+27 -31
drivers/net/wireless/ath/ath12k/wmi.c
··· 10017 10017 10018 10018 static int 10019 10019 ath12k_wmi_send_unit_test_cmd(struct ath12k *ar, 10020 - struct wmi_unit_test_cmd ut_cmd, 10021 - u32 *test_args) 10020 + const struct wmi_unit_test_arg *ut) 10022 10021 { 10023 10022 struct ath12k_wmi_pdev *wmi = ar->wmi; 10024 10023 struct wmi_unit_test_cmd *cmd; 10024 + int buf_len, arg_len; 10025 10025 struct sk_buff *skb; 10026 10026 struct wmi_tlv *tlv; 10027 + __le32 *ut_cmd_args; 10027 10028 void *ptr; 10028 - u32 *ut_cmd_args; 10029 - int buf_len, arg_len; 10030 10029 int ret; 10031 10030 int i; 10032 10031 10033 - arg_len = sizeof(u32) * le32_to_cpu(ut_cmd.num_args); 10034 - buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE; 10032 + arg_len = sizeof(*ut_cmd_args) * ut->num_args; 10033 + buf_len = sizeof(*cmd) + arg_len + TLV_HDR_SIZE; 10035 10034 10036 10035 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len); 10037 10036 if (!skb) 10038 10037 return -ENOMEM; 10039 10038 10040 - cmd = (struct wmi_unit_test_cmd *)skb->data; 10039 + ptr = skb->data; 10040 + cmd = ptr; 10041 10041 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_UNIT_TEST_CMD, 10042 - sizeof(ut_cmd)); 10042 + sizeof(*cmd)); 10043 + cmd->vdev_id = cpu_to_le32(ut->vdev_id); 10044 + cmd->module_id = cpu_to_le32(ut->module_id); 10045 + cmd->num_args = cpu_to_le32(ut->num_args); 10046 + cmd->diag_token = cpu_to_le32(ut->diag_token); 10043 10047 10044 - cmd->vdev_id = ut_cmd.vdev_id; 10045 - cmd->module_id = ut_cmd.module_id; 10046 - cmd->num_args = ut_cmd.num_args; 10047 - cmd->diag_token = ut_cmd.diag_token; 10048 - 10049 - ptr = skb->data + sizeof(ut_cmd); 10050 - 10048 + ptr += sizeof(*cmd); 10051 10049 tlv = ptr; 10052 10050 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len); 10053 10051 10054 10052 ptr += TLV_HDR_SIZE; 10055 - 10056 10053 ut_cmd_args = ptr; 10057 - for (i = 0; i < le32_to_cpu(ut_cmd.num_args); i++) 10058 - ut_cmd_args[i] = test_args[i]; 10054 + for (i = 0; i < ut->num_args; i++) 10055 + ut_cmd_args[i] = 
cpu_to_le32(ut->args[i]); 10059 10056 10060 10057 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 10061 10058 "WMI unit test : module %d vdev %d n_args %d token %d\n", 10062 - cmd->module_id, cmd->vdev_id, cmd->num_args, 10063 - cmd->diag_token); 10059 + ut->module_id, ut->vdev_id, ut->num_args, ut->diag_token); 10064 10060 10065 10061 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID); 10066 10062 ··· 10072 10076 int ath12k_wmi_simulate_radar(struct ath12k *ar) 10073 10077 { 10074 10078 struct ath12k_link_vif *arvif; 10075 - u32 dfs_args[DFS_MAX_TEST_ARGS]; 10076 - struct wmi_unit_test_cmd wmi_ut; 10079 + struct wmi_unit_test_arg wmi_ut = {}; 10077 10080 bool arvif_found = false; 10078 10081 10079 10082 list_for_each_entry(arvif, &ar->arvifs, list) { ··· 10085 10090 if (!arvif_found) 10086 10091 return -EINVAL; 10087 10092 10088 - dfs_args[DFS_TEST_CMDID] = 0; 10089 - dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id; 10090 - /* Currently we could pass segment_id(b0 - b1), chirp(b2) 10093 + wmi_ut.args[DFS_TEST_CMDID] = 0; 10094 + wmi_ut.args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id; 10095 + /* 10096 + * Currently we could pass segment_id(b0 - b1), chirp(b2) 10091 10097 * freq offset (b3 - b10) to unit test. For simulation 10092 10098 * purpose this can be set to 0 which is valid. 
10093 10099 */ 10094 - dfs_args[DFS_TEST_RADAR_PARAM] = 0; 10100 + wmi_ut.args[DFS_TEST_RADAR_PARAM] = 0; 10095 10101 10096 - wmi_ut.vdev_id = cpu_to_le32(arvif->vdev_id); 10097 - wmi_ut.module_id = cpu_to_le32(DFS_UNIT_TEST_MODULE); 10098 - wmi_ut.num_args = cpu_to_le32(DFS_MAX_TEST_ARGS); 10099 - wmi_ut.diag_token = cpu_to_le32(DFS_UNIT_TEST_TOKEN); 10102 + wmi_ut.vdev_id = arvif->vdev_id; 10103 + wmi_ut.module_id = DFS_UNIT_TEST_MODULE; 10104 + wmi_ut.num_args = DFS_MAX_TEST_ARGS; 10105 + wmi_ut.diag_token = DFS_UNIT_TEST_TOKEN; 10100 10106 10101 10107 ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Triggering Radar Simulation\n"); 10102 10108 10103 - return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args); 10109 + return ath12k_wmi_send_unit_test_cmd(ar, &wmi_ut); 10104 10110 } 10105 10111 10106 10112 int ath12k_wmi_send_tpc_stats_request(struct ath12k *ar,
+9 -5
drivers/net/wireless/ath/ath12k/wmi.h
··· 4193 4193 struct ath12k_wmi_mac_addr_params peer_macaddr; 4194 4194 } __packed; 4195 4195 4196 - #define DFS_PHYERR_UNIT_TEST_CMD 0 4197 4196 #define DFS_UNIT_TEST_MODULE 0x2b 4198 4197 #define DFS_UNIT_TEST_TOKEN 0xAA 4199 4198 ··· 4203 4204 DFS_MAX_TEST_ARGS, 4204 4205 }; 4205 4206 4206 - struct wmi_dfs_unit_test_arg { 4207 - u32 cmd_id; 4208 - u32 pdev_id; 4209 - u32 radar_param; 4207 + /* update if another test command requires more */ 4208 + #define WMI_UNIT_TEST_ARGS_MAX DFS_MAX_TEST_ARGS 4209 + 4210 + struct wmi_unit_test_arg { 4211 + u32 vdev_id; 4212 + u32 module_id; 4213 + u32 diag_token; 4214 + u32 num_args; 4215 + u32 args[WMI_UNIT_TEST_ARGS_MAX]; 4210 4216 }; 4211 4217 4212 4218 struct wmi_unit_test_cmd {
+4 -12
drivers/net/wireless/ath/ath6kl/usb.c
··· 1124 1124 int vendor_id, product_id; 1125 1125 int ret = 0; 1126 1126 1127 - usb_get_dev(dev); 1128 - 1129 1127 vendor_id = le16_to_cpu(dev->descriptor.idVendor); 1130 1128 product_id = le16_to_cpu(dev->descriptor.idProduct); 1131 1129 ··· 1141 1143 ath6kl_dbg(ATH6KL_DBG_USB, "USB 1.1 Host\n"); 1142 1144 1143 1145 ar_usb = ath6kl_usb_create(interface); 1144 - 1145 - if (ar_usb == NULL) { 1146 - ret = -ENOMEM; 1147 - goto err_usb_put; 1148 - } 1146 + if (ar_usb == NULL) 1147 + return -ENOMEM; 1149 1148 1150 1149 ar = ath6kl_core_create(&ar_usb->udev->dev); 1151 1150 if (ar == NULL) { ··· 1171 1176 ath6kl_core_destroy(ar); 1172 1177 err_usb_destroy: 1173 1178 ath6kl_usb_destroy(ar_usb); 1174 - err_usb_put: 1175 - usb_put_dev(dev); 1176 1179 1177 1180 return ret; 1178 1181 } 1179 1182 1180 - static void ath6kl_usb_remove(struct usb_interface *interface) 1183 + static void ath6kl_usb_disconnect(struct usb_interface *interface) 1181 1184 { 1182 - usb_put_dev(interface_to_usbdev(interface)); 1183 1185 ath6kl_usb_device_detached(interface); 1184 1186 } 1185 1187 ··· 1227 1235 .probe = ath6kl_usb_probe, 1228 1236 .suspend = ath6kl_usb_pm_suspend, 1229 1237 .resume = ath6kl_usb_pm_resume, 1230 - .disconnect = ath6kl_usb_remove, 1238 + .disconnect = ath6kl_usb_disconnect, 1231 1239 .id_table = ath6kl_usb_ids, 1232 1240 .supports_autosuspend = true, 1233 1241 .disable_hub_initiated_lpm = 1,
-11
drivers/net/wireless/ath/ath6kl/wmi.h
··· 1630 1630 WMI_LOCK_BSS_MODE = 3, /* Lock to the current BSS */ 1631 1631 }; 1632 1632 1633 - struct bss_bias { 1634 - u8 bssid[ETH_ALEN]; 1635 - s8 bias; 1636 - } __packed; 1637 - 1638 - struct bss_bias_info { 1639 - u8 num_bss; 1640 - struct bss_bias bss_bias[]; 1641 - } __packed; 1642 - 1643 1633 struct low_rssi_scan_params { 1644 1634 __le16 lrssi_scan_period; 1645 1635 a_sle16 lrssi_scan_threshold; ··· 1642 1652 union { 1643 1653 u8 bssid[ETH_ALEN]; /* WMI_FORCE_ROAM */ 1644 1654 u8 roam_mode; /* WMI_SET_ROAM_MODE */ 1645 - struct bss_bias_info bss; /* WMI_SET_HOST_BIAS */ 1646 1655 struct low_rssi_scan_params params; /* WMI_SET_LRSSI_SCAN_PARAMS 1647 1656 */ 1648 1657 } __packed info;
-4
drivers/net/wireless/ath/ath9k/hif_usb.c
··· 1382 1382 goto err_alloc; 1383 1383 } 1384 1384 1385 - usb_get_dev(udev); 1386 - 1387 1385 hif_dev->udev = udev; 1388 1386 hif_dev->interface = interface; 1389 1387 hif_dev->usb_device_id = id; ··· 1401 1403 err_fw_req: 1402 1404 usb_set_intfdata(interface, NULL); 1403 1405 kfree(hif_dev); 1404 - usb_put_dev(udev); 1405 1406 err_alloc: 1406 1407 return ret; 1407 1408 } ··· 1448 1451 1449 1452 kfree(hif_dev); 1450 1453 dev_info(&udev->dev, "ath9k_htc: USB layer deinitialized\n"); 1451 - usb_put_dev(udev); 1452 1454 } 1453 1455 1454 1456 #ifdef CONFIG_PM
+8 -10
drivers/net/wireless/broadcom/b43/dma.c
··· 837 837 struct b43_dmaring *ring; 838 838 int i, err; 839 839 dma_addr_t dma_test; 840 + size_t nr_slots; 840 841 841 - ring = kzalloc_obj(*ring); 842 + if (for_tx) 843 + nr_slots = B43_TXRING_SLOTS; 844 + else 845 + nr_slots = B43_RXRING_SLOTS; 846 + 847 + ring = kzalloc_flex(*ring, meta, nr_slots); 842 848 if (!ring) 843 849 goto out; 844 850 845 - ring->nr_slots = B43_RXRING_SLOTS; 846 - if (for_tx) 847 - ring->nr_slots = B43_TXRING_SLOTS; 851 + ring->nr_slots = nr_slots; 848 852 849 - ring->meta = kzalloc_objs(struct b43_dmadesc_meta, ring->nr_slots); 850 - if (!ring->meta) 851 - goto err_kfree_ring; 852 853 for (i = 0; i < ring->nr_slots; i++) 853 854 ring->meta->skb = B43_DMA_PTR_POISON; 854 855 ··· 944 943 err_kfree_txhdr_cache: 945 944 kfree(ring->txhdr_cache); 946 945 err_kfree_meta: 947 - kfree(ring->meta); 948 - err_kfree_ring: 949 946 kfree(ring); 950 947 ring = NULL; 951 948 goto out; ··· 1003 1004 free_ringmemory(ring); 1004 1005 1005 1006 kfree(ring->txhdr_cache); 1006 - kfree(ring->meta); 1007 1007 kfree(ring); 1008 1008 } 1009 1009
+2 -2
drivers/net/wireless/broadcom/b43/dma.h
··· 228 228 const struct b43_dma_ops *ops; 229 229 /* Kernel virtual base address of the ring memory. */ 230 230 void *descbase; 231 - /* Meta data about all descriptors. */ 232 - struct b43_dmadesc_meta *meta; 233 231 /* Cache of TX headers for each TX frame. 234 232 * This is to avoid an allocation on each TX. 235 233 * This is NULL for an RX ring. ··· 271 273 /* Statistics: Total number of TX plus all retries. */ 272 274 u64 nr_total_packet_tries; 273 275 #endif /* CONFIG_B43_DEBUG */ 276 + /* Meta data about all descriptors. */ 277 + struct b43_dmadesc_meta meta[] __counted_by(nr_slots); 274 278 }; 275 279 276 280 static inline u32 b43_dma_read(struct b43_dmaring *ring, u16 offset)
+1 -1
drivers/net/wireless/intel/iwlwifi/cfg/bz.c
··· 10 10 #include "fw/api/txq.h" 11 11 12 12 /* Highest firmware core release supported */ 13 - #define IWL_BZ_UCODE_CORE_MAX 101 13 + #define IWL_BZ_UCODE_CORE_MAX 102 14 14 15 15 /* Lowest firmware API version supported */ 16 16 #define IWL_BZ_UCODE_API_MIN 100
+1 -1
drivers/net/wireless/intel/iwlwifi/cfg/dr.c
··· 9 9 #include "fw/api/txq.h" 10 10 11 11 /* Highest firmware core release supported */ 12 - #define IWL_DR_UCODE_CORE_MAX 101 12 + #define IWL_DR_UCODE_CORE_MAX 102 13 13 14 14 /* Lowest firmware API version supported */ 15 15 #define IWL_DR_UCODE_API_MIN 100
+1 -1
drivers/net/wireless/intel/iwlwifi/cfg/sc.c
··· 10 10 #include "fw/api/txq.h" 11 11 12 12 /* Highest firmware core release supported */ 13 - #define IWL_SC_UCODE_CORE_MAX 101 13 + #define IWL_SC_UCODE_CORE_MAX 102 14 14 15 15 /* Lowest firmware API version supported */ 16 16 #define IWL_SC_UCODE_API_MIN 100
+117 -15
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
··· 504 504 u8 num_chains, u8 num_sub_bands) 505 505 { 506 506 for (u8 chain = 0; chain < num_chains; chain++) { 507 - for (u8 subband = 0; subband < BIOS_SAR_MAX_SUB_BANDS_NUM; 507 + for (u8 subband = 0; 508 + subband < ARRAY_SIZE(chains[chain].subbands); 508 509 subband++) { 509 510 /* if we don't have the values, use the default */ 510 511 if (subband >= num_sub_bands) { ··· 535 534 if (IS_ERR(data)) 536 535 return PTR_ERR(data); 537 536 538 - /* start by trying to read revision 2 */ 537 + /* start by trying to read revision 3 */ 538 + wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, 539 + ACPI_WRDS_WIFI_DATA_SIZE_REV3, 540 + &tbl_rev); 541 + if (!IS_ERR(wifi_pkg)) { 542 + if (tbl_rev != 3) { 543 + ret = -EINVAL; 544 + goto out_free; 545 + } 546 + 547 + num_chains = ACPI_SAR_NUM_CHAINS_REV2; 548 + num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV3; 549 + 550 + goto read_table; 551 + } 552 + 553 + /* then try revision 2 */ 539 554 wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, 540 555 ACPI_WRDS_WIFI_DATA_SIZE_REV2, 541 556 &tbl_rev); ··· 608 591 goto out_free; 609 592 } 610 593 594 + if (WARN_ON(num_chains * num_sub_bands > 595 + ARRAY_SIZE(fwrt->sar_profiles[0].chains) * 596 + ARRAY_SIZE(fwrt->sar_profiles[0].chains[0].subbands))) { 597 + ret = -EINVAL; 598 + goto out_free; 599 + } 600 + 611 601 IWL_DEBUG_RADIO(fwrt, "Reading WRDS tbl_rev=%d\n", tbl_rev); 612 602 613 603 flags = wifi_pkg->package.elements[1].integer.value; ··· 648 624 if (IS_ERR(data)) 649 625 return PTR_ERR(data); 650 626 651 - /* start by trying to read revision 2 */ 627 + /* start by trying to read revision 3 */ 628 + wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, 629 + ACPI_EWRD_WIFI_DATA_SIZE_REV3, 630 + &tbl_rev); 631 + if (!IS_ERR(wifi_pkg)) { 632 + if (tbl_rev != 3) { 633 + ret = -EINVAL; 634 + goto out_free; 635 + } 636 + 637 + num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV3; 638 + 639 + goto read_table; 640 + } 641 + 642 + /* then try revision 2 */ 652 643 wifi_pkg = 
iwl_acpi_get_wifi_pkg(fwrt->dev, data, 653 644 ACPI_EWRD_WIFI_DATA_SIZE_REV2, 654 645 &tbl_rev); ··· 718 679 goto out_free; 719 680 } 720 681 682 + if (WARN_ON(ACPI_SAR_NUM_CHAINS_REV0 * num_sub_bands > 683 + ARRAY_SIZE(fwrt->sar_profiles[0].chains) * 684 + ARRAY_SIZE(fwrt->sar_profiles[0].chains[0].subbands))) { 685 + ret = -EINVAL; 686 + goto out_free; 687 + } 688 + 721 689 enabled = !!(wifi_pkg->package.elements[1].integer.value); 722 690 n_profiles = wifi_pkg->package.elements[2].integer.value; 723 691 ··· 767 721 if (tbl_rev < 2) 768 722 goto set_enabled; 769 723 724 + if (WARN_ON(ACPI_SAR_NUM_CHAINS_REV0 * 2 * num_sub_bands > 725 + ARRAY_SIZE(fwrt->sar_profiles[0].chains) * 726 + ARRAY_SIZE(fwrt->sar_profiles[0].chains[0].subbands))) { 727 + ret = -EINVAL; 728 + goto out_free; 729 + } 730 + 770 731 /* parse cdb chains for all profiles */ 771 732 for (i = 0; i < n_profiles; i++) { 772 733 struct iwl_sar_profile_chain *chains; ··· 812 759 u8 profiles; 813 760 u8 min_profiles; 814 761 } rev_data[] = { 762 + { 763 + .revisions = BIT(4), 764 + .bands = ACPI_GEO_NUM_BANDS_REV4, 765 + .profiles = ACPI_NUM_GEO_PROFILES_REV3, 766 + .min_profiles = BIOS_GEO_MIN_PROFILE_NUM, 767 + }, 815 768 { 816 769 .revisions = BIT(3), 817 770 .bands = ACPI_GEO_NUM_BANDS_REV2, ··· 871 812 num_bands = rev_data[idx].bands; 872 813 num_profiles = rev_data[idx].profiles; 873 814 815 + if (WARN_ON(num_profiles > 816 + ARRAY_SIZE(fwrt->geo_profiles))) { 817 + ret = -EINVAL; 818 + goto out_free; 819 + } 820 + 821 + if (WARN_ON(num_bands > 822 + ARRAY_SIZE(fwrt->geo_profiles[0].bands))) { 823 + ret = -EINVAL; 824 + goto out_free; 825 + } 826 + 874 827 if (rev_data[idx].min_profiles) { 875 828 /* read header that says # of profiles */ 876 829 union acpi_object *entry; ··· 922 851 923 852 read_table: 924 853 fwrt->geo_rev = tbl_rev; 854 + 925 855 for (i = 0; i < num_profiles; i++) { 926 - for (j = 0; j < BIOS_GEO_MAX_NUM_BANDS; j++) { 856 + struct iwl_geo_profile *prof = 
&fwrt->geo_profiles[i]; 857 + 858 + for (j = 0; j < ARRAY_SIZE(prof->bands); j++) { 927 859 union acpi_object *entry; 928 860 929 861 /* 930 - * num_bands is either 2 or 3, if it's only 2 then 931 - * fill the third band (6 GHz) with the values from 932 - * 5 GHz (second band) 862 + * num_bands is either 2 or 3 or 4, if it's lower 863 + * than 4, fill the third band (6 GHz) with the values 864 + * from 5 GHz (second band) 933 865 */ 934 866 if (j >= num_bands) { 935 - fwrt->geo_profiles[i].bands[j].max = 936 - fwrt->geo_profiles[i].bands[1].max; 867 + prof->bands[j].max = prof->bands[1].max; 937 868 } else { 938 869 entry = &wifi_pkg->package.elements[entry_idx]; 939 870 entry_idx++; ··· 945 872 goto out_free; 946 873 } 947 874 948 - fwrt->geo_profiles[i].bands[j].max = 875 + prof->bands[j].max = 949 876 entry->integer.value; 950 877 } 951 878 952 - for (k = 0; k < BIOS_GEO_NUM_CHAINS; k++) { 879 + for (k = 0; 880 + k < ARRAY_SIZE(prof->bands[0].chains); 881 + k++) { 953 882 /* same here as above */ 954 883 if (j >= num_bands) { 955 - fwrt->geo_profiles[i].bands[j].chains[k] = 956 - fwrt->geo_profiles[i].bands[1].chains[k]; 884 + prof->bands[j].chains[k] = 885 + prof->bands[1].chains[k]; 957 886 } else { 958 887 entry = &wifi_pkg->package.elements[entry_idx]; 959 888 entry_idx++; ··· 965 890 goto out_free; 966 891 } 967 892 968 - fwrt->geo_profiles[i].bands[j].chains[k] = 893 + prof->bands[j].chains[k] = 969 894 entry->integer.value; 970 895 } 971 896 } ··· 973 898 } 974 899 975 900 fwrt->geo_num_profiles = num_profiles; 901 + fwrt->geo_bios_source = BIOS_SOURCE_ACPI; 976 902 fwrt->geo_enabled = true; 977 903 ret = 0; 978 904 out_free: ··· 990 914 data = iwl_acpi_get_object(fwrt->dev, ACPI_PPAG_METHOD); 991 915 if (IS_ERR(data)) 992 916 return PTR_ERR(data); 917 + 918 + /* try to read ppag table rev 5 */ 919 + wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, 920 + ACPI_PPAG_WIFI_DATA_SIZE_V3, &tbl_rev); 921 + if (!IS_ERR(wifi_pkg)) { 922 + if (tbl_rev == 5) { 923 
+ num_sub_bands = IWL_NUM_SUB_BANDS_V3; 924 + IWL_DEBUG_RADIO(fwrt, 925 + "Reading PPAG table (tbl_rev=%d)\n", 926 + tbl_rev); 927 + goto read_table; 928 + } else { 929 + ret = -EINVAL; 930 + goto out_free; 931 + } 932 + } 993 933 994 934 /* try to read ppag table rev 1 to 4 (all have the same data size) */ 995 935 wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, ··· 1042 950 goto out_free; 1043 951 1044 952 read_table: 953 + if (WARN_ON_ONCE(num_sub_bands > 954 + ARRAY_SIZE(fwrt->ppag_chains[0].subbands))) { 955 + ret = -EINVAL; 956 + goto out_free; 957 + } 958 + 959 + BUILD_BUG_ON(ACPI_PPAG_NUM_CHAINS > 960 + ARRAY_SIZE(fwrt->ppag_chains)); 961 + 1045 962 fwrt->ppag_bios_rev = tbl_rev; 1046 963 flags = &wifi_pkg->package.elements[1]; 1047 964 ··· 1067 966 * first sub-band (j=0) corresponds to Low-Band (2.4GHz), and the 1068 967 * following sub-bands to High-Band (5GHz). 1069 968 */ 1070 - for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) { 969 + for (i = 0; i < ACPI_PPAG_NUM_CHAINS; i++) { 1071 970 for (j = 0; j < num_sub_bands; j++) { 1072 971 union acpi_object *ent; 1073 972 ··· 1081 980 } 1082 981 } 1083 982 983 + iwl_bios_print_ppag(fwrt, num_sub_bands); 1084 984 fwrt->ppag_bios_source = BIOS_SOURCE_ACPI; 1085 985 ret = 0; 1086 986
+19 -9
drivers/net/wireless/intel/iwlwifi/fw/acpi.h
··· 8 8 9 9 #include <linux/acpi.h> 10 10 #include "fw/regulatory.h" 11 - #include "fw/api/commands.h" 12 - #include "fw/api/power.h" 13 - #include "fw/api/phy.h" 14 - #include "fw/api/nvm-reg.h" 15 - #include "fw/api/config.h" 16 11 #include "fw/img.h" 17 12 #include "iwl-trans.h" 18 13 ··· 39 44 #define ACPI_SAR_NUM_SUB_BANDS_REV0 5 40 45 #define ACPI_SAR_NUM_SUB_BANDS_REV1 11 41 46 #define ACPI_SAR_NUM_SUB_BANDS_REV2 11 47 + #define ACPI_SAR_NUM_SUB_BANDS_REV3 12 42 48 43 49 #define ACPI_WRDS_WIFI_DATA_SIZE_REV0 (ACPI_SAR_NUM_CHAINS_REV0 * \ 44 50 ACPI_SAR_NUM_SUB_BANDS_REV0 + 2) ··· 47 51 ACPI_SAR_NUM_SUB_BANDS_REV1 + 2) 48 52 #define ACPI_WRDS_WIFI_DATA_SIZE_REV2 (ACPI_SAR_NUM_CHAINS_REV2 * \ 49 53 ACPI_SAR_NUM_SUB_BANDS_REV2 + 2) 54 + #define ACPI_WRDS_WIFI_DATA_SIZE_REV3 (ACPI_SAR_NUM_CHAINS_REV2 * \ 55 + ACPI_SAR_NUM_SUB_BANDS_REV3 + 2) 50 56 #define ACPI_EWRD_WIFI_DATA_SIZE_REV0 ((ACPI_SAR_PROFILE_NUM - 1) * \ 51 57 ACPI_SAR_NUM_CHAINS_REV0 * \ 52 58 ACPI_SAR_NUM_SUB_BANDS_REV0 + 3) ··· 58 60 #define ACPI_EWRD_WIFI_DATA_SIZE_REV2 ((ACPI_SAR_PROFILE_NUM - 1) * \ 59 61 ACPI_SAR_NUM_CHAINS_REV2 * \ 60 62 ACPI_SAR_NUM_SUB_BANDS_REV2 + 3) 63 + #define ACPI_EWRD_WIFI_DATA_SIZE_REV3 ((ACPI_SAR_PROFILE_NUM - 1) * \ 64 + ACPI_SAR_NUM_CHAINS_REV2 * \ 65 + ACPI_SAR_NUM_SUB_BANDS_REV3 + 3) 61 66 #define ACPI_WPFC_WIFI_DATA_SIZE 5 /* domain and 4 filter config words */ 62 67 63 68 /* revision 0 and 1 are identical, except for the semantics in the FW */ 64 69 #define ACPI_GEO_NUM_BANDS_REV0 2 65 70 #define ACPI_GEO_NUM_BANDS_REV2 3 71 + #define ACPI_GEO_NUM_BANDS_REV4 4 66 72 67 73 #define ACPI_WRDD_WIFI_DATA_SIZE 2 68 74 #define ACPI_SPLC_WIFI_DATA_SIZE 2 ··· 98 96 */ 99 97 #define ACPI_WTAS_WIFI_DATA_SIZE (3 + IWL_WTAS_BLACK_LIST_MAX) 100 98 101 - #define ACPI_PPAG_WIFI_DATA_SIZE_V1 ((IWL_NUM_CHAIN_LIMITS * \ 102 - IWL_NUM_SUB_BANDS_V1) + 2) 103 - #define ACPI_PPAG_WIFI_DATA_SIZE_V2 ((IWL_NUM_CHAIN_LIMITS * \ 104 - IWL_NUM_SUB_BANDS_V2) + 2) 99 + #define 
ACPI_PPAG_NUM_CHAINS 2 100 + #define ACPI_PPAG_NUM_BANDS_V1 5 101 + #define ACPI_PPAG_NUM_BANDS_V2 11 102 + #define ACPI_PPAG_NUM_BANDS_V3 12 103 + #define ACPI_PPAG_WIFI_DATA_SIZE_V1 ((ACPI_PPAG_NUM_CHAINS * \ 104 + ACPI_PPAG_NUM_BANDS_V1) + 2) 105 + #define ACPI_PPAG_WIFI_DATA_SIZE_V2 ((ACPI_PPAG_NUM_CHAINS * \ 106 + ACPI_PPAG_NUM_BANDS_V2) + 2) 107 + 108 + /* used for ACPI PPAG table rev 5 */ 109 + #define ACPI_PPAG_WIFI_DATA_SIZE_V3 ((ACPI_PPAG_NUM_CHAINS * \ 110 + ACPI_PPAG_NUM_BANDS_V3) + 2) 105 111 106 112 #define IWL_SAR_ENABLE_MSK BIT(0) 107 113 #define IWL_REDUCE_POWER_FLAGS_POS 1
+2 -1
drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
··· 56 56 RFH_QUEUE_CONFIG_CMD = 0xD, 57 57 58 58 /** 59 - * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd_v4 59 + * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd_v4 or 60 + * &struct iwl_tlc_config_cmd_v5 or &struct iwl_tlc_config_cmd. 60 61 */ 61 62 TLC_MNG_CONFIG_CMD = 0xF, 62 63
+156 -11
drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ 2 2 /* 3 - * Copyright (C) 2012-2014, 2018-2019, 2021-2025 Intel Corporation 3 + * Copyright (C) 2012-2014, 2018-2019, 2021-2026 Intel Corporation 4 4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH 5 5 * Copyright (C) 2016-2017 Intel Deutschland GmbH 6 6 */ ··· 26 26 */ 27 27 MISSED_VAP_NOTIF = 0xFA, 28 28 /** 29 - * @SESSION_PROTECTION_CMD: &struct iwl_mvm_session_prot_cmd 29 + * @SESSION_PROTECTION_CMD: &struct iwl_session_prot_cmd 30 30 */ 31 31 SESSION_PROTECTION_CMD = 0x5, 32 32 /** ··· 34 34 */ 35 35 CANCEL_CHANNEL_SWITCH_CMD = 0x6, 36 36 /** 37 - * @MAC_CONFIG_CMD: &struct iwl_mac_config_cmd 37 + * @MAC_CONFIG_CMD: &struct iwl_mac_config_cmd_v3 or 38 + * &struct iwl_mac_config_cmd 38 39 */ 39 40 MAC_CONFIG_CMD = 0x8, 40 41 /** ··· 43 42 */ 44 43 LINK_CONFIG_CMD = 0x9, 45 44 /** 46 - * @STA_CONFIG_CMD: &struct iwl_sta_cfg_cmd 45 + * @STA_CONFIG_CMD: &struct iwl_sta_cfg_cmd_v1, 46 + * &struct iwl_sta_cfg_cmd_v2, or &struct iwl_sta_cfg_cmd 47 47 */ 48 48 STA_CONFIG_CMD = 0xA, 49 49 /** ··· 358 356 } __packed; 359 357 360 358 /** 361 - * struct iwl_mac_config_cmd - command structure to configure MAC contexts in 359 + * struct iwl_mac_config_cmd_v3 - command structure to configure MAC contexts in 362 360 * MLD API for versions 2 and 3 363 361 * ( MAC_CONTEXT_CONFIG_CMD = 0x8 ) 364 362 * ··· 377 375 * @client: client mac data 378 376 * @p2p_dev: mac data for p2p device 379 377 */ 380 - struct iwl_mac_config_cmd { 378 + struct iwl_mac_config_cmd_v3 { 381 379 __le32 id_and_color; 382 380 __le32 action; 383 381 /* MAC_CONTEXT_TYPE_API_E */ ··· 395 393 struct iwl_mac_client_data client; 396 394 struct iwl_mac_p2p_dev_data p2p_dev; 397 395 }; 398 - } __packed; /* MAC_CONTEXT_CONFIG_CMD_API_S_VER_2_VER_3 */ 396 + } __packed; /* MAC_CONTEXT_CONFIG_CMD_API_S_VER_2, _VER_3 */ 397 + 398 + /** 399 + * struct iwl_mac_nan_data - NAN specific MAC data 400 + * @ndi_addrs: extra NDI addresses being used 
401 + * @ndi_addrs_count: number of extra NDI addresses 402 + */ 403 + struct iwl_mac_nan_data { 404 + struct { 405 + u8 addr[ETH_ALEN]; 406 + __le16 reserved; 407 + } __packed ndi_addrs[2]; 408 + __le32 ndi_addrs_count; 409 + } __packed; /* MAC_CONTEXT_CONFIG_NAN_DATA_API_S_VER_1 */ 410 + 411 + /** 412 + * struct iwl_mac_config_cmd - command structure to configure MAC contexts in 413 + * MLD API for versions 4 414 + * ( MAC_CONTEXT_CONFIG_CMD = 0x8 ) 415 + * 416 + * @id_and_color: ID and color of the MAC 417 + * @action: action to perform, see &enum iwl_ctxt_action 418 + * @mac_type: one of &enum iwl_mac_types 419 + * @local_mld_addr: mld address 420 + * @reserved_for_local_mld_addr: reserved 421 + * @filter_flags: combination of &enum iwl_mac_config_filter_flags 422 + * @wifi_gen_v2: he/eht parameters as in cmd version 2 423 + * @wifi_gen: he/eht/uhr parameters as in cmd version 3 424 + * @nic_not_ack_enabled: mark that the NIC doesn't support receiving 425 + * ACK-enabled AGG, (i.e. both BACK and non-BACK frames in single AGG). 426 + * If the NIC is not ACK_ENABLED it may use the EOF-bit in first non-0 427 + * len delim to determine if AGG or single. 
428 + * @client: client mac data 429 + * @p2p_dev: mac data for p2p device 430 + * @nan: NAN specific data (NAN data interface addresses) 431 + */ 432 + struct iwl_mac_config_cmd { 433 + __le32 id_and_color; 434 + __le32 action; 435 + /* MAC_CONTEXT_TYPE_API_E */ 436 + __le32 mac_type; 437 + u8 local_mld_addr[6]; 438 + __le16 reserved_for_local_mld_addr; 439 + __le32 filter_flags; 440 + union { 441 + struct iwl_mac_wifi_gen_support_v2 wifi_gen_v2; 442 + struct iwl_mac_wifi_gen_support wifi_gen; 443 + }; 444 + __le32 nic_not_ack_enabled; 445 + /* MAC_CONTEXT_CONFIG_SPECIFIC_DATA_API_U_VER_3 */ 446 + union { 447 + struct iwl_mac_client_data client; 448 + struct iwl_mac_p2p_dev_data p2p_dev; 449 + struct iwl_mac_nan_data nan; 450 + }; 451 + } __packed; /* MAC_CONTEXT_CONFIG_CMD_API_S_VER_4 */ 399 452 400 453 /** 401 454 * enum iwl_link_ctx_modify_flags - indicate to the fw what fields are being ··· 709 652 */ 710 653 #define IWL_FW_MAX_ACTIVE_LINKS_NUM 2 711 654 #define IWL_FW_MAX_LINK_ID 3 655 + #define IWL_FW_MAX_LINKS IWL_FW_MAX_LINK_ID + 1 712 656 713 657 /** 714 658 * enum iwl_fw_sta_type - FW station types ··· 720 662 * @STATION_TYPE_MCAST: the station used for BCAST / MCAST in GO. Will be 721 663 * suspended / resumed at the right timing depending on the clients' 722 664 * power save state and the DTIM timing 665 + * @STATION_TYPE_NAN_PEER_NMI: NAN management peer station type. A station 666 + * of this type can have any number of links (even none) set in the 667 + * link_mask. (Supported since version 3.) 668 + * @STATION_TYPE_NAN_PEER_NDI: NAN data peer station type. A station 669 + * of this type can have any number of links (even none) set in the 670 + * link_mask. (Supported since version 3.) 671 + * @STATION_TYPE_MAX: maximum number of FW station types 723 672 * @STATION_TYPE_AUX: aux sta. In the FW there is no need for a special type 724 673 * for the aux sta, so this type is only for driver - internal use. 
725 674 */ ··· 734 669 STATION_TYPE_PEER, 735 670 STATION_TYPE_BCAST_MGMT, 736 671 STATION_TYPE_MCAST, 737 - STATION_TYPE_AUX, 738 - }; /* STATION_TYPE_E_VER_1 */ 672 + STATION_TYPE_NAN_PEER_NMI, 673 + STATION_TYPE_NAN_PEER_NDI, 674 + STATION_TYPE_MAX, 675 + STATION_TYPE_AUX = STATION_TYPE_MAX /* this doesn't exist in FW */ 676 + }; /* STATION_TYPE_E_VER_1, _VER_2 */ 739 677 740 678 /** 741 679 * struct iwl_sta_cfg_cmd_v1 - cmd structure to add a peer sta to the uCode's ··· 797 729 } __packed; /* STA_CMD_API_S_VER_1 */ 798 730 799 731 /** 800 - * struct iwl_sta_cfg_cmd - cmd structure to add a peer sta to the uCode's 732 + * struct iwl_sta_cfg_cmd_v2 - cmd structure to add a peer sta to the uCode's 801 733 * station table 802 734 * ( STA_CONFIG_CMD = 0xA ) 803 735 * ··· 837 769 * @mic_compute_pad_delay: MIC compute time padding 838 770 * @reserved: Reserved for alignment 839 771 */ 840 - struct iwl_sta_cfg_cmd { 772 + struct iwl_sta_cfg_cmd_v2 { 841 773 __le32 sta_id; 842 774 __le32 link_id; 843 775 u8 peer_mld_address[ETH_ALEN]; ··· 866 798 u8 mic_compute_pad_delay; 867 799 u8 reserved[2]; 868 800 } __packed; /* STA_CMD_API_S_VER_2 */ 801 + 802 + /** 803 + * struct iwl_sta_cfg_cmd - cmd structure to add a peer sta to the uCode's 804 + * station table 805 + * ( STA_CONFIG_CMD = 0xA ) 806 + * 807 + * @sta_id: index of station in uCode's station table 808 + * @link_mask: bitmap of link FW IDs used with this STA 809 + * @peer_mld_address: the peers mld address 810 + * @reserved_for_peer_mld_address: reserved 811 + * @peer_link_address: the address of the link that is used to communicate 812 + * with this sta 813 + * @reserved_for_peer_link_address: reserved 814 + * @station_type: type of this station. See &enum iwl_fw_sta_type 815 + * @assoc_id: for GO only 816 + * @beamform_flags: beam forming controls 817 + * @mfp: indicates whether the STA uses management frame protection or not. 
818 + * @mimo: indicates whether the sta uses mimo or not 819 + * @mimo_protection: indicates whether the sta uses mimo protection or not 820 + * @ack_enabled: indicates that the AP supports receiving ACK- 821 + * enabled AGG, i.e. both BACK and non-BACK frames in a single AGG 822 + * @trig_rnd_alloc: indicates that trigger based random allocation 823 + * is enabled according to UORA element existence 824 + * @tx_ampdu_spacing: minimum A-MPDU spacing: 825 + * 4 - 2us density, 5 - 4us density, 6 - 8us density, 7 - 16us density 826 + * @tx_ampdu_max_size: maximum A-MPDU length: 0 - 8K, 1 - 16K, 2 - 32K, 827 + * 3 - 64K, 4 - 128K, 5 - 256K, 6 - 512K, 7 - 1024K. 828 + * @sp_length: the size of the SP in actual number of frames 829 + * @uapsd_acs: 4 LS bits are trigger enabled ACs, 4 MS bits are the deliver 830 + * enabled ACs. 831 + * @pkt_ext: optional, exists according to PPE-present bit in the HE/EHT-PHY 832 + * capa 833 + * @htc_flags: which features are supported in HTC 834 + * @use_ldpc_x2_cw: Indicates whether to use LDPC with double CW 835 + * @use_icf: Indicates whether to use ICF instead of RTS 836 + * @dps_pad_time: DPS (Dynamic Power Save) padding delay resolution to ensure 837 + * proper timing alignment 838 + * @dps_trans_delay: DPS minimal time that takes the peer to return to low power 839 + * @dps_enabled: flag indicating whether or not DPS is enabled 840 + * @mic_prep_pad_delay: MIC prep time padding 841 + * @mic_compute_pad_delay: MIC compute time padding 842 + * @nmi_sta_id: for an NDI peer STA, the NMI peer STA ID it relates to 843 + * @ndi_local_addr: for an NDI peer STA, the local NDI interface MAC address 844 + * @reserved: Reserved for alignment 845 + */ 846 + struct iwl_sta_cfg_cmd { 847 + __le32 sta_id; 848 + __le32 link_mask; 849 + u8 peer_mld_address[ETH_ALEN]; 850 + __le16 reserved_for_peer_mld_address; 851 + u8 peer_link_address[ETH_ALEN]; 852 + __le16 reserved_for_peer_link_address; 853 + __le32 station_type; 854 + __le32 assoc_id; 855 + 
__le32 beamform_flags; 856 + __le32 mfp; 857 + __le32 mimo; 858 + __le32 mimo_protection; 859 + __le32 ack_enabled; 860 + __le32 trig_rnd_alloc; 861 + __le32 tx_ampdu_spacing; 862 + __le32 tx_ampdu_max_size; 863 + __le32 sp_length; 864 + __le32 uapsd_acs; 865 + struct iwl_he_pkt_ext_v2 pkt_ext; 866 + __le32 htc_flags; 867 + u8 use_ldpc_x2_cw; 868 + u8 use_icf; 869 + u8 dps_pad_time; 870 + u8 dps_trans_delay; 871 + u8 dps_enabled; 872 + u8 mic_prep_pad_delay; 873 + u8 mic_compute_pad_delay; 874 + u8 nmi_sta_id; 875 + u8 ndi_local_addr[ETH_ALEN]; 876 + u8 reserved[2]; 877 + } __packed; /* STA_CMD_API_S_VER_3 */ 869 878 870 879 /** 871 880 * struct iwl_aux_sta_cmd - command for AUX STA configuration
+2 -4
drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
··· 57 57 * @FW_MAC_TYPE_P2P_DEVICE: P2P Device 58 58 * @FW_MAC_TYPE_P2P_STA: P2P client 59 59 * @FW_MAC_TYPE_GO: P2P GO 60 - * @FW_MAC_TYPE_TEST: ? 61 - * @FW_MAC_TYPE_MAX: highest support MAC type 60 + * @FW_MAC_TYPE_NAN: NAN (since version 4) 62 61 */ 63 62 enum iwl_mac_types { 64 63 FW_MAC_TYPE_FIRST = 1, ··· 69 70 FW_MAC_TYPE_P2P_DEVICE, 70 71 FW_MAC_TYPE_P2P_STA, 71 72 FW_MAC_TYPE_GO, 72 - FW_MAC_TYPE_TEST, 73 - FW_MAC_TYPE_MAX = FW_MAC_TYPE_TEST 73 + FW_MAC_TYPE_NAN, 74 74 }; /* MAC_CONTEXT_TYPE_API_E_VER_1 */ 75 75 76 76 /**
+14 -4
drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
··· 204 204 } __packed; /* REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */ 205 205 206 206 #define IWL_NUM_CHANNELS_V1 51 207 - #define IWL_NUM_CHANNELS 110 207 + #define IWL_NUM_CHANNELS_V2 110 208 208 209 209 /** 210 210 * struct iwl_nvm_get_info_regulatory_v1 - regulatory information ··· 227 227 struct iwl_nvm_get_info_regulatory { 228 228 __le32 lar_enabled; 229 229 __le32 n_channels; 230 - __le32 channel_profile[IWL_NUM_CHANNELS]; 230 + __le32 channel_profile[IWL_NUM_CHANNELS_V2]; 231 231 } __packed; /* REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_2 */ 232 232 233 233 /** ··· 701 701 #define UATS_TABLE_COL_SIZE 13 702 702 703 703 /** 704 - * struct iwl_mcc_allowed_ap_type_cmd - struct for MCC_ALLOWED_AP_TYPE_CMD 704 + * struct iwl_mcc_allowed_ap_type_cmd_v1 - struct for MCC_ALLOWED_AP_TYPE_CMD 705 705 * @mcc_to_ap_type_map: mapping an MCC to 6 GHz AP type support (UATS) 706 706 * @reserved: reserved 707 707 */ 708 - struct iwl_mcc_allowed_ap_type_cmd { 708 + struct iwl_mcc_allowed_ap_type_cmd_v1 { 709 709 u8 mcc_to_ap_type_map[UATS_TABLE_ROW_SIZE][UATS_TABLE_COL_SIZE]; 710 710 __le16 reserved; 711 711 } __packed; /* MCC_ALLOWED_AP_TYPE_CMD_API_S_VER_1 */ 712 + 713 + /** 714 + * struct iwl_mcc_allowed_ap_type_cmd - struct for MCC_ALLOWED_AP_TYPE_CMD 715 + * @mcc_to_ap_type_map: mapping an MCC to 6 GHz AP type support (UATS) 716 + * @mcc_to_ap_type_unii9_map: mapping an MCC to UNII-9 AP type support allowed 717 + */ 718 + struct iwl_mcc_allowed_ap_type_cmd { 719 + u8 mcc_to_ap_type_map[UATS_TABLE_ROW_SIZE][UATS_TABLE_COL_SIZE]; 720 + u8 mcc_to_ap_type_unii9_map[UATS_TABLE_ROW_SIZE][UATS_TABLE_COL_SIZE]; 721 + } __packed; /* MCC_ALLOWED_AP_TYPE_CMD_API_S_VER_2 */ 712 722 713 723 #endif /* __iwl_fw_api_nvm_reg_h__ */
+36 -1
drivers/net/wireless/intel/iwlwifi/fw/api/power.h
··· 269 269 #define IWL_NUM_CHAIN_LIMITS 2 270 270 #define IWL_NUM_SUB_BANDS_V1 5 271 271 #define IWL_NUM_SUB_BANDS_V2 11 272 + #define IWL_NUM_SUB_BANDS_V3 12 272 273 273 274 /** 274 275 * struct iwl_dev_tx_power_common - Common part of the TX power reduction cmd ··· 426 425 __le32 flags; 427 426 } __packed; /* TX_REDUCED_POWER_API_S_VER_10 */ 428 427 428 + struct iwl_dev_tx_power_cmd_v11 { 429 + __le16 per_chain[IWL_NUM_CHAIN_TABLES_V2][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V3]; 430 + u8 per_chain_restriction_changed; 431 + u8 reserved; 432 + __le32 timer_period; 433 + __le32 flags; 434 + } __packed; /* TX_REDUCED_POWER_API_S_VER_11 */ 435 + 429 436 /* 430 437 * struct iwl_dev_tx_power_cmd - TX power reduction command (multiversion) 431 438 * @common: common part of the command 432 439 * @v9: version 9 part of the command 433 440 * @v10: version 10 part of the command 441 + * @v11: version 11 part of the command 434 442 */ 435 443 struct iwl_dev_tx_power_cmd { 436 444 struct iwl_dev_tx_power_common common; 437 445 union { 438 446 struct iwl_dev_tx_power_cmd_v9 v9; 439 447 struct iwl_dev_tx_power_cmd_v10 v10; 448 + struct iwl_dev_tx_power_cmd_v11 v11; 440 449 }; 441 - } __packed; /* TX_REDUCED_POWER_API_S_VER_9_VER10 */ 450 + } __packed; /* TX_REDUCED_POWER_API_S_VER_9 451 + * TX_REDUCED_POWER_API_S_VER_10 452 + * TX_REDUCED_POWER_API_S_VER_11 453 + */ 442 454 443 455 #define IWL_NUM_GEO_PROFILES 3 444 456 #define IWL_NUM_GEO_PROFILES_V3 8 445 457 #define IWL_NUM_BANDS_PER_CHAIN_V1 2 446 458 #define IWL_NUM_BANDS_PER_CHAIN_V2 3 459 + #define IWL_NUM_BANDS_PER_CHAIN_V6 4 447 460 448 461 /** 449 462 * enum iwl_geo_per_chain_offset_operation - type of operation ··· 539 524 __le32 table_revision; 540 525 } __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_5 */ 541 526 527 + /** 528 + * struct iwl_geo_tx_power_profiles_cmd_v6 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. 
529 + * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation 530 + * @table: offset profile per band. 531 + * @bios_hdr: describes the revision and the source of the BIOS 532 + */ 533 + struct iwl_geo_tx_power_profiles_cmd_v6 { 534 + __le32 ops; 535 + struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES_V3][IWL_NUM_BANDS_PER_CHAIN_V6]; 536 + struct iwl_bios_config_hdr bios_hdr; 537 + } __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_6 */ 538 + 542 539 union iwl_geo_tx_power_profiles_cmd { 543 540 struct iwl_geo_tx_power_profiles_cmd_v1 v1; 544 541 struct iwl_geo_tx_power_profiles_cmd_v2 v2; 545 542 struct iwl_geo_tx_power_profiles_cmd_v3 v3; 546 543 struct iwl_geo_tx_power_profiles_cmd_v4 v4; 547 544 struct iwl_geo_tx_power_profiles_cmd_v5 v5; 545 + struct iwl_geo_tx_power_profiles_cmd_v6 v6; 548 546 }; 549 547 550 548 /** ··· 601 573 * @v1: command version 1 structure. 602 574 * @v5: command version 5 structure. 603 575 * @v7: command version 7 structure. 576 + * @v8: command version 8 structure. 604 577 * @v1.flags: values from &enum iwl_ppag_flags 605 578 * @v1.gain: table of antenna gain values per chain and sub-band 606 579 * @v1.reserved: reserved ··· 610 581 * @v7.ppag_config_info: see @struct bios_value_u32 611 582 * @v7.gain: table of antenna gain values per chain and sub-band 612 583 * @v7.reserved: reserved 584 + * @v8.ppag_config_info: see @struct bios_value_u32 585 + * @v8.gain: table of antenna gain values per chain and sub-band 613 586 */ 614 587 union iwl_ppag_table_cmd { 615 588 struct { ··· 629 598 s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2]; 630 599 s8 reserved[2]; 631 600 } __packed v7; /* PER_PLAT_ANTENNA_GAIN_CMD_API_S_VER_7 */ 601 + struct { 602 + struct bios_value_u32 ppag_config_info; 603 + s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V3]; 604 + } __packed v8; /* PER_PLAT_ANTENNA_GAIN_CMD_API_S_VER_8 */ 632 605 } __packed; 633 606 634 607 #define IWL_PPAG_CMD_V1_MASK (IWL_PPAG_ETSI_MASK | IWL_PPAG_CHINA_MASK)
+45
drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
··· 985 985 } __packed; /* SCAN_PROBE_PARAMS_API_S_VER_4 */ 986 986 987 987 #define SCAN_MAX_NUM_CHANS_V3 67 988 + #define SCAN_MAX_NUM_CHANS_V4 68 988 989 989 990 /** 990 991 * struct iwl_scan_channel_params_v4 - channel params ··· 1027 1026 u8 n_aps_override[2]; 1028 1027 struct iwl_scan_channel_cfg_umac channel_config[SCAN_MAX_NUM_CHANS_V3]; 1029 1028 } __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_6 */ 1029 + 1030 + /** 1031 + * struct iwl_scan_channel_params_v8 - channel params 1032 + * @flags: channel flags &enum iwl_scan_channel_flags 1033 + * @count: num of channels in scan request 1034 + * @n_aps_override: override the number of APs the FW uses to calculate dwell 1035 + * time when adaptive dwell is used. 1036 + * Channel k will use n_aps_override[i] when BIT(20 + i) is set in 1037 + * channel_config[k].flags 1038 + * @channel_config: array of explicit channel configurations 1039 + * for 2.4Ghz and 5.2Ghz bands 1040 + */ 1041 + struct iwl_scan_channel_params_v8 { 1042 + u8 flags; 1043 + u8 count; 1044 + u8 n_aps_override[2]; 1045 + struct iwl_scan_channel_cfg_umac channel_config[SCAN_MAX_NUM_CHANS_V4]; 1046 + } __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_8 */ 1030 1047 1031 1048 /** 1032 1049 * struct iwl_scan_general_params_v11 - channel params ··· 1129 1110 } __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_17 - 14 */ 1130 1111 1131 1112 /** 1113 + * struct iwl_scan_req_params_v18 - scan request parameters (v18) 1114 + * @general_params: &struct iwl_scan_general_params_v11 1115 + * @channel_params: &struct iwl_scan_channel_params_v8 1116 + * @periodic_params: &struct iwl_scan_periodic_parms_v1 1117 + * @probe_params: &struct iwl_scan_probe_params_v4 1118 + */ 1119 + struct iwl_scan_req_params_v18 { 1120 + struct iwl_scan_general_params_v11 general_params; 1121 + struct iwl_scan_channel_params_v8 channel_params; 1122 + struct iwl_scan_periodic_parms_v1 periodic_params; 1123 + struct iwl_scan_probe_params_v4 probe_params; 1124 + } __packed; /* 
SCAN_REQUEST_PARAMS_API_S_VER_18 */ 1125 + 1126 + /** 1132 1127 * struct iwl_scan_req_umac_v12 - scan request command (v12) 1133 1128 * @uid: scan id, &enum iwl_umac_scan_uid_offsets 1134 1129 * @ooc_priority: out of channel priority - &enum iwl_scan_priority ··· 1165 1132 __le32 ooc_priority; 1166 1133 struct iwl_scan_req_params_v17 scan_params; 1167 1134 } __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_17 - 14 */ 1135 + 1136 + /** 1137 + * struct iwl_scan_req_umac_v18 - scan request command (v18) 1138 + * @uid: scan id, &enum iwl_umac_scan_uid_offsets 1139 + * @ooc_priority: out of channel priority - &enum iwl_scan_priority 1140 + * @scan_params: scan parameters 1141 + */ 1142 + struct iwl_scan_req_umac_v18 { 1143 + __le32 uid; 1144 + __le32 ooc_priority; 1145 + struct iwl_scan_req_params_v18 scan_params; 1146 + } __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_18 */ 1168 1147 1169 1148 /** 1170 1149 * struct iwl_umac_scan_abort - scan abort command
+2 -3
drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
··· 598 598 } __packed; /* STATISTICS_NTFY_PER_STA_API_S_VER_1 */ 599 599 600 600 #define IWL_STATS_MAX_PHY_OPERATIONAL 3 601 - #define IWL_STATS_MAX_FW_LINKS (IWL_FW_MAX_LINK_ID + 1) 602 601 603 602 /** 604 603 * struct iwl_system_statistics_notif_oper - statistics notification ··· 609 610 */ 610 611 struct iwl_system_statistics_notif_oper { 611 612 __le32 time_stamp; 612 - struct iwl_stats_ntfy_per_link per_link[IWL_STATS_MAX_FW_LINKS]; 613 + struct iwl_stats_ntfy_per_link per_link[IWL_FW_MAX_LINKS]; 613 614 struct iwl_stats_ntfy_per_phy per_phy[IWL_STATS_MAX_PHY_OPERATIONAL]; 614 615 struct iwl_stats_ntfy_per_sta per_sta[IWL_STATION_COUNT_MAX]; 615 616 } __packed; /* STATISTICS_FW_NTFY_OPERATIONAL_API_S_VER_3 */ ··· 623 624 */ 624 625 struct iwl_system_statistics_part1_notif_oper { 625 626 __le32 time_stamp; 626 - struct iwl_stats_ntfy_part1_per_link per_link[IWL_STATS_MAX_FW_LINKS]; 627 + struct iwl_stats_ntfy_part1_per_link per_link[IWL_FW_MAX_LINKS]; 627 628 __le32 per_phy_crc_error_stats[IWL_STATS_MAX_PHY_OPERATIONAL]; 628 629 } __packed; /* STATISTICS_FW_NTFY_OPERATIONAL_PART1_API_S_VER_4 */ 629 630
+2 -2
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
··· 2933 2933 IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n", 2934 2934 le32_to_cpu(desc->trig_desc.type)); 2935 2935 2936 - queue_delayed_work(system_unbound_wq, &wk_data->wk, 2936 + queue_delayed_work(system_dfl_wq, &wk_data->wk, 2937 2937 usecs_to_jiffies(delay)); 2938 2938 2939 2939 return 0; ··· 3236 3236 if (sync) 3237 3237 iwl_fw_dbg_collect_sync(fwrt, idx); 3238 3238 else 3239 - queue_delayed_work(system_unbound_wq, 3239 + queue_delayed_work(system_dfl_wq, 3240 3240 &fwrt->dump.wks[idx].wk, 3241 3241 usecs_to_jiffies(delay)); 3242 3242
+1 -68
drivers/net/wireless/intel/iwlwifi/fw/dump.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 2 /* 3 - * Copyright (C) 2012-2014, 2018-2025 Intel Corporation 3 + * Copyright (C) 2012-2014, 2018-2026 Intel Corporation 4 4 * Copyright (C) 2013-2014 Intel Mobile Communications GmbH 5 5 * Copyright (C) 2015-2017 Intel Deutschland GmbH 6 6 */ ··· 128 128 129 129 IWL_ERR(fwrt, "0x%08X | %s\n", table.error_id, 130 130 iwl_fw_lookup_assert_desc(table.error_id)); 131 - IWL_ERR(fwrt, "0x%08X | umac branchlink1\n", table.blink1); 132 - IWL_ERR(fwrt, "0x%08X | umac branchlink2\n", table.blink2); 133 - IWL_ERR(fwrt, "0x%08X | umac interruptlink1\n", table.ilink1); 134 131 IWL_ERR(fwrt, "0x%08X | umac interruptlink2\n", table.ilink2); 135 132 IWL_ERR(fwrt, "0x%08X | umac data1\n", table.data1); 136 133 IWL_ERR(fwrt, "0x%08X | umac data2\n", table.data2); 137 134 IWL_ERR(fwrt, "0x%08X | umac data3\n", table.data3); 138 - IWL_ERR(fwrt, "0x%08X | umac major\n", table.umac_major); 139 - IWL_ERR(fwrt, "0x%08X | umac minor\n", table.umac_minor); 140 - IWL_ERR(fwrt, "0x%08X | frame pointer\n", table.frame_pointer); 141 - IWL_ERR(fwrt, "0x%08X | stack pointer\n", table.stack_pointer); 142 135 IWL_ERR(fwrt, "0x%08X | last host cmd\n", table.cmd_header); 143 - IWL_ERR(fwrt, "0x%08X | isr status reg\n", table.nic_isr_pref); 144 136 } 145 137 146 138 static void iwl_fwrt_dump_lmac_error_log(struct iwl_fw_runtime *fwrt, u8 lmac_num) ··· 192 200 193 201 IWL_ERR(fwrt, "0x%08X | %-28s\n", table.error_id, 194 202 iwl_fw_lookup_assert_desc(table.error_id)); 195 - IWL_ERR(fwrt, "0x%08X | trm_hw_status0\n", table.trm_hw_status0); 196 - IWL_ERR(fwrt, "0x%08X | trm_hw_status1\n", table.trm_hw_status1); 197 - IWL_ERR(fwrt, "0x%08X | branchlink2\n", table.blink2); 198 - IWL_ERR(fwrt, "0x%08X | interruptlink1\n", table.ilink1); 199 203 IWL_ERR(fwrt, "0x%08X | interruptlink2\n", table.ilink2); 200 204 IWL_ERR(fwrt, "0x%08X | data1\n", table.data1); 201 205 IWL_ERR(fwrt, "0x%08X | data2\n", table.data2); 202 206 IWL_ERR(fwrt, "0x%08X | 
data3\n", table.data3); 203 - IWL_ERR(fwrt, "0x%08X | beacon time\n", table.bcon_time); 204 - IWL_ERR(fwrt, "0x%08X | tsf low\n", table.tsf_low); 205 - IWL_ERR(fwrt, "0x%08X | tsf hi\n", table.tsf_hi); 206 - IWL_ERR(fwrt, "0x%08X | time gp1\n", table.gp1); 207 - IWL_ERR(fwrt, "0x%08X | time gp2\n", table.gp2); 208 - IWL_ERR(fwrt, "0x%08X | uCode revision type\n", table.fw_rev_type); 209 - IWL_ERR(fwrt, "0x%08X | uCode version major\n", table.major); 210 - IWL_ERR(fwrt, "0x%08X | uCode version minor\n", table.minor); 211 - IWL_ERR(fwrt, "0x%08X | hw version\n", table.hw_ver); 212 - IWL_ERR(fwrt, "0x%08X | board version\n", table.brd_ver); 213 - IWL_ERR(fwrt, "0x%08X | hcmd\n", table.hcmd); 214 - IWL_ERR(fwrt, "0x%08X | isr0\n", table.isr0); 215 - IWL_ERR(fwrt, "0x%08X | isr1\n", table.isr1); 216 - IWL_ERR(fwrt, "0x%08X | isr2\n", table.isr2); 217 - IWL_ERR(fwrt, "0x%08X | isr3\n", table.isr3); 218 - IWL_ERR(fwrt, "0x%08X | isr4\n", table.isr4); 219 - IWL_ERR(fwrt, "0x%08X | last cmd Id\n", table.last_cmd_id); 220 - IWL_ERR(fwrt, "0x%08X | wait_event\n", table.wait_event); 221 - IWL_ERR(fwrt, "0x%08X | l2p_control\n", table.l2p_control); 222 - IWL_ERR(fwrt, "0x%08X | l2p_duration\n", table.l2p_duration); 223 - IWL_ERR(fwrt, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid); 224 - IWL_ERR(fwrt, "0x%08X | l2p_addr_match\n", table.l2p_addr_match); 225 - IWL_ERR(fwrt, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel); 226 - IWL_ERR(fwrt, "0x%08X | timestamp\n", table.u_timestamp); 227 - IWL_ERR(fwrt, "0x%08X | flow_handler\n", table.flow_handler); 228 207 } 229 208 230 209 /* ··· 227 264 struct iwl_trans *trans = fwrt->trans; 228 265 struct iwl_tcm_error_event_table table = {}; 229 266 u32 base = fwrt->trans->dbg.tcm_error_event_table[idx]; 230 - int i; 231 267 u32 flag = idx ? 
IWL_ERROR_EVENT_TABLE_TCM2 : 232 268 IWL_ERROR_EVENT_TABLE_TCM1; 233 269 ··· 237 275 238 276 IWL_ERR(fwrt, "TCM%d status:\n", idx + 1); 239 277 IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id); 240 - IWL_ERR(fwrt, "0x%08X | tcm branchlink2\n", table.blink2); 241 - IWL_ERR(fwrt, "0x%08X | tcm interruptlink1\n", table.ilink1); 242 278 IWL_ERR(fwrt, "0x%08X | tcm interruptlink2\n", table.ilink2); 243 279 IWL_ERR(fwrt, "0x%08X | tcm data1\n", table.data1); 244 280 IWL_ERR(fwrt, "0x%08X | tcm data2\n", table.data2); 245 281 IWL_ERR(fwrt, "0x%08X | tcm data3\n", table.data3); 246 - IWL_ERR(fwrt, "0x%08X | tcm log PC\n", table.logpc); 247 - IWL_ERR(fwrt, "0x%08X | tcm frame pointer\n", table.frame_pointer); 248 - IWL_ERR(fwrt, "0x%08X | tcm stack pointer\n", table.stack_pointer); 249 - IWL_ERR(fwrt, "0x%08X | tcm msg ID\n", table.msgid); 250 - IWL_ERR(fwrt, "0x%08X | tcm ISR status\n", table.isr); 251 - for (i = 0; i < ARRAY_SIZE(table.hw_status); i++) 252 - IWL_ERR(fwrt, "0x%08X | tcm HW status[%d]\n", 253 - table.hw_status[i], i); 254 - for (i = 0; i < ARRAY_SIZE(table.sw_status); i++) 255 - IWL_ERR(fwrt, "0x%08X | tcm SW status[%d]\n", 256 - table.sw_status[i], i); 257 282 } 258 283 259 284 /* ··· 287 338 288 339 IWL_ERR(fwrt, "RCM%d status:\n", idx + 1); 289 340 IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id); 290 - IWL_ERR(fwrt, "0x%08X | rcm branchlink2\n", table.blink2); 291 - IWL_ERR(fwrt, "0x%08X | rcm interruptlink1\n", table.ilink1); 292 341 IWL_ERR(fwrt, "0x%08X | rcm interruptlink2\n", table.ilink2); 293 342 IWL_ERR(fwrt, "0x%08X | rcm data1\n", table.data1); 294 343 IWL_ERR(fwrt, "0x%08X | rcm data2\n", table.data2); 295 344 IWL_ERR(fwrt, "0x%08X | rcm data3\n", table.data3); 296 - IWL_ERR(fwrt, "0x%08X | rcm log PC\n", table.logpc); 297 - IWL_ERR(fwrt, "0x%08X | rcm frame pointer\n", table.frame_pointer); 298 - IWL_ERR(fwrt, "0x%08X | rcm stack pointer\n", table.stack_pointer); 299 - IWL_ERR(fwrt, "0x%08X | rcm msg ID\n", table.msgid); 300 - 
IWL_ERR(fwrt, "0x%08X | rcm ISR status\n", table.isr); 301 - IWL_ERR(fwrt, "0x%08X | frame HW status\n", table.frame_hw_status); 302 - IWL_ERR(fwrt, "0x%08X | LMAC-to-RCM request mbox\n", 303 - table.mbx_lmac_to_rcm_req); 304 - IWL_ERR(fwrt, "0x%08X | RCM-to-LMAC request mbox\n", 305 - table.mbx_rcm_to_lmac_req); 306 - IWL_ERR(fwrt, "0x%08X | MAC header control\n", table.mh_ctl); 307 - IWL_ERR(fwrt, "0x%08X | MAC header addr1 low\n", table.mh_addr1_lo); 308 - IWL_ERR(fwrt, "0x%08X | MAC header info\n", table.mh_info); 309 - IWL_ERR(fwrt, "0x%08X | MAC header error\n", table.mh_err); 310 345 } 311 346 312 347 static void iwl_fwrt_dump_iml_error_log(struct iwl_fw_runtime *fwrt)
+15
drivers/net/wireless/intel/iwlwifi/fw/file.h
··· 103 103 IWL_UCODE_TLV_D3_KEK_KCK_ADDR = 67, 104 104 IWL_UCODE_TLV_CURRENT_PC = 68, 105 105 IWL_UCODE_TLV_FSEQ_BIN_VERSION = 72, 106 + IWL_UCODE_TLV_CMD_BIOS_TABLE = 73, 106 107 107 108 /* contains sub-sections like PNVM file does (did) */ 108 109 IWL_UCODE_TLV_PNVM_DATA = 74, ··· 1039 1038 u8 group; 1040 1039 u8 cmd_ver; 1041 1040 u8 notif_ver; 1041 + } __packed; 1042 + 1043 + /** 1044 + * struct iwl_fw_cmd_bios_table - firmware command BIOS revision entry 1045 + * @cmd: command ID 1046 + * @group: group ID 1047 + * @max_acpi_revision: max supported ACPI revision of command. 1048 + * @max_uefi_revision: max supported UEFI revision of command. 1049 + */ 1050 + struct iwl_fw_cmd_bios_table { 1051 + u8 cmd; 1052 + u8 group; 1053 + u8 max_acpi_revision; 1054 + u8 max_uefi_revision; 1042 1055 } __packed; 1043 1056 1044 1057 struct iwl_fw_tcm_error_addr {
+31 -1
drivers/net/wireless/intel/iwlwifi/fw/img.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 2 /* 3 3 * Copyright(c) 2019 - 2021 Intel Corporation 4 - * Copyright(c) 2024 Intel Corporation 4 + * Copyright(c) 2024 - 2025 Intel Corporation 5 5 */ 6 6 #include <fw/api/commands.h> 7 7 #include "img.h" 8 + 9 + u8 iwl_fw_lookup_cmd_bios_supported_revision(const struct iwl_fw *fw, 10 + enum bios_source table_source, 11 + u32 cmd_id, u8 def) 12 + { 13 + const struct iwl_fw_cmd_bios_table *entry; 14 + /* prior to LONG_GROUP, we never used this CMD version API */ 15 + u8 grp = iwl_cmd_groupid(cmd_id) ?: LONG_GROUP; 16 + u8 cmd = iwl_cmd_opcode(cmd_id); 17 + 18 + if (table_source != BIOS_SOURCE_ACPI && 19 + table_source != BIOS_SOURCE_UEFI) 20 + return def; 21 + 22 + if (!fw->ucode_capa.cmd_bios_tables || 23 + !fw->ucode_capa.n_cmd_bios_tables) 24 + return def; 25 + 26 + entry = fw->ucode_capa.cmd_bios_tables; 27 + for (int i = 0; i < fw->ucode_capa.n_cmd_bios_tables; i++, entry++) { 28 + if (entry->group == grp && entry->cmd == cmd) { 29 + if (table_source == BIOS_SOURCE_ACPI) 30 + return entry->max_acpi_revision; 31 + return entry->max_uefi_revision; 32 + } 33 + } 34 + 35 + return def; 36 + } 37 + EXPORT_SYMBOL_GPL(iwl_fw_lookup_cmd_bios_supported_revision); 8 38 9 39 u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u32 cmd_id, u8 def) 10 40 {
+8
drivers/net/wireless/intel/iwlwifi/fw/img.h
··· 9 9 #include <linux/types.h> 10 10 11 11 #include "api/dbg-tlv.h" 12 + #include "api/nvm-reg.h" 12 13 13 14 #include "file.h" 14 15 #include "error-dump.h" ··· 58 57 59 58 const struct iwl_fw_cmd_version *cmd_versions; 60 59 u32 n_cmd_versions; 60 + 61 + const struct iwl_fw_cmd_bios_table *cmd_bios_tables; 62 + u32 n_cmd_bios_tables; 61 63 }; 62 64 63 65 static inline bool ··· 277 273 278 274 return &fw->img[ucode_type]; 279 275 } 276 + 277 + u8 iwl_fw_lookup_cmd_bios_supported_revision(const struct iwl_fw *fw, 278 + enum bios_source table_source, 279 + u32 cmd_id, u8 def); 280 280 281 281 u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u32 cmd_id, u8 def); 282 282
+25 -126
drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
··· 241 241 int profs[BIOS_SAR_NUM_CHAINS] = { prof_a, prof_b }; 242 242 int i, j; 243 243 244 + if (WARN_ON_ONCE(n_subbands > 245 + ARRAY_SIZE(fwrt->sar_profiles[0].chains[0].subbands))) 246 + return -EINVAL; 247 + 244 248 for (i = 0; i < BIOS_SAR_NUM_CHAINS; i++) { 245 249 struct iwl_sar_profile *prof; 246 250 ··· 304 300 } 305 301 IWL_EXPORT_SYMBOL(iwl_sar_fill_profile); 306 302 307 - static bool iwl_ppag_value_valid(struct iwl_fw_runtime *fwrt, int chain, 308 - int subband) 309 - { 310 - s8 ppag_val = fwrt->ppag_chains[chain].subbands[subband]; 311 - 312 - if ((subband == 0 && 313 - (ppag_val > IWL_PPAG_MAX_LB || ppag_val < IWL_PPAG_MIN_LB)) || 314 - (subband != 0 && 315 - (ppag_val > IWL_PPAG_MAX_HB || ppag_val < IWL_PPAG_MIN_HB))) { 316 - IWL_DEBUG_RADIO(fwrt, "Invalid PPAG value: %d\n", ppag_val); 317 - return false; 318 - } 319 - return true; 320 - } 321 - 322 - /* Utility function for iwlmvm and iwlxvt */ 323 - int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt, 324 - union iwl_ppag_table_cmd *cmd, int *cmd_size) 325 - { 326 - u8 cmd_ver; 327 - int i, j, num_sub_bands; 328 - s8 *gain; 329 - bool send_ppag_always; 330 - 331 - /* many firmware images for JF lie about this */ 332 - if (CSR_HW_RFID_TYPE(fwrt->trans->info.hw_rf_id) == 333 - CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF)) 334 - return -EOPNOTSUPP; 335 - 336 - if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) { 337 - IWL_DEBUG_RADIO(fwrt, 338 - "PPAG capability not supported by FW, command not sent.\n"); 339 - return -EINVAL; 340 - } 341 - 342 - cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw, 343 - WIDE_ID(PHY_OPS_GROUP, 344 - PER_PLATFORM_ANT_GAIN_CMD), 1); 345 - /* 346 - * Starting from ver 4, driver needs to send the PPAG CMD regardless 347 - * if PPAG is enabled/disabled or valid/invalid. 
348 - */ 349 - send_ppag_always = cmd_ver > 3; 350 - 351 - /* Don't send PPAG if it is disabled */ 352 - if (!send_ppag_always && !fwrt->ppag_flags) { 353 - IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n"); 354 - return -EINVAL; 355 - } 356 - 357 - IWL_DEBUG_RADIO(fwrt, "PPAG cmd ver is %d\n", cmd_ver); 358 - if (cmd_ver == 1) { 359 - num_sub_bands = IWL_NUM_SUB_BANDS_V1; 360 - gain = cmd->v1.gain[0]; 361 - *cmd_size = sizeof(cmd->v1); 362 - cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags & IWL_PPAG_CMD_V1_MASK); 363 - if (fwrt->ppag_bios_rev >= 1) { 364 - /* in this case FW supports revision 0 */ 365 - IWL_DEBUG_RADIO(fwrt, 366 - "PPAG table rev is %d, send truncated table\n", 367 - fwrt->ppag_bios_rev); 368 - } 369 - } else if (cmd_ver == 5) { 370 - num_sub_bands = IWL_NUM_SUB_BANDS_V2; 371 - gain = cmd->v5.gain[0]; 372 - *cmd_size = sizeof(cmd->v5); 373 - cmd->v5.flags = cpu_to_le32(fwrt->ppag_flags & IWL_PPAG_CMD_V5_MASK); 374 - if (fwrt->ppag_bios_rev == 0) { 375 - /* in this case FW supports revisions 1,2 or 3 */ 376 - IWL_DEBUG_RADIO(fwrt, 377 - "PPAG table rev is 0, send padded table\n"); 378 - } 379 - } else if (cmd_ver == 7) { 380 - num_sub_bands = IWL_NUM_SUB_BANDS_V2; 381 - gain = cmd->v7.gain[0]; 382 - *cmd_size = sizeof(cmd->v7); 383 - cmd->v7.ppag_config_info.hdr.table_source = 384 - fwrt->ppag_bios_source; 385 - cmd->v7.ppag_config_info.hdr.table_revision = 386 - fwrt->ppag_bios_rev; 387 - cmd->v7.ppag_config_info.value = cpu_to_le32(fwrt->ppag_flags); 388 - } else { 389 - IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n"); 390 - return -EINVAL; 391 - } 392 - 393 - /* ppag mode */ 394 - IWL_DEBUG_RADIO(fwrt, 395 - "PPAG MODE bits were read from bios: %d\n", 396 - fwrt->ppag_flags); 397 - 398 - if (cmd_ver == 1 && 399 - !fw_has_capa(&fwrt->fw->ucode_capa, 400 - IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) { 401 - cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK); 402 - IWL_DEBUG_RADIO(fwrt, "masking ppag China bit\n"); 403 - } else 
{ 404 - IWL_DEBUG_RADIO(fwrt, "isn't masking ppag China bit\n"); 405 - } 406 - 407 - /* The 'flags' field is the same in v1 and v5 so we can just 408 - * use v1 to access it. 409 - */ 410 - IWL_DEBUG_RADIO(fwrt, 411 - "PPAG MODE bits going to be sent: %d\n", 412 - (cmd_ver < 7) ? le32_to_cpu(cmd->v1.flags) : 413 - le32_to_cpu(cmd->v7.ppag_config_info.value)); 414 - 415 - for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) { 416 - for (j = 0; j < num_sub_bands; j++) { 417 - if (!send_ppag_always && 418 - !iwl_ppag_value_valid(fwrt, i, j)) 419 - return -EINVAL; 420 - 421 - gain[i * num_sub_bands + j] = 422 - fwrt->ppag_chains[i].subbands[j]; 423 - IWL_DEBUG_RADIO(fwrt, 424 - "PPAG table: chain[%d] band[%d]: gain = %d\n", 425 - i, j, gain[i * num_sub_bands + j]); 426 - } 427 - } 428 - 429 - return 0; 430 - } 431 - IWL_EXPORT_SYMBOL(iwl_fill_ppag_table); 432 - 433 303 bool iwl_is_ppag_approved(struct iwl_fw_runtime *fwrt) 434 304 { 435 305 if (!dmi_check_system(dmi_ppag_approved_list)) { ··· 317 439 return true; 318 440 } 319 441 IWL_EXPORT_SYMBOL(iwl_is_ppag_approved); 442 + 443 + /* Print the PPAG table as read from BIOS */ 444 + void iwl_bios_print_ppag(struct iwl_fw_runtime *fwrt, int n_subbands) 445 + { 446 + int i, j; 447 + 448 + IWL_DEBUG_RADIO(fwrt, "PPAG table as read from BIOS:\n"); 449 + IWL_DEBUG_RADIO(fwrt, "PPAG revision = %d\n", fwrt->ppag_bios_rev); 450 + IWL_DEBUG_RADIO(fwrt, "PPAG flags = 0x%x\n", fwrt->ppag_flags); 451 + 452 + if (WARN_ON_ONCE(n_subbands > 453 + ARRAY_SIZE(fwrt->ppag_chains[0].subbands))) 454 + return; 455 + 456 + for (i = 0; i < ARRAY_SIZE(fwrt->ppag_chains); i++) 457 + for (j = 0; j < n_subbands; j++) 458 + IWL_DEBUG_RADIO(fwrt, 459 + "ppag_chains[%d].subbands[%d] = %d\n", 460 + i, j, 461 + fwrt->ppag_chains[i].subbands[j]); 462 + } 320 463 321 464 bool iwl_is_tas_approved(void) 322 465 {
+7 -7
drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
··· 21 21 */ 22 22 #define BIOS_SAR_MAX_CHAINS_PER_PROFILE 4 23 23 #define BIOS_SAR_NUM_CHAINS 2 24 - #define BIOS_SAR_MAX_SUB_BANDS_NUM 11 24 + #define BIOS_SAR_MAX_SUB_BANDS_NUM 12 25 + #define BIOS_PPAG_MAX_SUB_BANDS_NUM 12 25 26 26 27 #define BIOS_GEO_NUM_CHAINS 2 27 - #define BIOS_GEO_MAX_NUM_BANDS 3 28 + #define BIOS_GEO_MAX_NUM_BANDS 4 28 29 #define BIOS_GEO_MAX_PROFILE_NUM 8 29 30 #define BIOS_GEO_MIN_PROFILE_NUM 3 30 31 ··· 101 100 102 101 /* Same thing as with SAR, all revisions fit in revision 2 */ 103 102 struct iwl_ppag_chain { 104 - s8 subbands[BIOS_SAR_MAX_SUB_BANDS_NUM]; 103 + s8 subbands[BIOS_PPAG_MAX_SUB_BANDS_NUM]; 105 104 }; 106 105 107 106 struct iwl_tas_data { ··· 181 180 182 181 struct iwl_fw_runtime; 183 182 183 + /* Print the PPAG table as read from BIOS */ 184 + void iwl_bios_print_ppag(struct iwl_fw_runtime *fwrt, int n_subbands); 185 + 184 186 bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt); 185 187 186 188 int iwl_sar_geo_fill_table(struct iwl_fw_runtime *fwrt, ··· 193 189 int iwl_sar_fill_profile(struct iwl_fw_runtime *fwrt, 194 190 __le16 *per_chain, u32 n_tables, u32 n_subbands, 195 191 int prof_a, int prof_b); 196 - 197 - int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt, 198 - union iwl_ppag_table_cmd *cmd, 199 - int *cmd_size); 200 192 201 193 bool iwl_is_ppag_approved(struct iwl_fw_runtime *fwrt); 202 194
+6 -4
drivers/net/wireless/intel/iwlwifi/fw/runtime.h
··· 106 106 * @cur_fw_img: current firmware image, must be maintained by 107 107 * the driver by calling &iwl_fw_set_current_image() 108 108 * @dump: debug dump data 109 - * @uats_table: AP type table 110 - * @uats_valid: is AP type table valid 109 + * @ap_type_cmd: AP type tables (for enablement on 6 GHz) 110 + * @ap_type_cmd_valid: if &ap_type_cmd is valid 111 111 * @uefi_tables_lock_status: The status of the WIFI GUID UEFI variables lock: 112 112 * 0: Unlocked, 1 and 2: Locked. 113 113 * Only read the UEFI variables if locked. 114 114 * @sar_profiles: sar profiles as read from WRDS/EWRD BIOS tables 115 115 * @geo_profiles: geographic profiles as read from WGDS BIOS table 116 + * @geo_bios_source: see &enum bios_source 116 117 * @phy_filters: specific phy filters as read from WPFC BIOS table 117 118 * @ppag_bios_rev: PPAG BIOS revision 118 119 * @ppag_bios_source: see &enum bios_source ··· 205 204 u8 sar_chain_b_profile; 206 205 u8 reduced_power_flags; 207 206 struct iwl_geo_profile geo_profiles[BIOS_GEO_MAX_PROFILE_NUM]; 207 + enum bios_source geo_bios_source; 208 208 u32 geo_rev; 209 209 u32 geo_num_profiles; 210 210 bool geo_enabled; ··· 215 213 u8 ppag_bios_source; 216 214 struct iwl_sar_offset_mapping_cmd sgom_table; 217 215 bool sgom_enabled; 218 - struct iwl_mcc_allowed_ap_type_cmd uats_table; 219 - bool uats_valid; 216 + struct iwl_mcc_allowed_ap_type_cmd ap_type_cmd; 217 + bool ap_type_cmd_valid; 220 218 u8 uefi_tables_lock_status; 221 219 struct iwl_phy_specific_cfg phy_filters; 222 220 enum bios_source dsm_source;
+203 -35
drivers/net/wireless/intel/iwlwifi/fw/uefi.c
··· 402 402 if (uats_data->revision != 1) 403 403 return -EINVAL; 404 404 405 - memcpy(fwrt->uats_table.mcc_to_ap_type_map, 405 + memcpy(fwrt->ap_type_cmd.mcc_to_ap_type_map, 406 406 uats_data->mcc_to_ap_type_map, 407 - sizeof(fwrt->uats_table.mcc_to_ap_type_map)); 407 + sizeof(fwrt->ap_type_cmd.mcc_to_ap_type_map)); 408 408 409 - fwrt->uats_valid = true; 409 + fwrt->ap_type_cmd_valid = true; 410 410 411 411 return 0; 412 412 } ··· 429 429 } 430 430 IWL_EXPORT_SYMBOL(iwl_uefi_get_uats_table); 431 431 432 - static void iwl_uefi_set_sar_profile(struct iwl_fw_runtime *fwrt, 433 - struct uefi_sar_profile *uefi_sar_prof, 434 - u8 prof_index, bool enabled) 432 + void iwl_uefi_get_uneb_table(struct iwl_trans *trans, 433 + struct iwl_fw_runtime *fwrt) 435 434 { 436 - memcpy(&fwrt->sar_profiles[prof_index].chains, uefi_sar_prof, 437 - sizeof(struct uefi_sar_profile)); 435 + struct uefi_cnv_wlan_uneb_data *data; 436 + 437 + data = iwl_uefi_get_verified_variable(trans, IWL_UEFI_UNEB_NAME, 438 + "UNEB", sizeof(*data), NULL); 439 + if (IS_ERR(data)) 440 + return; 441 + 442 + if (data->revision != 1) { 443 + IWL_DEBUG_RADIO(fwrt, 444 + "Cannot read UNEB table. 
rev is invalid\n"); 445 + goto out; 446 + } 447 + 448 + BUILD_BUG_ON(sizeof(data->mcc_to_ap_type_map) != 449 + sizeof(fwrt->ap_type_cmd.mcc_to_ap_type_unii9_map)); 450 + 451 + memcpy(fwrt->ap_type_cmd.mcc_to_ap_type_unii9_map, 452 + data->mcc_to_ap_type_map, 453 + sizeof(fwrt->ap_type_cmd.mcc_to_ap_type_unii9_map)); 454 + 455 + fwrt->ap_type_cmd_valid = true; 456 + 457 + out: 458 + kfree(data); 459 + } 460 + IWL_EXPORT_SYMBOL(iwl_uefi_get_uneb_table); 461 + 462 + static void iwl_uefi_set_sar_profile(struct iwl_fw_runtime *fwrt, 463 + const u8 *vals, u8 prof_index, 464 + u8 num_subbands, bool enabled) 465 + { 466 + struct iwl_sar_profile *sar_prof = &fwrt->sar_profiles[prof_index]; 467 + 468 + /* 469 + * Make sure fwrt has enough room to hold the data 470 + * coming from the UEFI table 471 + */ 472 + if (WARN_ON(ARRAY_SIZE(sar_prof->chains) * 473 + ARRAY_SIZE(sar_prof->chains[0].subbands) < 474 + UEFI_SAR_MAX_CHAINS_PER_PROFILE * num_subbands)) 475 + return; 476 + 477 + BUILD_BUG_ON(ARRAY_SIZE(sar_prof->chains) != 478 + UEFI_SAR_MAX_CHAINS_PER_PROFILE); 479 + 480 + for (int chain = 0; 481 + chain < UEFI_SAR_MAX_CHAINS_PER_PROFILE; 482 + chain++) { 483 + for (int subband = 0; subband < num_subbands; subband++) 484 + sar_prof->chains[chain].subbands[subband] = 485 + vals[chain * num_subbands + subband]; 486 + } 438 487 439 488 fwrt->sar_profiles[prof_index].enabled = enabled & IWL_SAR_ENABLE_MSK; 440 489 } ··· 491 442 int iwl_uefi_get_wrds_table(struct iwl_fw_runtime *fwrt) 492 443 { 493 444 struct uefi_cnv_var_wrds *data; 445 + unsigned long size; 446 + unsigned long expected_size; 447 + int num_subbands; 494 448 int ret = 0; 495 449 496 450 data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WRDS_NAME, 497 - "WRDS", sizeof(*data), NULL); 451 + "WRDS", 452 + UEFI_SAR_WRDS_TABLE_SIZE_REV2, 453 + &size); 454 + 498 455 if (IS_ERR(data)) 499 456 return -EINVAL; 500 457 501 - if (data->revision != IWL_UEFI_WRDS_REVISION) { 502 - ret = -EINVAL; 503 - 
IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WRDS revision:%d\n", 458 + switch (data->revision) { 459 + case 2: 460 + expected_size = UEFI_SAR_WRDS_TABLE_SIZE_REV2; 461 + num_subbands = UEFI_SAR_SUB_BANDS_NUM_REV2; 462 + break; 463 + case 3: 464 + expected_size = UEFI_SAR_WRDS_TABLE_SIZE_REV3; 465 + num_subbands = UEFI_SAR_SUB_BANDS_NUM_REV3; 466 + break; 467 + default: 468 + IWL_DEBUG_RADIO(fwrt, 469 + "Unsupported UEFI WRDS revision:%d\n", 504 470 data->revision); 471 + ret = -EINVAL; 472 + goto out; 473 + } 474 + 475 + if (size != expected_size) { 476 + ret = -EINVAL; 505 477 goto out; 506 478 } 507 479 508 480 /* The profile from WRDS is officially profile 1, but goes 509 481 * into sar_profiles[0] (because we don't have a profile 0). 510 482 */ 511 - iwl_uefi_set_sar_profile(fwrt, &data->sar_profile, 0, data->mode); 483 + iwl_uefi_set_sar_profile(fwrt, data->vals, 0, 484 + num_subbands, data->mode); 512 485 out: 513 486 kfree(data); 514 487 return ret; ··· 539 468 int iwl_uefi_get_ewrd_table(struct iwl_fw_runtime *fwrt) 540 469 { 541 470 struct uefi_cnv_var_ewrd *data; 471 + unsigned long expected_size; 542 472 int i, ret = 0; 473 + unsigned long size; 474 + int num_subbands; 475 + int profile_size; 543 476 544 477 data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_EWRD_NAME, 545 - "EWRD", sizeof(*data), NULL); 478 + "EWRD", 479 + UEFI_SAR_EWRD_TABLE_SIZE_REV2, 480 + &size); 546 481 if (IS_ERR(data)) 547 482 return -EINVAL; 548 483 549 - if (data->revision != IWL_UEFI_EWRD_REVISION) { 550 - ret = -EINVAL; 551 - IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI EWRD revision:%d\n", 484 + switch (data->revision) { 485 + case 2: 486 + expected_size = UEFI_SAR_EWRD_TABLE_SIZE_REV2; 487 + num_subbands = UEFI_SAR_SUB_BANDS_NUM_REV2; 488 + profile_size = UEFI_SAR_PROFILE_SIZE_REV2; 489 + break; 490 + case 3: 491 + expected_size = UEFI_SAR_EWRD_TABLE_SIZE_REV3; 492 + num_subbands = UEFI_SAR_SUB_BANDS_NUM_REV3; 493 + profile_size = UEFI_SAR_PROFILE_SIZE_REV3; 494 + break; 
495 + default: 496 + IWL_DEBUG_RADIO(fwrt, 497 + "Unsupported UEFI EWRD revision:%d\n", 552 498 data->revision); 499 + ret = -EINVAL; 553 500 goto out; 554 501 } 555 502 556 - if (data->num_profiles >= BIOS_SAR_MAX_PROFILE_NUM) { 503 + if (size != expected_size || 504 + data->num_profiles >= BIOS_SAR_MAX_PROFILE_NUM) { 557 505 ret = -EINVAL; 558 506 goto out; 559 507 } ··· 582 492 * save them in sar_profiles[1-3] (because we don't 583 493 * have profile 0). So in the array we start from 1. 584 494 */ 585 - iwl_uefi_set_sar_profile(fwrt, &data->sar_profiles[i], i + 1, 586 - data->mode); 495 + iwl_uefi_set_sar_profile(fwrt, &data->vals[i * profile_size], 496 + i + 1, num_subbands, data->mode); 587 497 588 498 out: 589 499 kfree(data); ··· 593 503 int iwl_uefi_get_wgds_table(struct iwl_fw_runtime *fwrt) 594 504 { 595 505 struct uefi_cnv_var_wgds *data; 596 - int i, ret = 0; 506 + unsigned long expected_size; 507 + unsigned long size; 508 + int profile_size; 509 + int n_subbands; 510 + int ret = 0; 597 511 598 512 data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WGDS_NAME, 599 - "WGDS", sizeof(*data), NULL); 513 + "WGDS", UEFI_WGDS_TABLE_SIZE_REV3, 514 + &size); 600 515 if (IS_ERR(data)) 601 516 return -EINVAL; 602 517 603 - if (data->revision != IWL_UEFI_WGDS_REVISION) { 518 + switch (data->revision) { 519 + case 3: 520 + expected_size = UEFI_WGDS_TABLE_SIZE_REV3; 521 + n_subbands = UEFI_GEO_NUM_BANDS_REV3; 522 + break; 523 + case 4: 524 + expected_size = UEFI_WGDS_TABLE_SIZE_REV4; 525 + n_subbands = UEFI_GEO_NUM_BANDS_REV4; 526 + break; 527 + default: 604 528 ret = -EINVAL; 605 529 IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WGDS revision:%d\n", 606 530 data->revision); 531 + goto out; 532 + } 533 + 534 + if (size != expected_size) { 535 + ret = -EINVAL; 607 536 goto out; 608 537 } 609 538 ··· 634 525 goto out; 635 526 } 636 527 528 + if (WARN_ON(BIOS_GEO_MAX_PROFILE_NUM > 529 + ARRAY_SIZE(fwrt->geo_profiles) || 530 + n_subbands > 
ARRAY_SIZE(fwrt->geo_profiles[0].bands) || 531 + BIOS_GEO_NUM_CHAINS > 532 + ARRAY_SIZE(fwrt->geo_profiles[0].bands[0].chains))) { 533 + ret = -EINVAL; 534 + goto out; 535 + } 536 + 637 537 fwrt->geo_rev = data->revision; 638 - for (i = 0; i < data->num_profiles; i++) 639 - memcpy(&fwrt->geo_profiles[i], &data->geo_profiles[i], 640 - sizeof(struct iwl_geo_profile)); 538 + fwrt->geo_bios_source = BIOS_SOURCE_UEFI; 539 + profile_size = 3 * n_subbands; 540 + for (int prof = 0; prof < data->num_profiles; prof++) { 541 + const u8 *val = &data->vals[profile_size * prof]; 542 + struct iwl_geo_profile *geo_prof = &fwrt->geo_profiles[prof]; 543 + 544 + for (int subband = 0; subband < n_subbands; subband++) { 545 + geo_prof->bands[subband].max = *val++; 546 + 547 + for (int chain = 0; 548 + chain < BIOS_GEO_NUM_CHAINS; 549 + chain++) 550 + geo_prof->bands[subband].chains[chain] = *val++; 551 + } 552 + } 641 553 642 554 fwrt->geo_num_profiles = data->num_profiles; 643 555 fwrt->geo_enabled = true; ··· 670 540 int iwl_uefi_get_ppag_table(struct iwl_fw_runtime *fwrt) 671 541 { 672 542 struct uefi_cnv_var_ppag *data; 543 + int n_subbands; 544 + u32 valid_rev; 673 545 int ret = 0; 674 546 675 547 data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_PPAG_NAME, 676 - "PPAG", sizeof(*data), NULL); 677 - if (IS_ERR(data)) 678 - return -EINVAL; 548 + "PPAG", UEFI_PPAG_DATA_SIZE_V5, 549 + NULL); 550 + if (!IS_ERR(data)) { 551 + n_subbands = UEFI_PPAG_SUB_BANDS_NUM_REV5; 552 + valid_rev = BIT(5); 679 553 680 - if (data->revision < IWL_UEFI_MIN_PPAG_REV || 681 - data->revision > IWL_UEFI_MAX_PPAG_REV) { 554 + goto parse_table; 555 + } 556 + 557 + data = iwl_uefi_get_verified_variable(fwrt->trans, 558 + IWL_UEFI_PPAG_NAME, 559 + "PPAG", 560 + UEFI_PPAG_DATA_SIZE_V4, 561 + NULL); 562 + if (!IS_ERR(data)) { 563 + n_subbands = UEFI_PPAG_SUB_BANDS_NUM_REV4; 564 + /* revisions 1-4 have all the same size */ 565 + valid_rev = BIT(1) | BIT(2) | BIT(3) | BIT(4); 566 + 567 + goto 
parse_table; 568 + } 569 + 570 + return -EINVAL; 571 + 572 + parse_table: 573 + if (!(BIT(data->revision) & valid_rev)) { 682 574 ret = -EINVAL; 683 - IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI PPAG revision:%d\n", 575 + IWL_DEBUG_RADIO(fwrt, 576 + "Unsupported UEFI PPAG revision:%d\n", 684 577 data->revision); 578 + goto out; 579 + } 580 + 581 + /* 582 + * Make sure fwrt has enough room to hold 583 + * data coming from the UEFI table 584 + */ 585 + if (WARN_ON(ARRAY_SIZE(fwrt->ppag_chains) * 586 + ARRAY_SIZE(fwrt->ppag_chains[0].subbands) < 587 + UEFI_PPAG_NUM_CHAINS * n_subbands)) { 588 + ret = -EINVAL; 685 589 goto out; 686 590 } 687 591 ··· 723 559 fwrt->ppag_flags = iwl_bios_get_ppag_flags(data->ppag_modes, 724 560 fwrt->ppag_bios_rev); 725 561 726 - BUILD_BUG_ON(sizeof(fwrt->ppag_chains) != sizeof(data->ppag_chains)); 727 - memcpy(&fwrt->ppag_chains, &data->ppag_chains, 728 - sizeof(data->ppag_chains)); 562 + for (int chain = 0; chain < UEFI_PPAG_NUM_CHAINS; chain++) { 563 + for (int subband = 0; subband < n_subbands; subband++) 564 + fwrt->ppag_chains[chain].subbands[subband] = 565 + data->vals[chain * n_subbands + subband]; 566 + } 567 + 568 + iwl_bios_print_ppag(fwrt, n_subbands); 729 569 fwrt->ppag_bios_source = BIOS_SOURCE_UEFI; 730 570 out: 731 571 kfree(data);
+110 -31
drivers/net/wireless/intel/iwlwifi/fw/uefi.h
··· 25 25 #define IWL_UEFI_PUNCTURING_NAME L"UefiCnvWlanPuncturing" 26 26 #define IWL_UEFI_DSBR_NAME L"UefiCnvCommonDSBR" 27 27 #define IWL_UEFI_WPFC_NAME L"WPFC" 28 + #define IWL_UEFI_UNEB_NAME L"CnvUefiWlanUNEB" 28 29 29 30 30 31 #define IWL_SGOM_MAP_SIZE 339 31 32 #define IWL_UATS_MAP_SIZE 339 32 33 33 - #define IWL_UEFI_WRDS_REVISION 2 34 - #define IWL_UEFI_EWRD_REVISION 2 35 - #define IWL_UEFI_WGDS_REVISION 3 36 - #define IWL_UEFI_MIN_PPAG_REV 1 37 - #define IWL_UEFI_MAX_PPAG_REV 4 38 34 #define IWL_UEFI_MIN_WTAS_REVISION 1 39 35 #define IWL_UEFI_MAX_WTAS_REVISION 2 40 36 #define IWL_UEFI_SPLC_REVISION 0 ··· 59 63 u8 mcc_to_ap_type_map[IWL_UATS_MAP_SIZE - 1]; 60 64 } __packed; 61 65 66 + /* UNEB's layout is identical to UATS's */ 67 + #define uefi_cnv_wlan_uneb_data uefi_cnv_wlan_uats_data 68 + 62 69 struct uefi_cnv_common_step_data { 63 70 u8 revision; 64 71 u8 step_mode; ··· 71 72 u8 radio2; 72 73 } __packed; 73 74 74 - /* 75 - * struct uefi_sar_profile - a SAR profile as defined in UEFI 76 - * 77 - * @chains: a per-chain table of SAR values 78 - */ 79 - struct uefi_sar_profile { 80 - struct iwl_sar_profile_chain chains[BIOS_SAR_MAX_CHAINS_PER_PROFILE]; 81 - } __packed; 75 + #define UEFI_PPAG_SUB_BANDS_NUM_REV4 11 76 + #define UEFI_PPAG_SUB_BANDS_NUM_REV5 12 77 + #define UEFI_PPAG_NUM_CHAINS 2 82 78 83 - /* 79 + #define UEFI_SAR_SUB_BANDS_NUM_REV2 11 80 + #define UEFI_SAR_SUB_BANDS_NUM_REV3 12 81 + 82 + #define UEFI_SAR_MAX_CHAINS_PER_PROFILE 4 83 + 84 + #define UEFI_GEO_NUM_BANDS_REV3 3 85 + #define UEFI_GEO_NUM_BANDS_REV4 4 86 + 87 + /** 84 88 * struct uefi_cnv_var_wrds - WRDS table as defined in UEFI 85 89 * 86 90 * @revision: the revision of the table 87 91 * @mode: is WRDS enbaled/disabled 88 - * @sar_profile: sar profile #1 92 + * @vals: values for sar profile #1 as an array: 93 + * vals[chain * num_of_subbands + subband] will return the right value. 94 + * num_of_subbands depends on the revision. 
For revision 3, it is 95 + * %UEFI_SAR_SUB_BANDS_NUM_REV3, for earlier revision, it is 96 + * %UEFI_SAR_SUB_BANDS_NUM_REV2. 97 + * The max number of chains is currently 2 89 98 */ 90 99 struct uefi_cnv_var_wrds { 91 100 u8 revision; 92 101 u32 mode; 93 - struct uefi_sar_profile sar_profile; 102 + u8 vals[]; 94 103 } __packed; 95 104 96 - /* 105 + #define UEFI_SAR_PROFILE_SIZE_REV2 \ 106 + (sizeof(u8) * UEFI_SAR_MAX_CHAINS_PER_PROFILE * \ 107 + UEFI_SAR_SUB_BANDS_NUM_REV2) 108 + 109 + #define UEFI_SAR_PROFILE_SIZE_REV3 \ 110 + (sizeof(u8) * UEFI_SAR_MAX_CHAINS_PER_PROFILE * \ 111 + UEFI_SAR_SUB_BANDS_NUM_REV3) 112 + 113 + #define UEFI_SAR_WRDS_TABLE_SIZE_REV2 \ 114 + (offsetof(struct uefi_cnv_var_wrds, vals) + \ 115 + UEFI_SAR_PROFILE_SIZE_REV2) 116 + 117 + #define UEFI_SAR_WRDS_TABLE_SIZE_REV3 \ 118 + (offsetof(struct uefi_cnv_var_wrds, vals) + \ 119 + UEFI_SAR_PROFILE_SIZE_REV3) 120 + 121 + /** 97 122 * struct uefi_cnv_var_ewrd - EWRD table as defined in UEFI 98 123 * @revision: the revision of the table 99 124 * @mode: is WRDS enbaled/disabled 100 125 * @num_profiles: how many additional profiles we have in this table (0-3) 101 - * @sar_profiles: the additional SAR profiles (#2-#4) 126 + * @vals: the additional SAR profiles (#2-#4) as an array of SAR profiles. 127 + * A SAR profile is defined the &struct uefi_cnv_var_wrds::vals. The size 128 + * of each profile depends on the number of subbands which depends on the 129 + * revision. This is explained in &struct uefi_cnv_var_wrds. 
102 130 */ 103 131 struct uefi_cnv_var_ewrd { 104 132 u8 revision; 105 133 u32 mode; 106 134 u32 num_profiles; 107 - struct uefi_sar_profile sar_profiles[BIOS_SAR_MAX_PROFILE_NUM - 1]; 135 + u8 vals[]; 108 136 } __packed; 109 137 110 - /* 138 + #define UEFI_SAR_EWRD_TABLE_SIZE_REV2 \ 139 + (offsetof(struct uefi_cnv_var_ewrd, vals) + \ 140 + UEFI_SAR_PROFILE_SIZE_REV2 * (BIOS_SAR_MAX_PROFILE_NUM - 1)) 141 + 142 + #define UEFI_SAR_EWRD_TABLE_SIZE_REV3 \ 143 + (offsetof(struct uefi_cnv_var_ewrd, vals) + \ 144 + UEFI_SAR_PROFILE_SIZE_REV3 * (BIOS_SAR_MAX_PROFILE_NUM - 1)) 145 + 146 + /** 111 147 * struct uefi_cnv_var_wgds - WGDS table as defined in UEFI 112 148 * @revision: the revision of the table 113 149 * @num_profiles: the number of geo profiles we have in the table. 114 150 * The first 3 are mandatory, and can have up to 8. 115 - * @geo_profiles: a per-profile table of the offsets to add to SAR values. 151 + * @vals: a per-profile table of the offsets to add to SAR values. This is an 152 + * array of profiles, each profile is an array of 153 + * &struct iwl_geo_profile_band, one for each subband. 154 + * There are %UEFI_GEO_NUM_BANDS_REV3 or %UEFI_GEO_NUM_BANDS_REV4 subbands 155 + * depending on the revision. 
116 156 */ 117 157 struct uefi_cnv_var_wgds { 118 158 u8 revision; 119 159 u8 num_profiles; 120 - struct iwl_geo_profile geo_profiles[BIOS_GEO_MAX_PROFILE_NUM]; 160 + u8 vals[]; 121 161 } __packed; 122 162 123 - /* 163 + /* struct iwl_geo_profile_band is 3 bytes-long, but since it is not packed, 164 + * we can't use sizeof() 165 + */ 166 + #define UEFI_WGDS_PROFILE_SIZE_REV3 (sizeof(u8) * 3 * UEFI_GEO_NUM_BANDS_REV3) 167 + 168 + #define UEFI_WGDS_PROFILE_SIZE_REV4 (sizeof(u8) * 3 * UEFI_GEO_NUM_BANDS_REV4) 169 + 170 + #define UEFI_WGDS_TABLE_SIZE_REV3 \ 171 + (offsetof(struct uefi_cnv_var_wgds, vals) + \ 172 + UEFI_WGDS_PROFILE_SIZE_REV3 * BIOS_GEO_MAX_PROFILE_NUM) 173 + 174 + #define UEFI_WGDS_TABLE_SIZE_REV4 \ 175 + (offsetof(struct uefi_cnv_var_wgds, vals) + \ 176 + UEFI_WGDS_PROFILE_SIZE_REV4 * BIOS_GEO_MAX_PROFILE_NUM) 177 + 178 + /** 124 179 * struct uefi_cnv_var_ppag - PPAG table as defined in UEFI 125 180 * @revision: the revision of the table 126 181 * @ppag_modes: values from &enum iwl_ppag_flags 127 - * @ppag_chains: the PPAG values per chain and band 182 + * @vals: the PPAG values per chain and band as an array. 183 + * vals[chain * num_of_subbands + subband] will return the right value. 184 + * num_of_subbands depends on the revision. For revision 5, it is 185 + * %UEFI_PPAG_SUB_BANDS_NUM_REV5, for earlier revision it is 186 + * %UEFI_PPAG_SUB_BANDS_NUM_REV4. 
187 + * the max number of chains is currently 2 128 188 */ 129 189 struct uefi_cnv_var_ppag { 130 190 u8 revision; 131 191 u32 ppag_modes; 132 - struct iwl_ppag_chain ppag_chains[IWL_NUM_CHAIN_LIMITS]; 192 + s8 vals[]; 133 193 } __packed; 134 194 135 - /* struct uefi_cnv_var_wtas - WTAS tabled as defined in UEFI 195 + #define UEFI_PPAG_DATA_SIZE_V4 \ 196 + (offsetof(struct uefi_cnv_var_ppag, vals) + \ 197 + sizeof(s8) * UEFI_PPAG_NUM_CHAINS * UEFI_PPAG_SUB_BANDS_NUM_REV4) 198 + #define UEFI_PPAG_DATA_SIZE_V5 \ 199 + (offsetof(struct uefi_cnv_var_ppag, vals) + \ 200 + sizeof(s8) * UEFI_PPAG_NUM_CHAINS * UEFI_PPAG_SUB_BANDS_NUM_REV5) 201 + 202 + /** 203 + * struct uefi_cnv_var_wtas - WTAS tabled as defined in UEFI 136 204 * @revision: the revision of the table 137 205 * @tas_selection: different options of TAS enablement. 138 206 * @black_list_size: the number of defined entried in the black list ··· 212 146 u16 black_list[IWL_WTAS_BLACK_LIST_MAX]; 213 147 } __packed; 214 148 215 - /* struct uefi_cnv_var_splc - SPLC tabled as defined in UEFI 149 + /** 150 + * struct uefi_cnv_var_splc - SPLC tabled as defined in UEFI 216 151 * @revision: the revision of the table 217 152 * @default_pwr_limit: The default maximum power per device 218 153 */ ··· 222 155 u32 default_pwr_limit; 223 156 } __packed; 224 157 225 - /* struct uefi_cnv_var_wrdd - WRDD table as defined in UEFI 158 + /** 159 + * struct uefi_cnv_var_wrdd - WRDD table as defined in UEFI 226 160 * @revision: the revision of the table 227 161 * @mcc: country identifier as defined in ISO/IEC 3166-1 Alpha 2 code 228 162 */ ··· 232 164 u32 mcc; 233 165 } __packed; 234 166 235 - /* struct uefi_cnv_var_eckv - ECKV table as defined in UEFI 167 + /** 168 + * struct uefi_cnv_var_eckv - ECKV table as defined in UEFI 236 169 * @revision: the revision of the table 237 170 * @ext_clock_valid: indicates if external 32KHz clock is valid 238 171 */ ··· 244 175 245 176 #define UEFI_MAX_DSM_FUNCS 32 246 177 247 - /* struct 
uefi_cnv_var_general_cfg - DSM-like table as defined in UEFI 178 + /** 179 + * struct uefi_cnv_var_general_cfg - DSM-like table as defined in UEFI 248 180 * @revision: the revision of the table 249 181 * @functions: payload of the different DSM functions 250 182 */ ··· 255 185 } __packed; 256 186 257 187 #define IWL_UEFI_WBEM_REV0_MASK (BIT(0) | BIT(1)) 258 - /* struct uefi_cnv_wlan_wbem_data - Bandwidth enablement per MCC as defined 188 + 189 + /** 190 + * struct uefi_cnv_wlan_wbem_data - Bandwidth enablement per MCC as defined 259 191 * in UEFI 260 192 * @revision: the revision of the table 261 193 * @wbem_320mhz_per_mcc: enablement of 320MHz bandwidth per MCC ··· 345 273 u32 *value); 346 274 void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt); 347 275 void iwl_uefi_get_uats_table(struct iwl_trans *trans, 276 + struct iwl_fw_runtime *fwrt); 277 + void iwl_uefi_get_uneb_table(struct iwl_trans *trans, 348 278 struct iwl_fw_runtime *fwrt); 349 279 int iwl_uefi_get_puncturing(struct iwl_fw_runtime *fwrt); 350 280 int iwl_uefi_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value); ··· 444 370 445 371 static inline void 446 372 iwl_uefi_get_uats_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt) 373 + { 374 + } 375 + 376 + static inline void 377 + iwl_uefi_get_uneb_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt) 447 378 { 448 379 } 449 380
-1
drivers/net/wireless/intel/iwlwifi/iwl-config.h
··· 85 85 #define IWL_WATCHDOG_DISABLED 0 86 86 #define IWL_DEF_WD_TIMEOUT 2500 87 87 #define IWL_LONG_WD_TIMEOUT 10000 88 - #define IWL_MAX_WD_TIMEOUT 120000 89 88 90 89 #define IWL_DEFAULT_MAX_TX_POWER 22 91 90 #define IWL_TX_CSUM_NETIF_FLAGS (NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM |\
+22 -1
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
··· 133 133 kfree(drv->fw.dbg.mem_tlv); 134 134 kfree(drv->fw.iml); 135 135 kfree(drv->fw.ucode_capa.cmd_versions); 136 + kfree(drv->fw.ucode_capa.cmd_bios_tables); 136 137 kfree(drv->fw.phy_integration_ver); 137 138 kfree(drv->trans->dbg.pc_data); 138 139 drv->trans->dbg.pc_data = NULL; ··· 1315 1314 if (tlv_len != sizeof(u32)) 1316 1315 goto invalid_tlv_len; 1317 1316 if (le32_to_cpup((const __le32 *)tlv_data) > 1318 - IWL_FW_MAX_LINK_ID + 1) { 1317 + IWL_FW_MAX_LINKS) { 1319 1318 IWL_ERR(drv, 1320 1319 "%d is an invalid number of links\n", 1321 1320 le32_to_cpup((const __le32 *)tlv_data)); ··· 1426 1425 if (!drv->fw.pnvm_data) 1427 1426 return -ENOMEM; 1428 1427 drv->fw.pnvm_size = tlv_len; 1428 + break; 1429 + case IWL_UCODE_TLV_CMD_BIOS_TABLE: 1430 + if (tlv_len % sizeof(struct iwl_fw_cmd_bios_table)) { 1431 + IWL_ERR(drv, 1432 + "Invalid length for command bios table: %u\n", 1433 + tlv_len); 1434 + return -EINVAL; 1435 + } 1436 + 1437 + if (capa->cmd_bios_tables) { 1438 + IWL_ERR(drv, "Duplicate TLV type 0x%02X detected\n", 1439 + tlv_type); 1440 + return -EINVAL; 1441 + } 1442 + capa->cmd_bios_tables = kmemdup(tlv_data, tlv_len, 1443 + GFP_KERNEL); 1444 + if (!capa->cmd_bios_tables) 1445 + return -ENOMEM; 1446 + capa->n_cmd_bios_tables = 1447 + tlv_len / sizeof(struct iwl_fw_cmd_bios_table); 1429 1448 break; 1430 1449 default: 1431 1450 IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
+8 -1
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
··· 23 23 #include "fw/api/commands.h" 24 24 #include "fw/api/cmdhdr.h" 25 25 #include "fw/img.h" 26 + #include "fw/dbg.h" 27 + 26 28 #include "mei/iwl-mei.h" 27 29 28 30 /* NVM offsets (in words) definitions */ ··· 1704 1702 band); 1705 1703 new_rule = false; 1706 1704 1705 + if (IWL_FW_CHECK(trans, !center_freq, 1706 + "Invalid channel %d (idx %d) in NVM\n", 1707 + nvm_chan[ch_idx], ch_idx)) 1708 + continue; 1709 + 1707 1710 if (!(ch_flags & NVM_CHANNEL_VALID)) { 1708 1711 iwl_nvm_print_channel_flags(dev, IWL_DL_LAR, 1709 1712 nvm_chan[ch_idx], ch_flags); ··· 2038 2031 if (empty_otp) 2039 2032 IWL_INFO(trans, "OTP is empty\n"); 2040 2033 2041 - nvm = kzalloc_flex(*nvm, channels, IWL_NUM_CHANNELS); 2034 + nvm = kzalloc_flex(*nvm, channels, IWL_NUM_CHANNELS_V2); 2042 2035 if (!nvm) { 2043 2036 ret = -ENOMEM; 2044 2037 goto out;
+5 -5
drivers/net/wireless/intel/iwlwifi/iwl-trans.c
··· 138 138 IWL_RESET_MODE_FUNC_RESET, 139 139 IWL_RESET_MODE_PROD_RESET, 140 140 }; 141 - static const enum iwl_reset_mode escalation_list_sc[] = { 141 + static const enum iwl_reset_mode escalation_list_top[] = { 142 142 IWL_RESET_MODE_SW_RESET, 143 143 IWL_RESET_MODE_REPROBE, 144 144 IWL_RESET_MODE_REPROBE, ··· 159 159 160 160 if (trans->request_top_reset) { 161 161 trans->request_top_reset = 0; 162 - if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_SC) 162 + if (iwl_trans_is_top_reset_supported(trans)) 163 163 return IWL_RESET_MODE_TOP_RESET; 164 164 return IWL_RESET_MODE_PROD_RESET; 165 165 } 166 166 167 - if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_SC) { 168 - escalation_list = escalation_list_sc; 169 - escalation_list_size = ARRAY_SIZE(escalation_list_sc); 167 + if (iwl_trans_is_top_reset_supported(trans)) { 168 + escalation_list = escalation_list_top; 169 + escalation_list_size = ARRAY_SIZE(escalation_list_top); 170 170 } else { 171 171 escalation_list = escalation_list_old; 172 172 escalation_list_size = ARRAY_SIZE(escalation_list_old);
+19 -1
drivers/net/wireless/intel/iwlwifi/iwl-trans.h
··· 1088 1088 */ 1089 1089 trans->restart.during_reset = test_bit(STATUS_IN_SW_RESET, 1090 1090 &trans->status); 1091 - queue_delayed_work(system_unbound_wq, &trans->restart.wk, 0); 1091 + queue_delayed_work(system_dfl_wq, &trans->restart.wk, 0); 1092 1092 } 1093 1093 1094 1094 static inline void iwl_trans_fw_error(struct iwl_trans *trans, ··· 1257 1257 bool iwl_trans_is_pm_supported(struct iwl_trans *trans); 1258 1258 1259 1259 bool iwl_trans_is_ltr_enabled(struct iwl_trans *trans); 1260 + 1261 + static inline bool iwl_trans_is_top_reset_supported(struct iwl_trans *trans) 1262 + { 1263 + /* not supported before Sc family */ 1264 + if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC) 1265 + return false; 1266 + 1267 + /* for Sc family only supported for Sc2/Sc2f */ 1268 + if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_SC && 1269 + CSR_HW_REV_TYPE(trans->info.hw_rev) == IWL_CFG_MAC_TYPE_SC) 1270 + return false; 1271 + 1272 + /* so far these numbers are increasing - not before Pe */ 1273 + if (CSR_HW_RFID_TYPE(trans->info.hw_rf_id) < IWL_CFG_RF_TYPE_PE) 1274 + return false; 1275 + 1276 + return true; 1277 + } 1260 1278 1261 1279 #endif /* __iwl_trans_h__ */
-1
drivers/net/wireless/intel/iwlwifi/mld/constants.h
··· 36 36 #define IWL_MLD_PS_HEAVY_RX_THLD_PACKETS 8 37 37 38 38 #define IWL_MLD_TRIGGER_LINK_SEL_TIME_SEC 30 39 - #define IWL_MLD_SCAN_EXPIRE_TIME_SEC 20 40 39 41 40 #define IWL_MLD_TPT_COUNT_WINDOW (5 * HZ) 42 41
+1 -1
drivers/net/wireless/intel/iwlwifi/mld/fw.c
··· 513 513 return ret; 514 514 515 515 iwl_mld_init_tas(mld); 516 - iwl_mld_init_uats(mld); 516 + iwl_mld_init_ap_type_tables(mld); 517 517 518 518 return 0; 519 519 }
+17 -5
drivers/net/wireless/intel/iwlwifi/mld/iface.c
··· 61 61 static int iwl_mld_send_mac_cmd(struct iwl_mld *mld, 62 62 struct iwl_mac_config_cmd *cmd) 63 63 { 64 + u16 cmd_id = WIDE_ID(MAC_CONF_GROUP, MAC_CONFIG_CMD); 65 + int len = sizeof(*cmd); 64 66 int ret; 65 67 66 68 lockdep_assert_wiphy(mld->wiphy); 67 69 68 - ret = iwl_mld_send_cmd_pdu(mld, 69 - WIDE_ID(MAC_CONF_GROUP, MAC_CONFIG_CMD), 70 - cmd); 70 + if (iwl_fw_lookup_cmd_ver(mld->fw, cmd_id, 0) < 4) { 71 + if (WARN_ON(cmd->mac_type == cpu_to_le32(FW_MAC_TYPE_NAN))) 72 + return -EINVAL; 73 + 74 + len = sizeof(struct iwl_mac_config_cmd_v3); 75 + } 76 + 77 + ret = iwl_mld_send_cmd_pdu(mld, cmd_id, cmd, len); 71 78 if (ret) 72 79 IWL_ERR(mld, "Failed to send MAC_CONFIG_CMD ret = %d\n", ret); 73 80 74 81 return ret; 75 82 } 76 83 77 - int iwl_mld_mac80211_iftype_to_fw(const struct ieee80211_vif *vif) 84 + static int iwl_mld_mac80211_iftype_to_fw(const struct ieee80211_vif *vif) 78 85 { 79 86 switch (vif->type) { 80 87 case NL80211_IFTYPE_STATION: ··· 393 386 iwl_mld_int_mlo_scan(mld, iwl_mld_vif_to_mac80211(mld_vif)); 394 387 } 395 388 396 - IWL_MLD_ALLOC_FN(vif, vif) 389 + static IWL_MLD_ALLOC_FN(vif, vif) 397 390 398 391 /* Constructor function for struct iwl_mld_vif */ 399 392 static void ··· 404 397 lockdep_assert_wiphy(mld->wiphy); 405 398 406 399 mld_vif->mld = mld; 400 + mld_vif->fw_id = IWL_MLD_INVALID_FW_ID; 407 401 mld_vif->roc_activity = ROC_NUM_ACTIVITIES; 408 402 409 403 if (!mld->fw_status.in_hw_restart) { ··· 451 443 struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); 452 444 453 445 lockdep_assert_wiphy(mld->wiphy); 446 + 447 + /* NAN interface type is not known to FW */ 448 + if (vif->type == NL80211_IFTYPE_NAN) 449 + return; 454 450 455 451 iwl_mld_mac_fw_action(mld, vif, FW_CTXT_ACTION_REMOVE); 456 452
+12 -3
drivers/net/wireless/intel/iwlwifi/mld/iface.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ 2 2 /* 3 - * Copyright (C) 2024-2025 Intel Corporation 3 + * Copyright (C) 2024-2026 Intel Corporation 4 4 */ 5 5 #ifndef __iwl_mld_iface_h__ 6 6 #define __iwl_mld_iface_h__ ··· 33 33 * there is an indication that a non-BSS interface is to be added. 34 34 * @IWL_MLD_EMLSR_BLOCKED_TPT: throughput is too low to make EMLSR worthwhile 35 35 * @IWL_MLD_EMLSR_BLOCKED_NAN: NAN is preventing EMLSR. 36 + * @IWL_MLD_EMLSR_BLOCKED_TDLS: TDLS connection is preventing EMLSR. 36 37 */ 37 38 enum iwl_mld_emlsr_blocked { 38 39 IWL_MLD_EMLSR_BLOCKED_PREVENTION = 0x1, ··· 43 42 IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS = 0x10, 44 43 IWL_MLD_EMLSR_BLOCKED_TPT = 0x20, 45 44 IWL_MLD_EMLSR_BLOCKED_NAN = 0x40, 45 + IWL_MLD_EMLSR_BLOCKED_TDLS = 0x80, 46 46 }; 47 47 48 48 /** ··· 203 201 return container_of((void *)mld_vif, struct ieee80211_vif, drv_priv); 204 202 } 205 203 204 + /* Call only for interfaces that were added to the driver! */ 205 + static inline bool iwl_mld_vif_fw_id_valid(struct iwl_mld_vif *mld_vif) 206 + { 207 + if (WARN_ON(mld_vif->fw_id >= ARRAY_SIZE(mld_vif->mld->fw_id_to_vif))) 208 + return false; 209 + 210 + return true; 211 + } 212 + 206 213 #define iwl_mld_link_dereference_check(mld_vif, link_id) \ 207 214 rcu_dereference_check((mld_vif)->link[link_id], \ 208 215 lockdep_is_held(&mld_vif->mld->wiphy->mtx)) ··· 229 218 230 219 return iwl_mld_link_dereference_check(mld_vif, bss_conf->link_id); 231 220 } 232 - 233 - int iwl_mld_mac80211_iftype_to_fw(const struct ieee80211_vif *vif); 234 221 235 222 /* Cleanup function for struct iwl_mld_vif, will be called in restart */ 236 223 void iwl_mld_cleanup_vif(void *data, u8 *mac, struct ieee80211_vif *vif);
+1 -1
drivers/net/wireless/intel/iwlwifi/mld/link.c
··· 437 437 iwl_mld_send_link_cmd(mld, &cmd, FW_CTXT_ACTION_REMOVE); 438 438 } 439 439 440 - IWL_MLD_ALLOC_FN(link, bss_conf) 440 + static IWL_MLD_ALLOC_FN(link, bss_conf) 441 441 442 442 /* Constructor function for struct iwl_mld_link */ 443 443 static int
+2
drivers/net/wireless/intel/iwlwifi/mld/link.h
··· 40 40 * @bcast_sta: station used for broadcast packets. Used in AP, GO and IBSS. 41 41 * @mcast_sta: station used for multicast packets. Used in AP, GO and IBSS. 42 42 * @mon_sta: station used for TX injection in monitor interface. 43 + * @last_cqm_rssi_event: rssi of the last cqm rssi event 43 44 * @average_beacon_energy: average beacon energy for beacons received during 44 45 * client connections 45 46 * @ap_early_keys: The firmware cannot install keys before bcast/mcast STAs, ··· 67 66 struct iwl_mld_int_sta bcast_sta; 68 67 struct iwl_mld_int_sta mcast_sta; 69 68 struct iwl_mld_int_sta mon_sta; 69 + int last_cqm_rssi_event; 70 70 71 71 /* we can only have 2 GTK + 2 IGTK + 2 BIGTK active at a time */ 72 72 struct ieee80211_key_conf *ap_early_keys[6];
+9 -4
drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 2 /* 3 - * Copyright (C) 2024-2025 Intel Corporation 3 + * Copyright (C) 2024-2026 Intel Corporation 4 4 */ 5 5 #include "mld.h" 6 6 #include "iface.h" ··· 77 77 bool prev = mld_vif->low_latency_causes & LOW_LATENCY_TRAFFIC; 78 78 bool low_latency; 79 79 80 - if (WARN_ON(mld_vif->fw_id >= ARRAY_SIZE(mld->low_latency.result))) 80 + if (!iwl_mld_vif_fw_id_valid(mld_vif)) 81 81 return; 82 + 83 + BUILD_BUG_ON(ARRAY_SIZE(mld->fw_id_to_vif) != 84 + ARRAY_SIZE(mld->low_latency.result)); 82 85 83 86 low_latency = mld->low_latency.result[mld_vif->fw_id]; 84 87 ··· 275 272 if (WARN_ON_ONCE(!mld->low_latency.pkts_counters)) 276 273 return; 277 274 278 - if (WARN_ON_ONCE(fw_id >= ARRAY_SIZE(counters->vo_vi) || 279 - queue >= mld->trans->info.num_rxqs)) 275 + if (!iwl_mld_vif_fw_id_valid(mld_vif)) 276 + return; 277 + 278 + if (WARN_ON_ONCE(queue >= mld->trans->info.num_rxqs)) 280 279 return; 281 280 282 281 if (mld->low_latency.stopped)
+48 -4
drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
··· 754 754 mld->monitor.phy.valid = false; 755 755 } 756 756 757 + static 758 + int iwl_mld_mac80211_change_interface(struct ieee80211_hw *hw, 759 + struct ieee80211_vif *vif, 760 + enum nl80211_iftype new_type, bool p2p) 761 + { 762 + enum nl80211_iftype old_type = vif->type; 763 + bool old_p2p = vif->p2p; 764 + int ret; 765 + 766 + iwl_mld_mac80211_remove_interface(hw, vif); 767 + 768 + /* set the new type for adding it cleanly */ 769 + vif->type = new_type; 770 + vif->p2p = p2p; 771 + 772 + ret = iwl_mld_mac80211_add_interface(hw, vif); 773 + 774 + /* restore for mac80211, it will change it again */ 775 + vif->type = old_type; 776 + vif->p2p = old_p2p; 777 + 778 + return ret; 779 + } 780 + 757 781 struct iwl_mld_mc_iter_data { 758 782 struct iwl_mld *mld; 759 783 int port_id; ··· 1148 1124 1149 1125 /* Now activate the link */ 1150 1126 if (iwl_mld_can_activate_link(mld, vif, link)) { 1127 + iwl_mld_tlc_update_phy(mld, vif, link); 1128 + 1151 1129 ret = iwl_mld_activate_link(mld, link); 1152 1130 if (ret) 1153 1131 goto err; ··· 1210 1184 } 1211 1185 1212 1186 RCU_INIT_POINTER(mld_link->chan_ctx, NULL); 1187 + 1188 + iwl_mld_tlc_update_phy(mld, vif, link); 1213 1189 1214 1190 /* in the non-MLO case, remove/re-add the link to clean up FW state. 1215 1191 * In MLO, it'll be done in drv_change_vif_link ··· 1755 1727 return -EBUSY; 1756 1728 } 1757 1729 1758 - ret = iwl_mld_add_sta(mld, sta, vif, STATION_TYPE_PEER); 1730 + ret = iwl_mld_add_sta(mld, sta, vif); 1759 1731 if (ret) 1760 1732 return ret; 1761 1733 1762 - /* just added first TDLS STA, so disable PM */ 1763 - if (sta->tdls && tdls_count == 0) 1734 + /* just added first TDLS STA, so disable PM and block EMLSR */ 1735 + if (sta->tdls && tdls_count == 0) { 1764 1736 iwl_mld_update_mac_power(mld, vif, false); 1737 + 1738 + /* TDLS requires single-link operation with 1739 + * direct peer communication. 1740 + * Block and exit EMLSR when TDLS is established. 
1741 + */ 1742 + iwl_mld_block_emlsr(mld, vif, 1743 + IWL_MLD_EMLSR_BLOCKED_TDLS, 1744 + iwl_mld_get_primary_link(vif)); 1745 + } 1765 1746 1766 1747 if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) 1767 1748 mld_vif->ap_sta = sta; ··· 1907 1870 iwl_mld_remove_sta(mld, sta); 1908 1871 1909 1872 if (sta->tdls && iwl_mld_tdls_sta_count(mld) == 0) { 1910 - /* just removed last TDLS STA, so enable PM */ 1873 + /* just removed last TDLS STA, so enable PM 1874 + * and unblock EMLSR 1875 + */ 1911 1876 iwl_mld_update_mac_power(mld, vif, false); 1877 + 1878 + /* Unblock EMLSR when TDLS connection is torn down */ 1879 + iwl_mld_unblock_emlsr(mld, vif, 1880 + IWL_MLD_EMLSR_BLOCKED_TDLS); 1912 1881 } 1913 1882 } else { 1914 1883 return -EINVAL; ··· 2759 2716 .get_antenna = iwl_mld_get_antenna, 2760 2717 .set_antenna = iwl_mld_set_antenna, 2761 2718 .add_interface = iwl_mld_mac80211_add_interface, 2719 + .change_interface = iwl_mld_mac80211_change_interface, 2762 2720 .remove_interface = iwl_mld_mac80211_remove_interface, 2763 2721 .conf_tx = iwl_mld_mac80211_conf_tx, 2764 2722 .prepare_multicast = iwl_mld_mac80211_prepare_multicast,
+3 -3
drivers/net/wireless/intel/iwlwifi/mld/mld.h
··· 205 205 struct iwl_mld { 206 206 /* Add here fields that need clean up on restart */ 207 207 struct_group(zeroed_on_hw_restart, 208 - struct ieee80211_bss_conf __rcu *fw_id_to_bss_conf[IWL_FW_MAX_LINK_ID + 1]; 208 + struct ieee80211_bss_conf __rcu *fw_id_to_bss_conf[IWL_FW_MAX_LINKS]; 209 209 struct ieee80211_vif __rcu *fw_id_to_vif[NUM_MAC_INDEX_DRIVER]; 210 210 struct ieee80211_txq __rcu *fw_id_to_txq[IWL_MAX_TVQM_QUEUES]; 211 211 u8 used_phy_ids: NUM_PHY_CTX; ··· 530 530 #define IWL_MLD_INVALID_FW_ID 0xff 531 531 532 532 #define IWL_MLD_ALLOC_FN(_type, _mac80211_type) \ 533 - static int \ 533 + int \ 534 534 iwl_mld_allocate_##_type##_fw_id(struct iwl_mld *mld, \ 535 - u8 *fw_id, \ 535 + u8 *fw_id, \ 536 536 struct ieee80211_##_mac80211_type *mac80211_ptr) \ 537 537 { \ 538 538 u8 rand = IWL_MLD_DIS_RANDOM_FW_ID ? 0 : get_random_u8(); \
+2 -2
drivers/net/wireless/intel/iwlwifi/mld/mlo.c
··· 13 13 HOW(NON_BSS) \ 14 14 HOW(TMP_NON_BSS) \ 15 15 HOW(TPT) \ 16 - HOW(NAN) 16 + HOW(NAN) \ 17 + HOW(TDLS) 17 18 18 19 static const char * 19 20 iwl_mld_get_emlsr_blocked_string(enum iwl_mld_emlsr_blocked blocked) ··· 111 110 } 112 111 113 112 #define IWL_MLD_TRIGGER_LINK_SEL_TIME (HZ * IWL_MLD_TRIGGER_LINK_SEL_TIME_SEC) 114 - #define IWL_MLD_SCAN_EXPIRE_TIME (HZ * IWL_MLD_SCAN_EXPIRE_TIME_SEC) 115 113 116 114 /* Exit reasons that can cause longer EMLSR prevention */ 117 115 #define IWL_MLD_PREVENT_EMLSR_REASONS (IWL_MLD_EMLSR_EXIT_MISSED_BEACON | \
+4 -1
drivers/net/wireless/intel/iwlwifi/mld/nan.h
··· 2 2 /* 3 3 * Copyright (C) 2025 Intel Corporation 4 4 */ 5 - 5 + #ifndef __iwl_mld_nan_h__ 6 + #define __iwl_mld_nan_h__ 6 7 #include <net/cfg80211.h> 7 8 #include <linux/etherdevice.h> 8 9 ··· 27 26 bool iwl_mld_cancel_nan_dw_end_notif(struct iwl_mld *mld, 28 27 struct iwl_rx_packet *pkt, 29 28 u32 obj_id); 29 + 30 + #endif /* __iwl_mld_nan_h__ */
+2 -2
drivers/net/wireless/intel/iwlwifi/mld/phy.h
··· 32 32 }; 33 33 34 34 static inline struct iwl_mld_phy * 35 - iwl_mld_phy_from_mac80211(struct ieee80211_chanctx_conf *channel) 35 + iwl_mld_phy_from_mac80211(struct ieee80211_chanctx_conf *chanctx) 36 36 { 37 - return (void *)channel->drv_priv; 37 + return (void *)chanctx->drv_priv; 38 38 } 39 39 40 40 /* Cleanup function for struct iwl_mld_phy, will be called in restart */
+4 -1
drivers/net/wireless/intel/iwlwifi/mld/power.c
··· 405 405 .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_LINK), 406 406 .common.pwr_restriction = cpu_to_le16(u_tx_power), 407 407 }; 408 - int len = sizeof(cmd.common) + sizeof(cmd.v10); 408 + int len = sizeof(cmd.common) + sizeof(cmd.v11); 409 + 410 + if (iwl_fw_lookup_cmd_ver(mld->fw, cmd_id, 10) == 10) 411 + len = sizeof(cmd.common) + sizeof(cmd.v10); 409 412 410 413 if (WARN_ON(!mld_link)) 411 414 return -ENODEV;
+3 -1
drivers/net/wireless/intel/iwlwifi/mld/ptp.c
··· 301 301 mld->ptp_data.ptp_clock = 302 302 ptp_clock_register(&mld->ptp_data.ptp_clock_info, mld->dev); 303 303 304 - if (IS_ERR_OR_NULL(mld->ptp_data.ptp_clock)) { 304 + if (IS_ERR(mld->ptp_data.ptp_clock)) { 305 305 IWL_ERR(mld, "Failed to register PHC clock (%ld)\n", 306 306 PTR_ERR(mld->ptp_data.ptp_clock)); 307 307 mld->ptp_data.ptp_clock = NULL; 308 + } else if (!mld->ptp_data.ptp_clock) { 309 + IWL_DEBUG_INFO(mld, "PTP module unavailable on this kernel\n"); 308 310 } else { 309 311 IWL_DEBUG_INFO(mld, "Registered PHC clock: %s, with index: %d\n", 310 312 mld->ptp_data.ptp_clock_info.name,
+148 -30
drivers/net/wireless/intel/iwlwifi/mld/regulatory.c
··· 64 64 } 65 65 66 66 iwl_uefi_get_uats_table(mld->trans, &mld->fwrt); 67 + iwl_uefi_get_uneb_table(mld->trans, &mld->fwrt); 67 68 68 69 iwl_bios_get_phy_filters(&mld->fwrt); 69 70 } ··· 73 72 { 74 73 u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD); 75 74 /* Only set to South Korea if the table revision is 1 */ 76 - __le32 sk = cpu_to_le32(mld->fwrt.geo_rev == 1 ? 1 : 0); 75 + u8 sk = mld->fwrt.geo_rev == 1 ? 1 : 0; 77 76 union iwl_geo_tx_power_profiles_cmd cmd = { 78 77 .v5.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES), 79 - .v5.table_revision = sk, 80 78 }; 79 + u32 cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id, 0); 80 + int n_subbands; 81 + int cmd_size; 81 82 int ret; 82 83 83 - ret = iwl_sar_geo_fill_table(&mld->fwrt, &cmd.v5.table[0][0], 84 - ARRAY_SIZE(cmd.v5.table[0]), 85 - BIOS_GEO_MAX_PROFILE_NUM); 84 + switch (cmd_ver) { 85 + case 5: 86 + n_subbands = ARRAY_SIZE(cmd.v5.table[0]); 87 + cmd.v5.table_revision = cpu_to_le32(sk); 88 + cmd_size = sizeof(cmd.v5); 89 + break; 90 + case 6: 91 + n_subbands = ARRAY_SIZE(cmd.v6.table[0]); 92 + cmd.v6.bios_hdr.table_revision = mld->fwrt.geo_rev; 93 + cmd.v6.bios_hdr.table_source = mld->fwrt.geo_bios_source; 94 + cmd_size = sizeof(cmd.v6); 95 + break; 96 + default: 97 + WARN(false, "unsupported version: %d", cmd_ver); 98 + return -EINVAL; 99 + } 100 + 101 + BUILD_BUG_ON(offsetof(typeof(cmd), v6.table) != 102 + offsetof(typeof(cmd), v5.table)); 103 + ret = iwl_sar_geo_fill_table(&mld->fwrt, &cmd.v6.table[0][0], 104 + n_subbands, BIOS_GEO_MAX_PROFILE_NUM); 86 105 87 106 /* It is a valid scenario to not support SAR, or miss wgds table, 88 107 * but in that case there is no need to send the command. 
··· 110 89 if (ret) 111 90 return 0; 112 91 113 - return iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd, sizeof(cmd.v5)); 92 + return iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd, cmd_size); 114 93 } 115 94 116 95 int iwl_mld_config_sar_profile(struct iwl_mld *mld, int prof_a, int prof_b) 117 96 { 118 - u32 cmd_id = REDUCE_TX_POWER_CMD; 119 97 struct iwl_dev_tx_power_cmd cmd = { 120 98 .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS), 121 - .v10.flags = cpu_to_le32(mld->fwrt.reduced_power_flags), 122 99 }; 100 + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, REDUCE_TX_POWER_CMD, 10); 101 + int num_subbands; 102 + int cmd_size; 123 103 int ret; 124 104 105 + switch (cmd_ver) { 106 + case 10: 107 + cmd.v10.flags = cpu_to_le32(mld->fwrt.reduced_power_flags); 108 + cmd_size = sizeof(cmd.common) + sizeof(cmd.v10); 109 + num_subbands = IWL_NUM_SUB_BANDS_V2; 110 + break; 111 + case 11: 112 + cmd.v11.flags = cpu_to_le32(mld->fwrt.reduced_power_flags); 113 + cmd_size = sizeof(cmd.common) + sizeof(cmd.v11); 114 + num_subbands = IWL_NUM_SUB_BANDS_V3; 115 + break; 116 + default: 117 + WARN_ONCE(1, "Bad version for REDUCE_TX_POWER_CMD: %d\n", 118 + cmd_ver); 119 + return -EOPNOTSUPP; 120 + } 121 + 125 122 /* TODO: CDB - support IWL_NUM_CHAIN_TABLES_V2 */ 126 - ret = iwl_sar_fill_profile(&mld->fwrt, &cmd.v10.per_chain[0][0][0], 127 - IWL_NUM_CHAIN_TABLES, IWL_NUM_SUB_BANDS_V2, 123 + /* v10 and v11 have the same position for per_chain */ 124 + BUILD_BUG_ON(offsetof(typeof(cmd), v11.per_chain) != 125 + offsetof(typeof(cmd), v10.per_chain)); 126 + ret = iwl_sar_fill_profile(&mld->fwrt, &cmd.v11.per_chain[0][0][0], 127 + IWL_NUM_CHAIN_TABLES, num_subbands, 128 128 prof_a, prof_b); 129 129 /* return on error or if the profile is disabled (positive number) */ 130 130 if (ret) 131 131 return ret; 132 132 133 - return iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd, 134 - sizeof(cmd.common) + sizeof(cmd.v10)); 133 + return iwl_mld_send_cmd_pdu(mld, REDUCE_TX_POWER_CMD, &cmd, cmd_size); 135 134 } 136 
135 137 136 int iwl_mld_init_sar(struct iwl_mld *mld) ··· 206 165 { 207 166 struct iwl_fw_runtime *fwrt = &mld->fwrt; 208 167 union iwl_ppag_table_cmd cmd = { 209 - .v7.ppag_config_info.hdr.table_source = fwrt->ppag_bios_source, 210 - .v7.ppag_config_info.hdr.table_revision = fwrt->ppag_bios_rev, 211 - .v7.ppag_config_info.value = cpu_to_le32(fwrt->ppag_flags), 168 + /* v7 and v8 have the same layout for the ppag_config_info */ 169 + .v8.ppag_config_info.hdr.table_source = fwrt->ppag_bios_source, 170 + .v8.ppag_config_info.hdr.table_revision = fwrt->ppag_bios_rev, 171 + .v8.ppag_config_info.value = cpu_to_le32(fwrt->ppag_flags), 212 172 }; 173 + u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, PER_PLATFORM_ANT_GAIN_CMD); 174 + int cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id, 1); 175 + int cmd_len = sizeof(cmd.v8); 176 + u8 cmd_bios_rev; 213 177 int ret; 178 + 179 + BUILD_BUG_ON(offsetof(typeof(cmd), v8.ppag_config_info.hdr) != 180 + offsetof(typeof(cmd), v7.ppag_config_info.hdr)); 181 + BUILD_BUG_ON(offsetof(typeof(cmd), v8.gain) != 182 + offsetof(typeof(cmd), v7.gain)); 183 + 184 + BUILD_BUG_ON(ARRAY_SIZE(cmd.v7.gain) > ARRAY_SIZE(fwrt->ppag_chains)); 185 + BUILD_BUG_ON(ARRAY_SIZE(cmd.v7.gain[0]) > 186 + ARRAY_SIZE(fwrt->ppag_chains[0].subbands)); 187 + BUILD_BUG_ON(ARRAY_SIZE(cmd.v8.gain) > ARRAY_SIZE(fwrt->ppag_chains)); 188 + BUILD_BUG_ON(ARRAY_SIZE(cmd.v8.gain[0]) > 189 + ARRAY_SIZE(fwrt->ppag_chains[0].subbands)); 214 190 215 191 IWL_DEBUG_RADIO(fwrt, 216 192 "PPAG MODE bits going to be sent: %d\n", 217 193 fwrt->ppag_flags); 218 194 219 - for (int chain = 0; chain < IWL_NUM_CHAIN_LIMITS; chain++) { 220 - for (int subband = 0; subband < IWL_NUM_SUB_BANDS_V2; subband++) { 221 - cmd.v7.gain[chain][subband] = 222 - fwrt->ppag_chains[chain].subbands[subband]; 223 - IWL_DEBUG_RADIO(fwrt, 224 - "PPAG table: chain[%d] band[%d]: gain = %d\n", 225 - chain, subband, cmd.v7.gain[chain][subband]); 195 + /* Since ver 7 will be deprecated at some point, don't bother making 196 + * 
this code generic for both ver 7 and ver 8: duplicate the code. 197 + */ 198 + if (cmd_ver == 7) { 199 + for (int chain = 0; chain < ARRAY_SIZE(cmd.v7.gain); chain++) { 200 + for (int subband = 0; 201 + subband < ARRAY_SIZE(cmd.v7.gain[0]); 202 + subband++) { 203 + cmd.v7.gain[chain][subband] = 204 + fwrt->ppag_chains[chain].subbands[subband]; 205 + IWL_DEBUG_RADIO(fwrt, 206 + "PPAG table: chain[%d] band[%d]: gain = %d\n", 207 + chain, subband, 208 + cmd.v7.gain[chain][subband]); 209 + } 226 210 } 211 + cmd_len = sizeof(cmd.v7); 212 + cmd_bios_rev = 213 + iwl_fw_lookup_cmd_bios_supported_revision(fwrt->fw, 214 + fwrt->ppag_bios_source, 215 + cmd_id, 4); 216 + } else if (cmd_ver == 8) { 217 + for (int chain = 0; chain < ARRAY_SIZE(cmd.v8.gain); chain++) { 218 + for (int subband = 0; 219 + subband < ARRAY_SIZE(cmd.v8.gain[0]); 220 + subband++) { 221 + cmd.v8.gain[chain][subband] = 222 + fwrt->ppag_chains[chain].subbands[subband]; 223 + IWL_DEBUG_RADIO(fwrt, 224 + "PPAG table: chain[%d] band[%d]: gain = %d\n", 225 + chain, subband, 226 + cmd.v8.gain[chain][subband]); 227 + } 228 + } 229 + cmd_bios_rev = 230 + iwl_fw_lookup_cmd_bios_supported_revision(fwrt->fw, 231 + fwrt->ppag_bios_source, 232 + cmd_id, 5); 233 + } else { 234 + WARN(1, "Bad version for PER_PLATFORM_ANT_GAIN_CMD %d\n", 235 + cmd_ver); 236 + return -EINVAL; 237 + } 238 + 239 + if (cmd_bios_rev < fwrt->ppag_bios_rev) { 240 + IWL_ERR(mld, 241 + "BIOS revision compatibility check failed - Supported: %d, Current: %d\n", 242 + cmd_bios_rev, fwrt->ppag_bios_rev); 243 + return 0; 227 244 } 228 245 229 246 IWL_DEBUG_RADIO(mld, "Sending PER_PLATFORM_ANT_GAIN_CMD\n"); 230 - ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(PHY_OPS_GROUP, 231 - PER_PLATFORM_ANT_GAIN_CMD), 232 - &cmd, sizeof(cmd.v7)); 247 + ret = iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd, cmd_len); 233 248 if (ret < 0) 234 249 IWL_ERR(mld, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n", 235 250 ret); ··· 449 352 ret); 450 353 } 451 354 452 - void 
iwl_mld_init_uats(struct iwl_mld *mld) 355 + void iwl_mld_init_ap_type_tables(struct iwl_mld *mld) 453 356 { 454 357 int ret; 455 358 struct iwl_host_cmd cmd = { 456 359 .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, 457 360 MCC_ALLOWED_AP_TYPE_CMD), 458 - .data[0] = &mld->fwrt.uats_table, 459 - .len[0] = sizeof(mld->fwrt.uats_table), 361 + .data[0] = &mld->fwrt.ap_type_cmd, 362 + .len[0] = sizeof(mld->fwrt.ap_type_cmd), 460 363 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 461 364 }; 462 365 463 - if (!mld->fwrt.uats_valid) 366 + if (!mld->fwrt.ap_type_cmd_valid) 464 367 return; 465 368 466 - ret = iwl_mld_send_cmd(mld, &cmd); 369 + if (iwl_fw_lookup_cmd_ver(mld->fw, cmd.id, 1) == 1) { 370 + struct iwl_mcc_allowed_ap_type_cmd_v1 *cmd_v1 = 371 + kzalloc(sizeof(*cmd_v1), GFP_KERNEL); 372 + 373 + if (!cmd_v1) 374 + return; 375 + 376 + BUILD_BUG_ON(sizeof(mld->fwrt.ap_type_cmd.mcc_to_ap_type_map) != 377 + sizeof(cmd_v1->mcc_to_ap_type_map)); 378 + 379 + memcpy(cmd_v1->mcc_to_ap_type_map, 380 + mld->fwrt.ap_type_cmd.mcc_to_ap_type_map, 381 + sizeof(mld->fwrt.ap_type_cmd.mcc_to_ap_type_map)); 382 + 383 + cmd.data[0] = cmd_v1; 384 + cmd.len[0] = sizeof(*cmd_v1); 385 + ret = iwl_mld_send_cmd(mld, &cmd); 386 + kfree(cmd_v1); 387 + } else { 388 + ret = iwl_mld_send_cmd(mld, &cmd); 389 + } 390 + 467 391 if (ret) 468 392 IWL_ERR(mld, "failed to send MCC_ALLOWED_AP_TYPE_CMD (%d)\n", 469 393 ret);
+1 -1
drivers/net/wireless/intel/iwlwifi/mld/regulatory.h
··· 9 9 10 10 void iwl_mld_get_bios_tables(struct iwl_mld *mld); 11 11 void iwl_mld_configure_lari(struct iwl_mld *mld); 12 - void iwl_mld_init_uats(struct iwl_mld *mld); 12 + void iwl_mld_init_ap_type_tables(struct iwl_mld *mld); 13 13 void iwl_mld_init_tas(struct iwl_mld *mld); 14 14 15 15 int iwl_mld_init_ppag(struct iwl_mld *mld);
+14 -11
drivers/net/wireless/intel/iwlwifi/mld/rx.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 2 /* 3 - * Copyright (C) 2024-2025 Intel Corporation 3 + * Copyright (C) 2024-2026 Intel Corporation 4 4 */ 5 5 6 6 #include <net/mac80211.h> ··· 791 791 iwl_mld_radiotap_put_tlv(struct sk_buff *skb, u16 type, u16 len) 792 792 { 793 793 struct ieee80211_radiotap_tlv *tlv; 794 + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); 795 + 796 + rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END; 794 797 795 798 tlv = skb_put(skb, sizeof(*tlv)); 796 799 tlv->type = cpu_to_le16(type); ··· 1237 1234 1238 1235 eht = iwl_mld_radiotap_put_tlv(skb, IEEE80211_RADIOTAP_EHT, eht_len); 1239 1236 1240 - rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END; 1241 - 1242 1237 switch (u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK)) { 1243 1238 case 0: 1244 1239 if (he_type == RATE_MCS_HE_TYPE_TRIG) { ··· 1330 1329 static void iwl_mld_add_rtap_sniffer_config(struct iwl_mld *mld, 1331 1330 struct sk_buff *skb) 1332 1331 { 1333 - struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); 1334 1332 struct ieee80211_radiotap_vendor_content *radiotap; 1335 1333 const u16 vendor_data_len = sizeof(mld->monitor.cur_aid); 1336 1334 ··· 1353 1353 /* fill the data now */ 1354 1354 memcpy(radiotap->data, &mld->monitor.cur_aid, 1355 1355 sizeof(mld->monitor.cur_aid)); 1356 - 1357 - rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END; 1358 1356 } 1359 1357 #endif 1360 1358 ··· 1360 1362 struct sk_buff *skb, 1361 1363 struct iwl_rx_phy_air_sniffer_ntfy *ntfy) 1362 1364 { 1363 - struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); 1364 1365 struct ieee80211_radiotap_vendor_content *radiotap; 1365 1366 const u16 vendor_data_len = sizeof(*ntfy); 1366 1367 ··· 1379 1382 1380 1383 /* fill the data now */ 1381 1384 memcpy(radiotap->data, ntfy, vendor_data_len); 1382 - 1383 - rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END; 1384 1385 } 1385 1386 1386 1387 static void ··· 1402 1407 u32 rate_n_flags = phy_data->rate_n_flags; 1403 
1408 u8 stbc = u32_get_bits(rate_n_flags, RATE_MCS_STBC_MSK); 1404 1409 u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; 1410 + u32 he_type = u32_get_bits(rate_n_flags, RATE_MCS_HE_TYPE_MSK); 1405 1411 bool is_sgi = rate_n_flags & RATE_MCS_SGI_MSK; 1406 1412 1407 1413 /* bandwidth may be overridden to RU by PHY ntfy */ ··· 1476 1480 case RATE_MCS_MOD_TYPE_EHT: 1477 1481 rx_status->encoding = RX_ENC_EHT; 1478 1482 iwl_mld_set_rx_nonlegacy_rate_info(rate_n_flags, rx_status); 1483 + break; 1484 + case RATE_MCS_MOD_TYPE_UHR: 1485 + rx_status->encoding = RX_ENC_UHR; 1486 + iwl_mld_set_rx_nonlegacy_rate_info(rate_n_flags, rx_status); 1487 + if (he_type == RATE_MCS_HE_TYPE_UHR_ELR) 1488 + rx_status->uhr.elr = 1; 1479 1489 break; 1480 1490 default: 1481 1491 WARN_ON_ONCE(1); ··· 2206 2204 ret = wait_event_timeout(mld->rxq_sync.waitq, 2207 2205 READ_ONCE(mld->rxq_sync.state) == 0, 2208 2206 SYNC_RX_QUEUE_TIMEOUT); 2209 - WARN_ONCE(!ret, "RXQ sync failed: state=0x%lx, cookie=%d\n", 2210 - mld->rxq_sync.state, mld->rxq_sync.cookie); 2207 + IWL_FW_CHECK(mld, !ret, 2208 + "RXQ sync failed: state=0x%lx, cookie=%d\n", 2209 + mld->rxq_sync.state, mld->rxq_sync.cookie); 2211 2210 2212 2211 out: 2213 2212 mld->rxq_sync.state = 0;
+169 -55
drivers/net/wireless/intel/iwlwifi/mld/scan.c
··· 47 47 /* adaptive dwell number of APs override mask for social channels */ 48 48 #define IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS_BIT BIT(21) 49 49 50 - #define SCAN_TIMEOUT_MSEC (30000 * HZ) 51 - 52 50 /* minimal number of 2GHz and 5GHz channels in the regular scan request */ 53 51 #define IWL_MLD_6GHZ_PASSIVE_SCAN_MIN_CHANS 4 54 52 ··· 112 114 bool scan_6ghz; 113 115 bool enable_6ghz_passive; 114 116 u8 bssid[ETH_ALEN] __aligned(2); 117 + }; 118 + 119 + struct iwl_scan_req_params_ptrs { 120 + struct iwl_scan_general_params_v11 *general_params; 121 + struct iwl_scan_channel_params_v8 *channel_params; 122 + struct iwl_scan_periodic_parms_v1 *periodic_params; 123 + struct iwl_scan_probe_params_v4 *probe_params; 115 124 }; 116 125 117 126 struct iwl_mld_scan_respect_p2p_go_iter_data { ··· 517 512 518 513 static void 519 514 iwl_mld_scan_cmd_set_dwell(struct iwl_mld *mld, 520 - struct iwl_scan_general_params_v11 *gp, 521 - struct iwl_mld_scan_params *params) 515 + struct iwl_mld_scan_params *params, 516 + struct iwl_scan_req_params_ptrs *scan_ptrs) 522 517 { 518 + struct iwl_scan_general_params_v11 *gp = scan_ptrs->general_params; 523 519 const struct iwl_mld_scan_timing_params *timing = 524 520 &scan_timing[params->type]; 525 521 ··· 557 551 iwl_mld_scan_cmd_set_gen_params(struct iwl_mld *mld, 558 552 struct iwl_mld_scan_params *params, 559 553 struct ieee80211_vif *vif, 560 - struct iwl_scan_general_params_v11 *gp, 554 + struct iwl_scan_req_params_ptrs *scan_ptrs, 561 555 enum iwl_mld_scan_status scan_status) 562 556 { 557 + struct iwl_scan_general_params_v11 *gp = scan_ptrs->general_params; 563 558 u16 gen_flags = iwl_mld_scan_get_cmd_gen_flags(mld, params, vif, 564 559 scan_status); 565 560 u8 gen_flags2 = iwl_mld_scan_get_cmd_gen_flags2(mld, params, vif, ··· 573 566 gp->flags = cpu_to_le16(gen_flags); 574 567 gp->flags2 = gen_flags2; 575 568 576 - iwl_mld_scan_cmd_set_dwell(mld, gp, params); 569 + iwl_mld_scan_cmd_set_dwell(mld, params, scan_ptrs); 577 570 578 571 if 
(gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1) 579 572 gp->num_of_fragments[SCAN_LB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS; ··· 584 577 585 578 static int 586 579 iwl_mld_scan_cmd_set_sched_params(struct iwl_mld_scan_params *params, 587 - struct iwl_scan_umac_schedule *schedule, 588 - __le16 *delay) 580 + struct iwl_scan_req_params_ptrs *scan_ptrs) 589 581 { 582 + struct iwl_scan_umac_schedule *schedule = 583 + scan_ptrs->periodic_params->schedule; 584 + __le16 *delay = &scan_ptrs->periodic_params->delay; 585 + 590 586 if (WARN_ON(!params->n_scan_plans || 591 587 params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS)) 592 588 return -EINVAL; ··· 667 657 668 658 static void 669 659 iwl_mld_scan_fill_6g_chan_list(struct iwl_mld_scan_params *params, 670 - struct iwl_scan_probe_params_v4 *pp) 660 + struct iwl_scan_req_params_ptrs *scan_ptrs) 671 661 { 672 662 int j, idex_s = 0, idex_b = 0; 673 663 struct cfg80211_scan_6ghz_params *scan_6ghz_params = 674 664 params->scan_6ghz_params; 665 + struct iwl_scan_probe_params_v4 *pp = scan_ptrs->probe_params; 675 666 676 667 for (j = 0; 677 668 j < params->n_ssids && idex_s < SCAN_SHORT_SSID_MAX_SIZE; ··· 736 725 737 726 static void 738 727 iwl_mld_scan_cmd_set_probe_params(struct iwl_mld_scan_params *params, 739 - struct iwl_scan_probe_params_v4 *pp, 728 + struct iwl_scan_req_params_ptrs *scan_ptrs, 740 729 u32 *bitmap_ssid) 741 730 { 731 + struct iwl_scan_probe_params_v4 *pp = scan_ptrs->probe_params; 732 + 742 733 pp->preq = params->preq; 743 734 744 735 if (params->scan_6ghz) { 745 - iwl_mld_scan_fill_6g_chan_list(params, pp); 736 + iwl_mld_scan_fill_6g_chan_list(params, scan_ptrs); 746 737 return; 747 738 } 748 739 ··· 834 821 static void 835 822 iwl_mld_scan_cmd_set_channels(struct iwl_mld *mld, 836 823 struct ieee80211_channel **channels, 837 - struct iwl_scan_channel_params_v7 *cp, 824 + struct iwl_scan_req_params_ptrs *scan_ptrs, 838 825 int n_channels, u32 flags, 839 826 enum nl80211_iftype vif_type) 840 827 { 828 + 
struct iwl_scan_channel_params_v8 *cp = scan_ptrs->channel_params; 829 + 841 830 for (int i = 0; i < n_channels; i++) { 842 831 enum nl80211_band band = channels[i]->band; 843 832 struct iwl_scan_channel_cfg_umac *cfg = &cp->channel_config[i]; ··· 877 862 iwl_mld_scan_cfg_channels_6g(struct iwl_mld *mld, 878 863 struct iwl_mld_scan_params *params, 879 864 u32 n_channels, 880 - struct iwl_scan_probe_params_v4 *pp, 881 - struct iwl_scan_channel_params_v7 *cp, 865 + struct iwl_scan_req_params_ptrs *scan_ptrs, 882 866 enum nl80211_iftype vif_type) 883 867 { 868 + struct iwl_scan_probe_params_v4 *pp = scan_ptrs->probe_params; 869 + struct iwl_scan_channel_params_v8 *cp = scan_ptrs->channel_params; 884 870 struct cfg80211_scan_6ghz_params *scan_6ghz_params = 885 871 params->scan_6ghz_params; 886 872 u32 i; ··· 1079 1063 iwl_mld_scan_cmd_set_6ghz_chan_params(struct iwl_mld *mld, 1080 1064 struct iwl_mld_scan_params *params, 1081 1065 struct ieee80211_vif *vif, 1082 - struct iwl_scan_req_params_v17 *scan_p) 1066 + struct iwl_scan_req_params_ptrs *scan_ptrs) 1083 1067 { 1084 - struct iwl_scan_channel_params_v7 *chan_p = &scan_p->channel_params; 1085 - struct iwl_scan_probe_params_v4 *probe_p = &scan_p->probe_params; 1068 + struct iwl_scan_channel_params_v8 *cp = scan_ptrs->channel_params; 1086 1069 1087 1070 /* Explicitly clear the flags since most of them are not 1088 1071 * relevant for 6 GHz scan. 
1089 1072 */ 1090 - chan_p->flags = 0; 1091 - chan_p->count = iwl_mld_scan_cfg_channels_6g(mld, params, 1092 - params->n_channels, 1093 - probe_p, chan_p, 1094 - vif->type); 1095 - if (!chan_p->count) 1073 + cp->flags = 0; 1074 + cp->count = iwl_mld_scan_cfg_channels_6g(mld, params, 1075 + params->n_channels, 1076 + scan_ptrs, vif->type); 1077 + if (!cp->count) 1096 1078 return -EINVAL; 1097 1079 1098 1080 if (!params->n_ssids || 1099 1081 (params->n_ssids == 1 && !params->ssids[0].ssid_len)) 1100 - chan_p->flags |= IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER; 1082 + cp->flags |= IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER; 1101 1083 1102 1084 return 0; 1103 1085 } ··· 1104 1090 iwl_mld_scan_cmd_set_chan_params(struct iwl_mld *mld, 1105 1091 struct iwl_mld_scan_params *params, 1106 1092 struct ieee80211_vif *vif, 1107 - struct iwl_scan_req_params_v17 *scan_p, 1093 + struct iwl_scan_req_params_ptrs *scan_ptrs, 1108 1094 bool low_latency, 1109 1095 enum iwl_mld_scan_status scan_status, 1110 1096 u32 channel_cfg_flags) 1111 1097 { 1112 - struct iwl_scan_channel_params_v7 *cp = &scan_p->channel_params; 1098 + struct iwl_scan_channel_params_v8 *cp = scan_ptrs->channel_params; 1113 1099 struct ieee80211_supported_band *sband = 1114 1100 &mld->nvm_data->bands[NL80211_BAND_6GHZ]; 1115 1101 ··· 1121 1107 1122 1108 if (params->scan_6ghz) 1123 1109 return iwl_mld_scan_cmd_set_6ghz_chan_params(mld, params, 1124 - vif, scan_p); 1110 + vif, scan_ptrs); 1125 1111 1126 1112 /* relevant only for 2.4 GHz/5 GHz scan */ 1127 1113 cp->flags = iwl_mld_scan_cmd_set_chan_flags(mld, params, vif, 1128 1114 low_latency); 1129 1115 cp->count = params->n_channels; 1130 1116 1131 - iwl_mld_scan_cmd_set_channels(mld, params->channels, cp, 1117 + iwl_mld_scan_cmd_set_channels(mld, params->channels, scan_ptrs, 1132 1118 params->n_channels, channel_cfg_flags, 1133 1119 vif->type); 1134 1120 ··· 1158 1144 return 0; 1159 1145 } 1160 1146 1147 + struct iwl_scan_umac_handler { 1148 + u8 version; 1149 + int 
(*handler)(struct iwl_mld *mld, struct ieee80211_vif *vif, 1150 + struct iwl_mld_scan_params *params, 1151 + enum iwl_mld_scan_status scan_status, 1152 + int uid, u32 ooc_priority, bool low_latency); 1153 + }; 1154 + 1155 + #define IWL_SCAN_UMAC_HANDLER(_ver) { \ 1156 + .version = _ver, \ 1157 + .handler = iwl_mld_scan_umac_v##_ver, \ 1158 + } 1159 + 1160 + static int iwl_mld_scan_umac_common(struct iwl_mld *mld, 1161 + struct ieee80211_vif *vif, 1162 + struct iwl_mld_scan_params *params, 1163 + struct iwl_scan_req_params_ptrs *scan_ptrs, 1164 + enum iwl_mld_scan_status scan_status, 1165 + bool low_latency) 1166 + { 1167 + u32 bitmap_ssid = 0; 1168 + int ret; 1169 + 1170 + iwl_mld_scan_cmd_set_gen_params(mld, params, vif, scan_ptrs, 1171 + scan_status); 1172 + 1173 + ret = iwl_mld_scan_cmd_set_sched_params(params, scan_ptrs); 1174 + if (ret) 1175 + return ret; 1176 + 1177 + iwl_mld_scan_cmd_set_probe_params(params, scan_ptrs, &bitmap_ssid); 1178 + 1179 + return iwl_mld_scan_cmd_set_chan_params(mld, params, vif, scan_ptrs, 1180 + low_latency, scan_status, 1181 + bitmap_ssid); 1182 + } 1183 + 1184 + static int iwl_mld_scan_umac_v18(struct iwl_mld *mld, struct ieee80211_vif *vif, 1185 + struct iwl_mld_scan_params *params, 1186 + enum iwl_mld_scan_status scan_status, 1187 + int uid, u32 ooc_priority, bool low_latency) 1188 + { 1189 + struct iwl_scan_req_umac_v18 *cmd = mld->scan.cmd; 1190 + struct iwl_scan_req_params_ptrs scan_ptrs = { 1191 + .general_params = &cmd->scan_params.general_params, 1192 + .probe_params = &cmd->scan_params.probe_params, 1193 + .channel_params = &cmd->scan_params.channel_params, 1194 + .periodic_params = &cmd->scan_params.periodic_params 1195 + }; 1196 + int ret; 1197 + 1198 + if (WARN_ON(params->n_channels > SCAN_MAX_NUM_CHANS_V4)) 1199 + return -EINVAL; 1200 + 1201 + cmd->uid = cpu_to_le32(uid); 1202 + cmd->ooc_priority = cpu_to_le32(ooc_priority); 1203 + 1204 + ret = iwl_mld_scan_umac_common(mld, vif, params, &scan_ptrs, 1205 + 
scan_status, low_latency); 1206 + if (ret) 1207 + return ret; 1208 + 1209 + return uid; 1210 + } 1211 + 1212 + static int iwl_mld_scan_umac_v17(struct iwl_mld *mld, struct ieee80211_vif *vif, 1213 + struct iwl_mld_scan_params *params, 1214 + enum iwl_mld_scan_status scan_status, 1215 + int uid, u32 ooc_priority, bool low_latency) 1216 + { 1217 + struct iwl_scan_req_umac_v17 *cmd = mld->scan.cmd; 1218 + struct iwl_scan_req_params_ptrs scan_ptrs = { 1219 + .general_params = &cmd->scan_params.general_params, 1220 + .probe_params = &cmd->scan_params.probe_params, 1221 + 1222 + /* struct iwl_scan_channel_params_v8 and struct 1223 + * iwl_scan_channel_params_v7 are almost identical. The only 1224 + * difference is that the newer version allows configuration of 1225 + * more channels. So casting here is ok as long as we ensure 1226 + * that we don't exceed the max number of channels supported by 1227 + * the older version (see the WARN_ON below). 1228 + */ 1229 + .channel_params = (struct iwl_scan_channel_params_v8 *) 1230 + &cmd->scan_params.channel_params, 1231 + .periodic_params = &cmd->scan_params.periodic_params 1232 + }; 1233 + int ret; 1234 + 1235 + if (WARN_ON(params->n_channels > SCAN_MAX_NUM_CHANS_V3)) 1236 + return -EINVAL; 1237 + 1238 + cmd->uid = cpu_to_le32(uid); 1239 + cmd->ooc_priority = cpu_to_le32(ooc_priority); 1240 + 1241 + ret = iwl_mld_scan_umac_common(mld, vif, params, &scan_ptrs, 1242 + scan_status, low_latency); 1243 + if (ret) 1244 + return ret; 1245 + 1246 + return uid; 1247 + } 1248 + 1249 + static const struct iwl_scan_umac_handler iwl_scan_umac_handlers[] = { 1250 + /* set the newest version first to shorten the list traverse time */ 1251 + IWL_SCAN_UMAC_HANDLER(18), 1252 + IWL_SCAN_UMAC_HANDLER(17), 1253 + }; 1254 + 1161 1255 static int 1162 1256 iwl_mld_scan_build_cmd(struct iwl_mld *mld, struct ieee80211_vif *vif, 1163 1257 struct iwl_mld_scan_params *params, 1164 1258 enum iwl_mld_scan_status scan_status, 1165 1259 bool low_latency) 1166 
1260 { 1167 - struct iwl_scan_req_umac_v17 *cmd = mld->scan.cmd; 1168 - struct iwl_scan_req_params_v17 *scan_p = &cmd->scan_params; 1169 - u32 bitmap_ssid = 0; 1170 - int uid, ret; 1261 + int uid, err; 1262 + u32 ooc_priority; 1171 1263 1172 1264 memset(mld->scan.cmd, 0, mld->scan.cmd_size); 1173 - 1174 - /* find a free UID entry */ 1175 1265 uid = iwl_mld_scan_uid_by_status(mld, IWL_MLD_SCAN_NONE); 1176 1266 if (uid < 0) 1177 1267 return uid; 1178 1268 1179 - cmd->uid = cpu_to_le32(uid); 1180 - cmd->ooc_priority = 1181 - cpu_to_le32(iwl_mld_scan_ooc_priority(scan_status)); 1269 + ooc_priority = iwl_mld_scan_ooc_priority(scan_status); 1182 1270 1183 - iwl_mld_scan_cmd_set_gen_params(mld, params, vif, 1184 - &scan_p->general_params, scan_status); 1271 + for (size_t i = 0; i < ARRAY_SIZE(iwl_scan_umac_handlers); i++) { 1272 + const struct iwl_scan_umac_handler *ver_handler = 1273 + &iwl_scan_umac_handlers[i]; 1185 1274 1186 - ret = iwl_mld_scan_cmd_set_sched_params(params, 1187 - scan_p->periodic_params.schedule, 1188 - &scan_p->periodic_params.delay); 1189 - if (ret) 1190 - return ret; 1275 + if (ver_handler->version != mld->scan.cmd_ver) 1276 + continue; 1191 1277 1192 - iwl_mld_scan_cmd_set_probe_params(params, &scan_p->probe_params, 1193 - &bitmap_ssid); 1278 + err = ver_handler->handler(mld, vif, params, scan_status, 1279 + uid, ooc_priority, low_latency); 1280 + return err ? 
: uid; 1281 + } 1194 1282 1195 - ret = iwl_mld_scan_cmd_set_chan_params(mld, params, vif, scan_p, 1196 - low_latency, scan_status, 1197 - bitmap_ssid); 1198 - if (ret) 1199 - return ret; 1283 + IWL_ERR(mld, "No handler for UMAC scan cmd version %d\n", 1284 + mld->scan.cmd_ver); 1200 1285 1201 - return uid; 1286 + return -EINVAL; 1202 1287 } 1203 1288 1204 1289 static bool ··· 2055 1942 struct ieee80211_bss_conf *link_conf = NULL; 2056 1943 2057 1944 if (fw_link_id != IWL_MLD_INVALID_FW_ID) 2058 - link_conf = 2059 - wiphy_dereference(mld->wiphy, 2060 - mld->fw_id_to_bss_conf[fw_link_id]); 1945 + link_conf = iwl_mld_fw_id_to_link_conf(mld, fw_link_id); 2061 1946 2062 1947 /* It is possible that by the time the scan is complete the 2063 1948 * link was already removed and is not valid. ··· 2142 2031 2143 2032 if (scan_cmd_ver == 17) { 2144 2033 scan_cmd_size = sizeof(struct iwl_scan_req_umac_v17); 2034 + } else if (scan_cmd_ver == 18) { 2035 + scan_cmd_size = sizeof(struct iwl_scan_req_umac_v18); 2145 2036 } else { 2146 2037 IWL_ERR(mld, "Unexpected scan cmd version %d\n", scan_cmd_ver); 2147 2038 return -EINVAL; ··· 2154 2041 return -ENOMEM; 2155 2042 2156 2043 mld->scan.cmd_size = scan_cmd_size; 2044 + mld->scan.cmd_ver = scan_cmd_ver; 2157 2045 2158 2046 return 0; 2159 2047 }
+2
drivers/net/wireless/intel/iwlwifi/mld/scan.h
··· 109 109 * @traffic_load.status: The current traffic load status, see 110 110 * &enum iwl_mld_traffic_load 111 111 * @cmd_size: size of %cmd. 112 + * @cmd_ver: version of the scan command format. 112 113 * @cmd: pointer to scan cmd buffer (allocated once in op mode start). 113 114 * @last_6ghz_passive_jiffies: stores the last 6GHz passive scan time 114 115 * in jiffies. ··· 135 134 /* And here fields that survive a fw restart */ 136 135 size_t cmd_size; 137 136 void *cmd; 137 + u8 cmd_ver; 138 138 unsigned long last_6ghz_passive_jiffies; 139 139 unsigned long last_start_time_jiffies; 140 140 u64 last_mlo_scan_time;
+40 -10
drivers/net/wireless/intel/iwlwifi/mld/sta.c
··· 398 398 return htc_flags; 399 399 } 400 400 401 + /* Note: modifies the command depending on FW command version */ 401 402 static int iwl_mld_send_sta_cmd(struct iwl_mld *mld, 402 - const struct iwl_sta_cfg_cmd *cmd) 403 + struct iwl_sta_cfg_cmd *cmd) 403 404 { 404 - int ret = iwl_mld_send_cmd_pdu(mld, 405 - WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD), 406 - cmd); 405 + int cmd_id = WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD); 406 + int cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id, 0); 407 + int len = sizeof(*cmd); 408 + int ret; 409 + 410 + if (cmd_ver < 2) { 411 + IWL_ERR(mld, "Unsupported STA_CONFIG_CMD version %d\n", 412 + cmd_ver); 413 + return -EINVAL; 414 + } else if (cmd_ver == 2) { 415 + struct iwl_sta_cfg_cmd_v2 *cmd_v2 = (void *)cmd; 416 + 417 + if (WARN_ON(cmd->station_type == cpu_to_le32(STATION_TYPE_NAN_PEER_NMI) || 418 + cmd->station_type == cpu_to_le32(STATION_TYPE_NAN_PEER_NDI) || 419 + hweight32(le32_to_cpu(cmd->link_mask)) != 1)) 420 + return -EINVAL; 421 + /* 422 + * These fields are located in a different place in the struct of v2. 423 + * The assumption is that UHR won't be used with FW that has v2. 
424 + */ 425 + if (WARN_ON(cmd->mic_prep_pad_delay || cmd->mic_compute_pad_delay)) 426 + return -EINVAL; 427 + 428 + len = sizeof(struct iwl_sta_cfg_cmd_v2); 429 + cmd_v2->link_id = cpu_to_le32(__ffs(le32_to_cpu(cmd->link_mask))); 430 + } else if (WARN_ON(cmd->station_type != cpu_to_le32(STATION_TYPE_NAN_PEER_NMI) && 431 + cmd->station_type != cpu_to_le32(STATION_TYPE_NAN_PEER_NDI) && 432 + hweight32(le32_to_cpu(cmd->link_mask)) != 1)) { 433 + return -EINVAL; 434 + } 435 + 436 + ret = iwl_mld_send_cmd_pdu(mld, cmd_id, cmd, len); 407 437 if (ret) 408 438 IWL_ERR(mld, "STA_CONFIG_CMD send failed, ret=0x%x\n", ret); 409 439 return ret; ··· 461 431 return -EINVAL; 462 432 463 433 cmd.sta_id = cpu_to_le32(fw_id); 434 + cmd.link_mask = cpu_to_le32(BIT(mld_link->fw_id)); 464 435 cmd.station_type = cpu_to_le32(mld_sta->sta_type); 465 - cmd.link_id = cpu_to_le32(mld_link->fw_id); 466 436 467 437 memcpy(&cmd.peer_mld_address, sta->addr, ETH_ALEN); 468 438 memcpy(&cmd.peer_link_address, link_sta->addr, ETH_ALEN); ··· 528 498 return iwl_mld_send_sta_cmd(mld, &cmd); 529 499 } 530 500 531 - IWL_MLD_ALLOC_FN(link_sta, link_sta) 501 + static IWL_MLD_ALLOC_FN(link_sta, link_sta) 532 502 533 503 static int 534 504 iwl_mld_add_link_sta(struct iwl_mld *mld, struct ieee80211_link_sta *link_sta) ··· 755 725 } 756 726 757 727 int iwl_mld_add_sta(struct iwl_mld *mld, struct ieee80211_sta *sta, 758 - struct ieee80211_vif *vif, enum iwl_fw_sta_type type) 728 + struct ieee80211_vif *vif) 759 729 { 760 730 struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta); 761 731 struct ieee80211_link_sta *link_sta; 762 732 int link_id; 763 733 int ret; 764 734 765 - ret = iwl_mld_init_sta(mld, sta, vif, type); 735 + ret = iwl_mld_init_sta(mld, sta, vif, STATION_TYPE_PEER); 766 736 if (ret) 767 737 return ret; 768 738 ··· 938 908 if (!(mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT)) 939 909 goto unlock; 940 910 941 - for (int i = 0; i <= IWL_FW_MAX_LINK_ID; i++) 911 + for (int i = 0; 
i < IWL_FW_MAX_LINKS; i++) 942 912 total_mpdus += tx ? queue_counter->per_link[i].tx : 943 913 queue_counter->per_link[i].rx; 944 914 ··· 1012 982 return iwl_mld_send_aux_sta_cmd(mld, internal_sta); 1013 983 1014 984 cmd.sta_id = cpu_to_le32((u8)internal_sta->sta_id); 1015 - cmd.link_id = cpu_to_le32(fw_link_id); 985 + cmd.link_mask = cpu_to_le32(BIT(fw_link_id)); 1016 986 cmd.station_type = cpu_to_le32(internal_sta->sta_type); 1017 987 1018 988 /* FW doesn't allow to add a IGTK/BIGTK if the sta isn't marked as MFP.
+2 -2
drivers/net/wireless/intel/iwlwifi/mld/sta.h
··· 89 89 */ 90 90 struct iwl_mld_per_q_mpdu_counter { 91 91 spinlock_t lock; 92 - struct iwl_mld_per_link_mpdu_counter per_link[IWL_FW_MAX_LINK_ID + 1]; 92 + struct iwl_mld_per_link_mpdu_counter per_link[IWL_FW_MAX_LINKS]; 93 93 unsigned long window_start_time; 94 94 } ____cacheline_aligned_in_smp; 95 95 ··· 190 190 } 191 191 192 192 int iwl_mld_add_sta(struct iwl_mld *mld, struct ieee80211_sta *sta, 193 - struct ieee80211_vif *vif, enum iwl_fw_sta_type type); 193 + struct ieee80211_vif *vif); 194 194 void iwl_mld_remove_sta(struct iwl_mld *mld, struct ieee80211_sta *sta); 195 195 int iwl_mld_fw_sta_id_from_link_sta(struct iwl_mld *mld, 196 196 struct ieee80211_link_sta *link_sta);
+27 -4
drivers/net/wireless/intel/iwlwifi/mld/stats.c
··· 369 369 static void iwl_mld_update_link_sig(struct ieee80211_vif *vif, int sig, 370 370 struct ieee80211_bss_conf *bss_conf) 371 371 { 372 + struct iwl_mld_link *link = iwl_mld_link_from_mac80211(bss_conf); 372 373 struct iwl_mld *mld = iwl_mld_vif_from_mac80211(vif)->mld; 373 374 int exit_emlsr_thresh; 375 + int last_event; 374 376 375 377 if (sig == 0) { 376 378 IWL_DEBUG_RX(mld, "RSSI is 0 - skip signal based decision\n"); 377 379 return; 378 380 } 379 381 380 - /* TODO: task=statistics handle CQM notifications */ 382 + if (WARN_ON(!link)) 383 + return; 384 + 385 + /* CQM Notification */ 386 + if (vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI) { 387 + int thold = bss_conf->cqm_rssi_thold; 388 + int hyst = bss_conf->cqm_rssi_hyst; 389 + 390 + last_event = link->last_cqm_rssi_event; 391 + if (thold && sig < thold && 392 + (last_event == 0 || sig < last_event - hyst)) { 393 + link->last_cqm_rssi_event = sig; 394 + ieee80211_cqm_rssi_notify(vif, 395 + NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, 396 + sig, GFP_KERNEL); 397 + } else if (sig > thold && 398 + (last_event == 0 || sig > last_event + hyst)) { 399 + link->last_cqm_rssi_event = sig; 400 + ieee80211_cqm_rssi_notify(vif, 401 + NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH, 402 + sig, GFP_KERNEL); 403 + } 404 + } 381 405 382 406 if (!iwl_mld_emlsr_active(vif)) { 383 407 /* We're not in EMLSR and our signal is bad, ··· 431 407 u32 total_airtime_usec = 0; 432 408 433 409 for (u32 fw_id = 0; 434 - fw_id < ARRAY_SIZE(mld->fw_id_to_bss_conf); 410 + fw_id < mld->fw->ucode_capa.num_links; 435 411 fw_id++) { 436 412 const struct iwl_stats_ntfy_per_link *link_stats; 437 413 struct ieee80211_bss_conf *bss_conf; 438 414 int sig; 439 415 440 - bss_conf = wiphy_dereference(mld->wiphy, 441 - mld->fw_id_to_bss_conf[fw_id]); 416 + bss_conf = iwl_mld_fw_id_to_link_conf(mld, fw_id); 442 417 if (!bss_conf || bss_conf->vif->type != NL80211_IFTYPE_STATION) 443 418 continue; 444 419
+4 -4
drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c
··· 42 42 iwl_construct_mld(mld, trans, cfg, fw, hw, NULL); 43 43 44 44 fw->ucode_capa.num_stations = IWL_STATION_COUNT_MAX; 45 - fw->ucode_capa.num_links = IWL_FW_MAX_LINK_ID + 1; 45 + fw->ucode_capa.num_links = IWL_FW_MAX_LINKS; 46 46 47 47 mld->fwrt.trans = trans; 48 48 mld->fwrt.fw = fw; ··· 68 68 return 0; 69 69 } 70 70 71 - IWL_MLD_ALLOC_FN(link, bss_conf) 71 + static IWL_MLD_ALLOC_FN(link, bss_conf) 72 72 73 73 static void iwlmld_kunit_init_link(struct ieee80211_vif *vif, 74 74 struct ieee80211_bss_conf *link, ··· 94 94 rcu_assign_pointer(vif->link_conf[link_id], link); 95 95 } 96 96 97 - IWL_MLD_ALLOC_FN(vif, vif) 97 + static IWL_MLD_ALLOC_FN(vif, vif) 98 98 99 99 /* Helper function to add and initialize a VIF for KUnit tests */ 100 100 struct ieee80211_vif *iwlmld_kunit_add_vif(bool mlo, enum nl80211_iftype type) ··· 199 199 vif->active_links |= BIT(link->link_id); 200 200 } 201 201 202 - IWL_MLD_ALLOC_FN(link_sta, link_sta) 202 + static IWL_MLD_ALLOC_FN(link_sta, link_sta) 203 203 204 204 static void iwlmld_kunit_add_link_sta(struct ieee80211_sta *sta, 205 205 struct ieee80211_link_sta *link_sta,
+73 -5
drivers/net/wireless/intel/iwlwifi/mld/tlc.c
··· 9 9 #include "hcmd.h" 10 10 #include "sta.h" 11 11 #include "phy.h" 12 + #include "iface.h" 12 13 13 14 #include "fw/api/rs.h" 14 15 #include "fw/api/context.h" ··· 37 36 struct ieee80211_vif *vif, 38 37 struct ieee80211_link_sta *link_sta, 39 38 const struct ieee80211_sta_he_cap *own_he_cap, 40 - const struct ieee80211_sta_eht_cap *own_eht_cap) 39 + const struct ieee80211_sta_eht_cap *own_eht_cap, 40 + const struct ieee80211_sta_uhr_cap *own_uhr_cap) 41 41 { 42 42 struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap; 43 43 struct ieee80211_sta_vht_cap *vht_cap = &link_sta->vht_cap; ··· 91 89 IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF) { 92 90 flags |= IWL_TLC_MNG_CFG_FLAGS_EHT_EXTRA_LTF_MSK; 93 91 } 92 + 93 + if (link_sta->uhr_cap.has_uhr && own_uhr_cap && 94 + link_sta->uhr_cap.phy.cap & IEEE80211_UHR_PHY_CAP_ELR_RX && 95 + own_uhr_cap->phy.cap & IEEE80211_UHR_PHY_CAP_ELR_TX) 96 + flags |= IWL_TLC_MNG_CFG_FLAGS_UHR_ELR_1_5_MBPS_MSK | 97 + IWL_TLC_MNG_CFG_FLAGS_UHR_ELR_3_MBPS_MSK; 94 98 95 99 return cpu_to_le16(flags); 96 100 } ··· 414 406 struct ieee80211_supported_band *sband, 415 407 const struct ieee80211_sta_he_cap *own_he_cap, 416 408 const struct ieee80211_sta_eht_cap *own_eht_cap, 409 + const struct ieee80211_sta_uhr_cap *own_uhr_cap, 417 410 struct iwl_tlc_config_cmd *cmd) 418 411 { 419 412 int i; ··· 432 423 cmd->non_ht_rates = cpu_to_le16(non_ht_rates); 433 424 cmd->mode = IWL_TLC_MNG_MODE_NON_HT; 434 425 435 - if (link_sta->eht_cap.has_eht && own_he_cap && own_eht_cap) { 426 + if (link_sta->uhr_cap.has_uhr && own_uhr_cap) { 427 + cmd->mode = IWL_TLC_MNG_MODE_UHR; 428 + /* 429 + * FIXME: spec currently inherits from EHT but has no 430 + * finer MCS bits. Once that's there, need to add them 431 + * to the bitmaps (and maybe copy this to UHR, or so.) 
432 + */ 433 + iwl_mld_fill_eht_rates(vif, link_sta, own_he_cap, 434 + own_eht_cap, cmd); 435 + } else if (link_sta->eht_cap.has_eht && own_he_cap && own_eht_cap) { 436 436 cmd->mode = IWL_TLC_MNG_MODE_EHT; 437 437 iwl_mld_fill_eht_rates(vif, link_sta, own_he_cap, 438 438 own_eht_cap, cmd); ··· 531 513 struct ieee80211_bss_conf *link) 532 514 { 533 515 struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta); 516 + struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link); 534 517 enum nl80211_band band = link->chanreq.oper.chan->band; 535 518 struct ieee80211_supported_band *sband = mld->hw->wiphy->bands[band]; 536 519 const struct ieee80211_sta_he_cap *own_he_cap = 537 520 ieee80211_get_he_iftype_cap_vif(sband, vif); 538 521 const struct ieee80211_sta_eht_cap *own_eht_cap = 539 522 ieee80211_get_eht_iftype_cap_vif(sband, vif); 523 + const struct ieee80211_sta_uhr_cap *own_uhr_cap = 524 + ieee80211_get_uhr_iftype_cap_vif(sband, vif); 540 525 struct iwl_tlc_config_cmd cmd = { 541 526 /* For AP mode, use 20 MHz until the STA is authorized */ 542 527 .max_ch_width = mld_sta->sta_state > IEEE80211_STA_ASSOC ? 
543 528 iwl_mld_fw_bw_from_sta_bw(link_sta) : 544 529 IWL_TLC_MNG_CH_WIDTH_20MHZ, 545 530 .flags = iwl_mld_get_tlc_cmd_flags(mld, vif, link_sta, 546 - own_he_cap, own_eht_cap), 531 + own_he_cap, own_eht_cap, 532 + own_uhr_cap), 547 533 .chains = iwl_mld_get_fw_chains(mld), 548 534 .sgi_ch_width_supp = iwl_mld_get_fw_sgi(link_sta), 549 535 .max_mpdu_len = cpu_to_le16(link_sta->agg.max_amsdu_len), ··· 568 546 569 547 cmd.sta_mask = cpu_to_le32(BIT(fw_sta_id)); 570 548 571 - chan_ctx = rcu_dereference_wiphy(mld->wiphy, link->chanctx_conf); 549 + if (WARN_ON_ONCE(!mld_link)) 550 + return; 551 + 552 + chan_ctx = rcu_dereference_wiphy(mld->wiphy, mld_link->chan_ctx); 572 553 if (WARN_ON(!chan_ctx)) 573 554 return; 574 555 ··· 580 555 581 556 iwl_mld_fill_supp_rates(mld, vif, link_sta, sband, 582 557 own_he_cap, own_eht_cap, 583 - &cmd); 558 + own_uhr_cap, &cmd); 584 559 585 560 if (cmd_ver == 6) { 586 561 cmd_ptr = &cmd; ··· 661 636 } 662 637 663 638 iwl_mld_send_tlc_cmd(mld, vif, link_sta, link_conf); 639 + } 640 + 641 + void iwl_mld_tlc_update_phy(struct iwl_mld *mld, struct ieee80211_vif *vif, 642 + struct ieee80211_bss_conf *link_conf) 643 + { 644 + struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link_conf); 645 + struct ieee80211_chanctx_conf *chan_ctx; 646 + int link_id = link_conf->link_id; 647 + struct ieee80211_sta *sta; 648 + 649 + lockdep_assert_wiphy(mld->wiphy); 650 + 651 + if (WARN_ON(!mld_link)) 652 + return; 653 + 654 + chan_ctx = rcu_dereference_wiphy(mld->wiphy, mld_link->chan_ctx); 655 + 656 + for_each_station(sta, mld->hw) { 657 + struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta); 658 + struct iwl_mld_link_sta *mld_link_sta; 659 + struct ieee80211_link_sta *link_sta; 660 + 661 + if (mld_sta->vif != vif) 662 + continue; 663 + 664 + link_sta = link_sta_dereference_protected(sta, link_id); 665 + if (!link_sta) 666 + continue; 667 + 668 + mld_link_sta = iwl_mld_link_sta_dereference_check(mld_sta, 669 + link_id); 670 + 671 + /* In 
recovery flow, the station may not be (yet) in the 672 + * firmware, don't send a TLC command for a station the 673 + * firmware does not know. 674 + */ 675 + if (!mld_link_sta || !mld_link_sta->in_fw) 676 + continue; 677 + 678 + if (chan_ctx) 679 + iwl_mld_config_tlc_link(mld, vif, link_conf, link_sta); 680 + /* TODO: else, remove the TLC object in the firmware */ 681 + } 664 682 } 665 683 666 684 void iwl_mld_config_tlc(struct iwl_mld *mld, struct ieee80211_vif *vif,
+3
drivers/net/wireless/intel/iwlwifi/mld/tlc.h
··· 20 20 21 21 int iwl_mld_send_tlc_dhc(struct iwl_mld *mld, u8 sta_id, u32 type, u32 data); 22 22 23 + void iwl_mld_tlc_update_phy(struct iwl_mld *mld, struct ieee80211_vif *vif, 24 + struct ieee80211_bss_conf *link_conf); 25 + 23 26 #endif /* __iwl_mld_tlc_h__ */
+143 -14
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
··· 459 459 460 460 static void iwl_mvm_uats_init(struct iwl_mvm *mvm) 461 461 { 462 + int cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, 463 + MCC_ALLOWED_AP_TYPE_CMD); 464 + struct iwl_mcc_allowed_ap_type_cmd_v1 cmd = {}; 462 465 u8 cmd_ver; 463 466 int ret; 464 - struct iwl_host_cmd cmd = { 465 - .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, 466 - MCC_ALLOWED_AP_TYPE_CMD), 467 - .flags = 0, 468 - .data[0] = &mvm->fwrt.uats_table, 469 - .len[0] = sizeof(mvm->fwrt.uats_table), 470 - .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 471 - }; 472 467 473 468 if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) { 474 469 IWL_DEBUG_RADIO(mvm, "UATS feature is not supported\n"); 475 470 return; 476 471 } 477 472 478 - cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id, 473 + cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 479 474 IWL_FW_CMD_VER_UNKNOWN); 480 475 if (cmd_ver != 1) { 481 476 IWL_DEBUG_RADIO(mvm, ··· 481 486 482 487 iwl_uefi_get_uats_table(mvm->trans, &mvm->fwrt); 483 488 484 - if (!mvm->fwrt.uats_valid) 489 + if (!mvm->fwrt.ap_type_cmd_valid) 485 490 return; 486 491 487 - ret = iwl_mvm_send_cmd(mvm, &cmd); 492 + BUILD_BUG_ON(sizeof(mvm->fwrt.ap_type_cmd.mcc_to_ap_type_map) != 493 + sizeof(cmd.mcc_to_ap_type_map)); 494 + 495 + memcpy(cmd.mcc_to_ap_type_map, 496 + mvm->fwrt.ap_type_cmd.mcc_to_ap_type_map, 497 + sizeof(mvm->fwrt.ap_type_cmd.mcc_to_ap_type_map)); 498 + 499 + ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd); 488 500 if (ret < 0) 489 501 IWL_ERR(mvm, "failed to send MCC_ALLOWED_AP_TYPE_CMD (%d)\n", 490 502 ret); ··· 908 906 909 907 int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) 910 908 { 911 - union iwl_geo_tx_power_profiles_cmd geo_tx_cmd; 909 + union iwl_geo_tx_power_profiles_cmd geo_tx_cmd = {}; 912 910 struct iwl_geo_tx_power_profiles_resp *resp; 913 911 u16 len; 914 912 int ret; ··· 960 958 static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) 961 959 { 962 960 u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD); 963 - 
union iwl_geo_tx_power_profiles_cmd cmd; 961 + union iwl_geo_tx_power_profiles_cmd cmd = {}; 964 962 u16 len; 965 963 u32 n_bands; 966 964 u32 n_profiles; ··· 1034 1032 return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd); 1035 1033 } 1036 1034 1035 + static bool iwl_mvm_ppag_value_valid(struct iwl_fw_runtime *fwrt, int chain, 1036 + int subband) 1037 + { 1038 + s8 ppag_val = fwrt->ppag_chains[chain].subbands[subband]; 1039 + 1040 + if ((subband == 0 && 1041 + (ppag_val > IWL_PPAG_MAX_LB || ppag_val < IWL_PPAG_MIN_LB)) || 1042 + (subband != 0 && 1043 + (ppag_val > IWL_PPAG_MAX_HB || ppag_val < IWL_PPAG_MIN_HB))) { 1044 + IWL_DEBUG_RADIO(fwrt, "Invalid PPAG value: %d\n", ppag_val); 1045 + return false; 1046 + } 1047 + return true; 1048 + } 1049 + 1050 + static int iwl_mvm_fill_ppag_table(struct iwl_fw_runtime *fwrt, 1051 + union iwl_ppag_table_cmd *cmd, 1052 + int *cmd_size) 1053 + { 1054 + u8 cmd_ver; 1055 + int i, j, num_sub_bands; 1056 + s8 *gain; 1057 + bool send_ppag_always; 1058 + 1059 + /* many firmware images for JF lie about this */ 1060 + if (CSR_HW_RFID_TYPE(fwrt->trans->info.hw_rf_id) == 1061 + CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF)) 1062 + return -EOPNOTSUPP; 1063 + 1064 + if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) { 1065 + IWL_DEBUG_RADIO(fwrt, 1066 + "PPAG capability not supported by FW, command not sent.\n"); 1067 + return -EINVAL; 1068 + } 1069 + 1070 + cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw, 1071 + WIDE_ID(PHY_OPS_GROUP, 1072 + PER_PLATFORM_ANT_GAIN_CMD), 1); 1073 + /* 1074 + * Starting from ver 4, driver needs to send the PPAG CMD regardless 1075 + * if PPAG is enabled/disabled or valid/invalid. 
1076 + */ 1077 + send_ppag_always = cmd_ver > 3; 1078 + 1079 + /* Don't send PPAG if it is disabled */ 1080 + if (!send_ppag_always && !fwrt->ppag_flags) { 1081 + IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n"); 1082 + return -EINVAL; 1083 + } 1084 + 1085 + IWL_DEBUG_RADIO(fwrt, "PPAG cmd ver is %d\n", cmd_ver); 1086 + if (cmd_ver == 1) { 1087 + num_sub_bands = IWL_NUM_SUB_BANDS_V1; 1088 + gain = cmd->v1.gain[0]; 1089 + *cmd_size = sizeof(cmd->v1); 1090 + cmd->v1.flags = 1091 + cpu_to_le32(fwrt->ppag_flags & IWL_PPAG_CMD_V1_MASK); 1092 + if (fwrt->ppag_bios_rev >= 1) { 1093 + /* in this case FW supports revision 0 */ 1094 + IWL_DEBUG_RADIO(fwrt, 1095 + "PPAG table rev is %d, send truncated table\n", 1096 + fwrt->ppag_bios_rev); 1097 + } 1098 + } else if (cmd_ver == 5) { 1099 + num_sub_bands = IWL_NUM_SUB_BANDS_V2; 1100 + gain = cmd->v5.gain[0]; 1101 + *cmd_size = sizeof(cmd->v5); 1102 + cmd->v5.flags = 1103 + cpu_to_le32(fwrt->ppag_flags & IWL_PPAG_CMD_V5_MASK); 1104 + if (fwrt->ppag_bios_rev == 0) { 1105 + /* in this case FW supports revisions 1,2 or 3 */ 1106 + IWL_DEBUG_RADIO(fwrt, 1107 + "PPAG table rev is 0, send padded table\n"); 1108 + } 1109 + } else if (cmd_ver == 7) { 1110 + num_sub_bands = IWL_NUM_SUB_BANDS_V2; 1111 + gain = cmd->v7.gain[0]; 1112 + *cmd_size = sizeof(cmd->v7); 1113 + cmd->v7.ppag_config_info.hdr.table_source = 1114 + fwrt->ppag_bios_source; 1115 + cmd->v7.ppag_config_info.hdr.table_revision = 1116 + fwrt->ppag_bios_rev; 1117 + cmd->v7.ppag_config_info.value = cpu_to_le32(fwrt->ppag_flags); 1118 + } else { 1119 + IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n"); 1120 + return -EINVAL; 1121 + } 1122 + 1123 + /* ppag mode */ 1124 + IWL_DEBUG_RADIO(fwrt, 1125 + "PPAG MODE bits were read from bios: %d\n", 1126 + fwrt->ppag_flags); 1127 + 1128 + if (cmd_ver == 1 && 1129 + !fw_has_capa(&fwrt->fw->ucode_capa, 1130 + IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) { 1131 + cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK); 
1132 + IWL_DEBUG_RADIO(fwrt, "masking ppag China bit\n"); 1133 + } else { 1134 + IWL_DEBUG_RADIO(fwrt, "isn't masking ppag China bit\n"); 1135 + } 1136 + 1137 + /* The 'flags' field is the same in v1 and v5 so we can just 1138 + * use v1 to access it. 1139 + */ 1140 + IWL_DEBUG_RADIO(fwrt, 1141 + "PPAG MODE bits going to be sent: %d\n", 1142 + (cmd_ver < 7) ? le32_to_cpu(cmd->v1.flags) : 1143 + le32_to_cpu(cmd->v7.ppag_config_info.value)); 1144 + 1145 + for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) { 1146 + for (j = 0; j < num_sub_bands; j++) { 1147 + if (!send_ppag_always && 1148 + !iwl_mvm_ppag_value_valid(fwrt, i, j)) 1149 + return -EINVAL; 1150 + 1151 + gain[i * num_sub_bands + j] = 1152 + fwrt->ppag_chains[i].subbands[j]; 1153 + IWL_DEBUG_RADIO(fwrt, 1154 + "PPAG table: chain[%d] band[%d]: gain = %d\n", 1155 + i, j, gain[i * num_sub_bands + j]); 1156 + } 1157 + } 1158 + 1159 + return 0; 1160 + } 1161 + 1037 1162 int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm) 1038 1163 { 1039 1164 union iwl_ppag_table_cmd cmd; 1040 1165 int ret, cmd_size; 1041 1166 1042 - ret = iwl_fill_ppag_table(&mvm->fwrt, &cmd, &cmd_size); 1167 + ret = iwl_mvm_fill_ppag_table(&mvm->fwrt, &cmd, &cmd_size); 1043 1168 /* Not supporting PPAG table is a valid scenario */ 1044 1169 if (ret < 0) 1045 1170 return 0;
+4 -3
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
··· 6229 6229 ret = wait_event_timeout(mvm->rx_sync_waitq, 6230 6230 READ_ONCE(mvm->queue_sync_state) == 0, 6231 6231 SYNC_RX_QUEUE_TIMEOUT); 6232 - WARN_ONCE(!ret, "queue sync: failed to sync, state is 0x%lx, cookie %d\n", 6233 - mvm->queue_sync_state, 6234 - mvm->queue_sync_cookie); 6232 + IWL_FW_CHECK(mvm, !ret, 6233 + "queue sync: failed to sync, state is 0x%lx, cookie %d\n", 6234 + mvm->queue_sync_state, 6235 + mvm->queue_sync_cookie); 6235 6236 } 6236 6237 6237 6238 out:
-46
drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
··· 121 121 int err; 122 122 }; 123 123 124 - static void iwl_mvm_mld_update_sta_key(struct ieee80211_hw *hw, 125 - struct ieee80211_vif *vif, 126 - struct ieee80211_sta *sta, 127 - struct ieee80211_key_conf *key, 128 - void *_data) 129 - { 130 - u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD); 131 - struct iwl_mvm_sta_key_update_data *data = _data; 132 - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 133 - struct iwl_sec_key_cmd cmd = { 134 - .action = cpu_to_le32(FW_CTXT_ACTION_MODIFY), 135 - .u.modify.old_sta_mask = cpu_to_le32(data->old_sta_mask), 136 - .u.modify.new_sta_mask = cpu_to_le32(data->new_sta_mask), 137 - .u.modify.key_id = cpu_to_le32(key->keyidx), 138 - .u.modify.key_flags = 139 - cpu_to_le32(iwl_mvm_get_sec_flags(mvm, vif, sta, key)), 140 - }; 141 - int err; 142 - 143 - /* only need to do this for pairwise keys (link_id == -1) */ 144 - if (sta != data->sta || key->link_id >= 0) 145 - return; 146 - 147 - err = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd); 148 - 149 - if (err) 150 - data->err = err; 151 - } 152 - 153 - int iwl_mvm_mld_update_sta_keys(struct iwl_mvm *mvm, 154 - struct ieee80211_vif *vif, 155 - struct ieee80211_sta *sta, 156 - u32 old_sta_mask, 157 - u32 new_sta_mask) 158 - { 159 - struct iwl_mvm_sta_key_update_data data = { 160 - .sta = sta, 161 - .old_sta_mask = old_sta_mask, 162 - .new_sta_mask = new_sta_mask, 163 - }; 164 - 165 - ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_mld_update_sta_key, 166 - &data); 167 - return data.err; 168 - } 169 - 170 124 static int __iwl_mvm_sec_key_del(struct iwl_mvm *mvm, u32 sta_mask, 171 125 u32 key_flags, u32 keyidx, u32 flags) 172 126 {
+9 -9
drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
··· 6 6 7 7 static void iwl_mvm_mld_set_he_support(struct iwl_mvm *mvm, 8 8 struct ieee80211_vif *vif, 9 - struct iwl_mac_config_cmd *cmd, 9 + struct iwl_mac_config_cmd_v3 *cmd, 10 10 int cmd_ver) 11 11 { 12 12 if (vif->type == NL80211_IFTYPE_AP) { ··· 24 24 25 25 static void iwl_mvm_mld_mac_ctxt_cmd_common(struct iwl_mvm *mvm, 26 26 struct ieee80211_vif *vif, 27 - struct iwl_mac_config_cmd *cmd, 27 + struct iwl_mac_config_cmd_v3 *cmd, 28 28 u32 action) 29 29 { 30 30 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); ··· 83 83 } 84 84 85 85 static int iwl_mvm_mld_mac_ctxt_send_cmd(struct iwl_mvm *mvm, 86 - struct iwl_mac_config_cmd *cmd) 86 + struct iwl_mac_config_cmd_v3 *cmd) 87 87 { 88 88 int ret = iwl_mvm_send_cmd_pdu(mvm, 89 89 WIDE_ID(MAC_CONF_GROUP, MAC_CONFIG_CMD), ··· 98 98 struct ieee80211_vif *vif, 99 99 u32 action, bool force_assoc_off) 100 100 { 101 - struct iwl_mac_config_cmd cmd = {}; 101 + struct iwl_mac_config_cmd_v3 cmd = {}; 102 102 103 103 WARN_ON(vif->type != NL80211_IFTYPE_STATION); 104 104 ··· 151 151 struct ieee80211_vif *vif, 152 152 u32 action) 153 153 { 154 - struct iwl_mac_config_cmd cmd = {}; 154 + struct iwl_mac_config_cmd_v3 cmd = {}; 155 155 156 156 WARN_ON(vif->type != NL80211_IFTYPE_MONITOR); 157 157 ··· 170 170 struct ieee80211_vif *vif, 171 171 u32 action) 172 172 { 173 - struct iwl_mac_config_cmd cmd = {}; 173 + struct iwl_mac_config_cmd_v3 cmd = {}; 174 174 175 175 WARN_ON(vif->type != NL80211_IFTYPE_ADHOC); 176 176 ··· 187 187 struct ieee80211_vif *vif, 188 188 u32 action) 189 189 { 190 - struct iwl_mac_config_cmd cmd = {}; 190 + struct iwl_mac_config_cmd_v3 cmd = {}; 191 191 192 192 WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE); 193 193 ··· 210 210 u32 action) 211 211 { 212 212 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 213 - struct iwl_mac_config_cmd cmd = {}; 213 + struct iwl_mac_config_cmd_v3 cmd = {}; 214 214 215 215 WARN_ON(vif->type != NL80211_IFTYPE_AP); 216 216 ··· 286 286 int 
iwl_mvm_mld_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 287 287 { 288 288 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 289 - struct iwl_mac_config_cmd cmd = { 289 + struct iwl_mac_config_cmd_v3 cmd = { 290 290 .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), 291 291 .id_and_color = cpu_to_le32(mvmvif->id), 292 292 };
-132
drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
··· 886 886 return iwl_mvm_roc_common(hw, vif, channel, duration, type, &ops); 887 887 } 888 888 889 - static int 890 - iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw, 891 - struct ieee80211_vif *vif, 892 - u16 old_links, u16 new_links, 893 - struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS]) 894 - { 895 - struct iwl_mvm_vif_link_info *new_link[IEEE80211_MLD_MAX_NUM_LINKS] = {}; 896 - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 897 - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 898 - u16 removed = old_links & ~new_links; 899 - u16 added = new_links & ~old_links; 900 - int err, i; 901 - 902 - for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) { 903 - if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) 904 - break; 905 - 906 - if (!(added & BIT(i))) 907 - continue; 908 - new_link[i] = kzalloc_obj(*new_link[i]); 909 - if (!new_link[i]) { 910 - err = -ENOMEM; 911 - goto free; 912 - } 913 - 914 - new_link[i]->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID; 915 - iwl_mvm_init_link(new_link[i]); 916 - } 917 - 918 - mutex_lock(&mvm->mutex); 919 - 920 - /* If we're in RESTART flow, the default link wasn't added in 921 - * drv_add_interface(), and link[0] doesn't point to it. 
922 - */ 923 - if (old_links == 0 && !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, 924 - &mvm->status)) { 925 - err = iwl_mvm_disable_link(mvm, vif, &vif->bss_conf); 926 - if (err) 927 - goto out_err; 928 - mvmvif->link[0] = NULL; 929 - } 930 - 931 - for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) { 932 - if (removed & BIT(i)) { 933 - struct ieee80211_bss_conf *link_conf = old[i]; 934 - 935 - err = iwl_mvm_disable_link(mvm, vif, link_conf); 936 - if (err) 937 - goto out_err; 938 - kfree(mvmvif->link[i]); 939 - mvmvif->link[i] = NULL; 940 - } else if (added & BIT(i)) { 941 - struct ieee80211_bss_conf *link_conf; 942 - 943 - link_conf = link_conf_dereference_protected(vif, i); 944 - if (WARN_ON(!link_conf)) 945 - continue; 946 - 947 - if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, 948 - &mvm->status)) 949 - mvmvif->link[i] = new_link[i]; 950 - new_link[i] = NULL; 951 - err = iwl_mvm_add_link(mvm, vif, link_conf); 952 - if (err) 953 - goto out_err; 954 - } 955 - } 956 - 957 - err = 0; 958 - if (new_links == 0) { 959 - mvmvif->link[0] = &mvmvif->deflink; 960 - err = iwl_mvm_add_link(mvm, vif, &vif->bss_conf); 961 - } 962 - 963 - out_err: 964 - /* we really don't have a good way to roll back here ... 
*/ 965 - mutex_unlock(&mvm->mutex); 966 - 967 - free: 968 - for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) 969 - kfree(new_link[i]); 970 - return err; 971 - } 972 - 973 - static int 974 - iwl_mvm_mld_change_sta_links(struct ieee80211_hw *hw, 975 - struct ieee80211_vif *vif, 976 - struct ieee80211_sta *sta, 977 - u16 old_links, u16 new_links) 978 - { 979 - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 980 - 981 - guard(mvm)(mvm); 982 - return iwl_mvm_mld_update_sta_links(mvm, vif, sta, old_links, new_links); 983 - } 984 - 985 - static bool iwl_mvm_mld_can_activate_links(struct ieee80211_hw *hw, 986 - struct ieee80211_vif *vif, 987 - u16 desired_links) 988 - { 989 - int n_links = hweight16(desired_links); 990 - 991 - if (n_links <= 1) 992 - return true; 993 - 994 - WARN_ON(1); 995 - return false; 996 - } 997 - 998 - static enum ieee80211_neg_ttlm_res 999 - iwl_mvm_mld_can_neg_ttlm(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 1000 - struct ieee80211_neg_ttlm *neg_ttlm) 1001 - { 1002 - u16 map; 1003 - u8 i; 1004 - 1005 - /* Verify all TIDs are mapped to the same links set */ 1006 - map = neg_ttlm->downlink[0]; 1007 - for (i = 0; i < IEEE80211_TTLM_NUM_TIDS; i++) { 1008 - if (neg_ttlm->downlink[i] != neg_ttlm->uplink[i] || 1009 - neg_ttlm->uplink[i] != map) 1010 - return NEG_TTLM_RES_REJECT; 1011 - } 1012 - 1013 - return NEG_TTLM_RES_ACCEPT; 1014 - } 1015 - 1016 889 const struct ieee80211_ops iwl_mvm_mld_hw_ops = { 1017 890 .tx = iwl_mvm_mac_tx, 1018 891 .wake_tx_queue = iwl_mvm_mac_wake_tx_queue, ··· 975 1102 .link_sta_add_debugfs = iwl_mvm_link_sta_add_debugfs, 976 1103 #endif 977 1104 .set_hw_timestamp = iwl_mvm_set_hw_timestamp, 978 - 979 - .change_vif_links = iwl_mvm_mld_change_vif_links, 980 - .change_sta_links = iwl_mvm_mld_change_sta_links, 981 - .can_activate_links = iwl_mvm_mld_can_activate_links, 982 - .can_neg_ttlm = iwl_mvm_mld_can_neg_ttlm, 983 1105 };
+3 -288
drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
··· 20 20 } 21 21 22 22 static int iwl_mvm_mld_send_sta_cmd(struct iwl_mvm *mvm, 23 - struct iwl_sta_cfg_cmd *cmd) 23 + struct iwl_sta_cfg_cmd_v2 *cmd) 24 24 { 25 25 u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD); 26 26 int cmd_len = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 0) > 1 ? ··· 41 41 struct iwl_mvm_int_sta *sta, 42 42 const u8 *addr, int link_id) 43 43 { 44 - struct iwl_sta_cfg_cmd cmd; 44 + struct iwl_sta_cfg_cmd_v2 cmd; 45 45 46 46 lockdep_assert_held(&mvm->mutex); 47 47 ··· 416 416 struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif); 417 417 struct iwl_mvm_vif_link_info *link_info = 418 418 mvm_vif->link[link_conf->link_id]; 419 - struct iwl_sta_cfg_cmd cmd = { 419 + struct iwl_sta_cfg_cmd_v2 cmd = { 420 420 .sta_id = cpu_to_le32(mvm_link_sta->sta_id), 421 421 .station_type = cpu_to_le32(mvm_sta->sta_type), 422 422 }; ··· 912 912 } 913 913 914 914 rcu_read_unlock(); 915 - } 916 - 917 - static int iwl_mvm_mld_update_sta_queues(struct iwl_mvm *mvm, 918 - struct ieee80211_sta *sta, 919 - u32 old_sta_mask, 920 - u32 new_sta_mask) 921 - { 922 - struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 923 - struct iwl_scd_queue_cfg_cmd cmd = { 924 - .operation = cpu_to_le32(IWL_SCD_QUEUE_MODIFY), 925 - .u.modify.old_sta_mask = cpu_to_le32(old_sta_mask), 926 - .u.modify.new_sta_mask = cpu_to_le32(new_sta_mask), 927 - }; 928 - struct iwl_host_cmd hcmd = { 929 - .id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD), 930 - .len[0] = sizeof(cmd), 931 - .data[0] = &cmd 932 - }; 933 - int tid; 934 - int ret; 935 - 936 - lockdep_assert_held(&mvm->mutex); 937 - 938 - for (tid = 0; tid <= IWL_MAX_TID_COUNT; tid++) { 939 - struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[tid]; 940 - int txq_id = tid_data->txq_id; 941 - 942 - if (txq_id == IWL_MVM_INVALID_QUEUE) 943 - continue; 944 - 945 - if (tid == IWL_MAX_TID_COUNT) 946 - cmd.u.modify.tid = cpu_to_le32(IWL_MGMT_TID); 947 - else 948 - cmd.u.modify.tid = cpu_to_le32(tid); 949 - 950 - ret = 
iwl_mvm_send_cmd(mvm, &hcmd); 951 - if (ret) 952 - return ret; 953 - } 954 - 955 - return 0; 956 - } 957 - 958 - static int iwl_mvm_mld_update_sta_baids(struct iwl_mvm *mvm, 959 - u32 old_sta_mask, 960 - u32 new_sta_mask) 961 - { 962 - struct iwl_rx_baid_cfg_cmd cmd = { 963 - .action = cpu_to_le32(IWL_RX_BAID_ACTION_MODIFY), 964 - .modify.old_sta_id_mask = cpu_to_le32(old_sta_mask), 965 - .modify.new_sta_id_mask = cpu_to_le32(new_sta_mask), 966 - }; 967 - u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD); 968 - int baid; 969 - 970 - /* mac80211 will remove sessions later, but we ignore all that */ 971 - if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) 972 - return 0; 973 - 974 - BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid)); 975 - 976 - for (baid = 0; baid < ARRAY_SIZE(mvm->baid_map); baid++) { 977 - struct iwl_mvm_baid_data *data; 978 - int ret; 979 - 980 - data = rcu_dereference_protected(mvm->baid_map[baid], 981 - lockdep_is_held(&mvm->mutex)); 982 - if (!data) 983 - continue; 984 - 985 - if (!(data->sta_mask & old_sta_mask)) 986 - continue; 987 - 988 - WARN_ONCE(data->sta_mask != old_sta_mask, 989 - "BAID data for %d corrupted - expected 0x%x found 0x%x\n", 990 - baid, old_sta_mask, data->sta_mask); 991 - 992 - cmd.modify.tid = cpu_to_le32(data->tid); 993 - 994 - ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_SEND_IN_RFKILL, 995 - sizeof(cmd), &cmd); 996 - data->sta_mask = new_sta_mask; 997 - if (ret) 998 - return ret; 999 - } 1000 - 1001 - return 0; 1002 - } 1003 - 1004 - static int iwl_mvm_mld_update_sta_resources(struct iwl_mvm *mvm, 1005 - struct ieee80211_vif *vif, 1006 - struct ieee80211_sta *sta, 1007 - u32 old_sta_mask, 1008 - u32 new_sta_mask) 1009 - { 1010 - int ret; 1011 - 1012 - ret = iwl_mvm_mld_update_sta_queues(mvm, sta, 1013 - old_sta_mask, 1014 - new_sta_mask); 1015 - if (ret) 1016 - return ret; 1017 - 1018 - ret = iwl_mvm_mld_update_sta_keys(mvm, vif, sta, 1019 - old_sta_mask, 1020 - new_sta_mask); 
1021 - if (ret) 1022 - return ret; 1023 - 1024 - return iwl_mvm_mld_update_sta_baids(mvm, old_sta_mask, new_sta_mask); 1025 - } 1026 - 1027 - int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm, 1028 - struct ieee80211_vif *vif, 1029 - struct ieee80211_sta *sta, 1030 - u16 old_links, u16 new_links) 1031 - { 1032 - struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 1033 - struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif); 1034 - struct iwl_mvm_link_sta *mvm_sta_link; 1035 - struct iwl_mvm_vif_link_info *mvm_vif_link; 1036 - unsigned long links_to_add = ~old_links & new_links; 1037 - unsigned long links_to_rem = old_links & ~new_links; 1038 - unsigned long old_links_long = old_links; 1039 - u32 current_sta_mask = 0, sta_mask_added = 0, sta_mask_to_rem = 0; 1040 - unsigned long link_sta_added_to_fw = 0, link_sta_allocated = 0; 1041 - unsigned int link_id; 1042 - int ret; 1043 - 1044 - lockdep_assert_wiphy(mvm->hw->wiphy); 1045 - lockdep_assert_held(&mvm->mutex); 1046 - 1047 - for_each_set_bit(link_id, &old_links_long, 1048 - IEEE80211_MLD_MAX_NUM_LINKS) { 1049 - mvm_sta_link = 1050 - rcu_dereference_protected(mvm_sta->link[link_id], 1051 - lockdep_is_held(&mvm->mutex)); 1052 - 1053 - if (WARN_ON(!mvm_sta_link)) { 1054 - ret = -EINVAL; 1055 - goto err; 1056 - } 1057 - 1058 - current_sta_mask |= BIT(mvm_sta_link->sta_id); 1059 - if (links_to_rem & BIT(link_id)) 1060 - sta_mask_to_rem |= BIT(mvm_sta_link->sta_id); 1061 - } 1062 - 1063 - if (sta_mask_to_rem) { 1064 - ret = iwl_mvm_mld_update_sta_resources(mvm, vif, sta, 1065 - current_sta_mask, 1066 - current_sta_mask & 1067 - ~sta_mask_to_rem); 1068 - if (WARN_ON(ret)) 1069 - goto err; 1070 - 1071 - current_sta_mask &= ~sta_mask_to_rem; 1072 - } 1073 - 1074 - for_each_set_bit(link_id, &links_to_rem, IEEE80211_MLD_MAX_NUM_LINKS) { 1075 - mvm_sta_link = 1076 - rcu_dereference_protected(mvm_sta->link[link_id], 1077 - lockdep_is_held(&mvm->mutex)); 1078 - mvm_vif_link = mvm_vif->link[link_id]; 1079 - 
1080 - if (WARN_ON(!mvm_sta_link || !mvm_vif_link)) { 1081 - ret = -EINVAL; 1082 - goto err; 1083 - } 1084 - 1085 - ret = iwl_mvm_mld_rm_sta_from_fw(mvm, mvm_sta_link->sta_id); 1086 - if (WARN_ON(ret)) 1087 - goto err; 1088 - 1089 - if (vif->type == NL80211_IFTYPE_STATION) 1090 - mvm_vif_link->ap_sta_id = IWL_INVALID_STA; 1091 - 1092 - iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_sta_link, link_id); 1093 - } 1094 - 1095 - for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) { 1096 - struct ieee80211_bss_conf *link_conf = 1097 - link_conf_dereference_protected(vif, link_id); 1098 - struct ieee80211_link_sta *link_sta = 1099 - link_sta_dereference_protected(sta, link_id); 1100 - mvm_vif_link = mvm_vif->link[link_id]; 1101 - 1102 - if (WARN_ON(!mvm_vif_link || !link_conf || !link_sta)) { 1103 - ret = -EINVAL; 1104 - goto err; 1105 - } 1106 - 1107 - if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { 1108 - struct iwl_mvm_link_sta *mvm_link_sta = 1109 - rcu_dereference_protected(mvm_sta->link[link_id], 1110 - lockdep_is_held(&mvm->mutex)); 1111 - u32 sta_id; 1112 - 1113 - if (WARN_ON(!mvm_link_sta)) { 1114 - ret = -EINVAL; 1115 - goto err; 1116 - } 1117 - 1118 - sta_id = mvm_link_sta->sta_id; 1119 - 1120 - rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta); 1121 - rcu_assign_pointer(mvm->fw_id_to_link_sta[sta_id], 1122 - link_sta); 1123 - } else { 1124 - if (WARN_ON(mvm_sta->link[link_id])) { 1125 - ret = -EINVAL; 1126 - goto err; 1127 - } 1128 - ret = iwl_mvm_mld_alloc_sta_link(mvm, vif, sta, 1129 - link_id); 1130 - if (WARN_ON(ret)) 1131 - goto err; 1132 - } 1133 - 1134 - link_sta->agg.max_rc_amsdu_len = 1; 1135 - ieee80211_sta_recalc_aggregates(sta); 1136 - 1137 - mvm_sta_link = 1138 - rcu_dereference_protected(mvm_sta->link[link_id], 1139 - lockdep_is_held(&mvm->mutex)); 1140 - 1141 - if (WARN_ON(!mvm_sta_link)) { 1142 - ret = -EINVAL; 1143 - goto err; 1144 - } 1145 - 1146 - if (vif->type == NL80211_IFTYPE_STATION) 1147 - 
iwl_mvm_mld_set_ap_sta_id(sta, mvm_vif_link, 1148 - mvm_sta_link); 1149 - 1150 - link_sta_allocated |= BIT(link_id); 1151 - 1152 - sta_mask_added |= BIT(mvm_sta_link->sta_id); 1153 - 1154 - ret = iwl_mvm_mld_cfg_sta(mvm, sta, vif, link_sta, link_conf, 1155 - mvm_sta_link); 1156 - if (WARN_ON(ret)) 1157 - goto err; 1158 - 1159 - link_sta_added_to_fw |= BIT(link_id); 1160 - 1161 - iwl_mvm_rs_add_sta_link(mvm, mvm_sta_link); 1162 - 1163 - iwl_mvm_rs_rate_init(mvm, vif, sta, link_conf, link_sta, 1164 - link_conf->chanreq.oper.chan->band); 1165 - } 1166 - 1167 - if (sta_mask_added) { 1168 - ret = iwl_mvm_mld_update_sta_resources(mvm, vif, sta, 1169 - current_sta_mask, 1170 - current_sta_mask | 1171 - sta_mask_added); 1172 - if (WARN_ON(ret)) 1173 - goto err; 1174 - } 1175 - 1176 - return 0; 1177 - 1178 - err: 1179 - /* remove all already allocated stations in FW */ 1180 - for_each_set_bit(link_id, &link_sta_added_to_fw, 1181 - IEEE80211_MLD_MAX_NUM_LINKS) { 1182 - mvm_sta_link = 1183 - rcu_dereference_protected(mvm_sta->link[link_id], 1184 - lockdep_is_held(&mvm->mutex)); 1185 - 1186 - iwl_mvm_mld_rm_sta_from_fw(mvm, mvm_sta_link->sta_id); 1187 - } 1188 - 1189 - /* remove all already allocated station links in driver */ 1190 - for_each_set_bit(link_id, &link_sta_allocated, 1191 - IEEE80211_MLD_MAX_NUM_LINKS) { 1192 - mvm_sta_link = 1193 - rcu_dereference_protected(mvm_sta->link[link_id], 1194 - lockdep_is_held(&mvm->mutex)); 1195 - 1196 - iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_sta_link, link_id); 1197 - } 1198 - 1199 - return ret; 1200 915 }
-5
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
··· 2450 2450 struct ieee80211_vif *vif, 2451 2451 struct iwl_mvm_vif_link_info *link, 2452 2452 unsigned int link_id); 2453 - int iwl_mvm_mld_update_sta_keys(struct iwl_mvm *mvm, 2454 - struct ieee80211_vif *vif, 2455 - struct ieee80211_sta *sta, 2456 - u32 old_sta_mask, 2457 - u32 new_sta_mask); 2458 2453 int iwl_mvm_mld_send_key(struct iwl_mvm *mvm, u32 sta_mask, u32 key_flags, 2459 2454 struct ieee80211_key_conf *keyconf); 2460 2455 u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm,
+3 -1
drivers/net/wireless/intel/iwlwifi/mvm/ptp.c
··· 304 304 IWL_ERR(mvm, "Failed to register PHC clock (%ld)\n", 305 305 PTR_ERR(mvm->ptp_data.ptp_clock)); 306 306 mvm->ptp_data.ptp_clock = NULL; 307 - } else if (mvm->ptp_data.ptp_clock) { 307 + } else if (!mvm->ptp_data.ptp_clock) { 308 + IWL_DEBUG_INFO(mvm, "PTP module unavailable on this kernel\n"); 309 + } else { 308 310 IWL_DEBUG_INFO(mvm, "Registered PHC clock: %s, with index: %d\n", 309 311 mvm->ptp_data.ptp_clock_info.name, 310 312 ptp_clock_index(mvm->ptp_data.ptp_clock));
-4
drivers/net/wireless/intel/iwlwifi/mvm/sta.h
··· 637 637 struct iwl_mvm_link_sta *mvm_sta_link, 638 638 unsigned int link_id); 639 639 int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id); 640 - int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm, 641 - struct ieee80211_vif *vif, 642 - struct ieee80211_sta *sta, 643 - u16 old_links, u16 new_links); 644 640 u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 645 641 int filter_link_id); 646 642 int iwl_mvm_mld_add_int_sta_with_queue(struct iwl_mvm *mvm,
+3 -3
drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
··· 234 234 * Also convert TU to msec. 235 235 */ 236 236 delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int); 237 - mod_delayed_work(system_wq, &mvm->tdls_cs.dwork, 237 + mod_delayed_work(system_percpu_wq, &mvm->tdls_cs.dwork, 238 238 msecs_to_jiffies(delay)); 239 239 240 240 iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE); ··· 548 548 */ 549 549 delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period * 550 550 vif->bss_conf.beacon_int); 551 - mod_delayed_work(system_wq, &mvm->tdls_cs.dwork, 551 + mod_delayed_work(system_percpu_wq, &mvm->tdls_cs.dwork, 552 552 msecs_to_jiffies(delay)); 553 553 return 0; 554 554 } ··· 659 659 /* register a timeout in case we don't succeed in switching */ 660 660 delay = vif->bss_conf.dtim_period * vif->bss_conf.beacon_int * 661 661 1024 / 1000; 662 - mod_delayed_work(system_wq, &mvm->tdls_cs.dwork, 662 + mod_delayed_work(system_percpu_wq, &mvm->tdls_cs.dwork, 663 663 msecs_to_jiffies(delay)); 664 664 }
+10 -3
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans-gen2.c
··· 95 95 CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 96 96 } 97 97 98 - void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans) 98 + static void 99 + _iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans, 100 + bool dump_on_timeout) 99 101 { 100 102 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 101 103 int ret; ··· 135 133 "timeout waiting for FW reset ACK (inta_hw=0x%x, reset_done %d)\n", 136 134 inta_hw, reset_done); 137 135 138 - if (!reset_done) { 136 + if (!reset_done && dump_on_timeout) { 139 137 struct iwl_fw_error_dump_mode mode = { 140 138 .type = IWL_ERR_TYPE_RESET_HS_TIMEOUT, 141 139 .context = IWL_ERR_CONTEXT_FROM_OPMODE, ··· 147 145 } 148 146 149 147 trans_pcie->fw_reset_state = FW_RESET_IDLE; 148 + } 149 + 150 + void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans) 151 + { 152 + _iwl_trans_pcie_fw_reset_handshake(trans, false); 150 153 } 151 154 152 155 static void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) ··· 170 163 * should assume that the firmware is already dead. 171 164 */ 172 165 trans->state = IWL_TRANS_NO_FW; 173 - iwl_trans_pcie_fw_reset_handshake(trans); 166 + _iwl_trans_pcie_fw_reset_handshake(trans, true); 174 167 } 175 168 176 169 trans_pcie->is_down = true;
+1 -1
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
··· 3197 3197 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) 3198 3198 return -EINVAL; 3199 3199 if (mode == IWL_RESET_MODE_TOP_RESET) { 3200 - if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC) 3200 + if (!iwl_trans_is_top_reset_supported(trans)) 3201 3201 return -EINVAL; 3202 3202 trans->request_top_reset = 1; 3203 3203 }
+25 -14
drivers/net/wireless/mediatek/mt76/channel.c
··· 88 88 IEEE80211_CHANCTX_CHANGE_RADAR))) 89 89 return; 90 90 91 + if (phy->roc_vif) 92 + mt76_abort_roc(phy); 93 + 91 94 cancel_delayed_work_sync(&phy->mac_work); 92 95 93 96 mutex_lock(&dev->mutex); ··· 158 155 { 159 156 struct mt76_chanctx *ctx = (struct mt76_chanctx *)conf->drv_priv; 160 157 struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv; 161 - struct mt76_vif_data *mvif = mlink->mvif; 162 - int link_id = link_conf->link_id; 163 158 struct mt76_phy *phy = ctx->phy; 164 159 struct mt76_dev *dev = phy->dev; 165 160 ··· 174 173 if (!mlink) 175 174 goto out; 176 175 177 - if (mlink != (struct mt76_vif_link *)vif->drv_priv) 178 - rcu_assign_pointer(mvif->link[link_id], NULL); 179 - 180 176 dev->drv->vif_link_remove(phy, vif, link_conf, mlink); 181 177 mlink->ctx = NULL; 182 - 183 - if (mlink != (struct mt76_vif_link *)vif->drv_priv) 184 - kfree_rcu(mlink, rcu_head); 185 - 186 178 out: 187 179 mutex_unlock(&dev->mutex); 188 180 } ··· 248 254 continue; 249 255 250 256 mlink->ctx = vifs->new_ctx; 257 + if (mlink->beacon_mon_interval) 258 + WRITE_ONCE(mlink->beacon_mon_last, jiffies); 251 259 } 252 260 253 261 out: ··· 320 324 321 325 if (mlink) 322 326 mlink->mvif->roc_phy = NULL; 323 - if (phy->main_chandef.chan && 324 - !test_bit(MT76_MCU_RESET, &dev->phy.state)) 325 - mt76_set_channel(phy, &phy->main_chandef, false); 327 + if (phy->chanctx && phy->main_chandef.chan && phy->offchannel && 328 + !test_bit(MT76_MCU_RESET, &dev->phy.state)) { 329 + __mt76_set_channel(phy, &phy->main_chandef, false); 330 + mt76_offchannel_notify(phy, false); 331 + } 326 332 mt76_put_vif_phy_link(phy, phy->roc_vif, phy->roc_link); 327 333 phy->roc_vif = NULL; 328 334 phy->roc_link = NULL; ··· 362 364 struct mt76_phy *phy = hw->priv; 363 365 struct mt76_dev *dev = phy->dev; 364 366 struct mt76_vif_link *mlink; 367 + bool offchannel; 365 368 int ret = 0; 366 369 367 370 phy = dev->band_phys[chan->band]; 368 371 if (!phy) 369 372 return -EINVAL; 373 + 374 + 
cancel_delayed_work_sync(&phy->mac_work); 370 375 371 376 mutex_lock(&dev->mutex); 372 377 ··· 388 387 mlink->mvif->roc_phy = phy; 389 388 phy->roc_vif = vif; 390 389 phy->roc_link = mlink; 391 - cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20); 392 - mt76_set_channel(phy, &chandef, true); 390 + 391 + offchannel = mt76_offchannel_chandef(phy, chan, &chandef); 392 + if (offchannel) 393 + mt76_offchannel_notify(phy, true); 394 + ret = __mt76_set_channel(phy, &chandef, offchannel); 395 + if (ret) { 396 + mlink->mvif->roc_phy = NULL; 397 + phy->roc_vif = NULL; 398 + phy->roc_link = NULL; 399 + mt76_put_vif_phy_link(phy, vif, mlink); 400 + goto out; 401 + } 393 402 ieee80211_ready_on_channel(hw); 394 403 ieee80211_queue_delayed_work(phy->hw, &phy->roc_work, 395 404 msecs_to_jiffies(duration));
+25 -8
drivers/net/wireless/mediatek/mt76/dma.c
··· 6 6 #include <linux/dma-mapping.h> 7 7 #include "mt76.h" 8 8 #include "dma.h" 9 + #include "mt76_connac.h" 9 10 10 11 static struct mt76_txwi_cache * 11 12 mt76_alloc_txwi(struct mt76_dev *dev) ··· 189 188 static void 190 189 mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q) 191 190 { 192 - Q_WRITE(q, desc_base, q->desc_dma); 193 - if ((q->flags & MT_QFLAG_WED_RRO_EN) && !mt76_npu_device_active(dev)) 191 + if ((q->flags & MT_QFLAG_WED_RRO_EN) && 192 + (!is_mt7992(dev) || !mt76_npu_device_active(dev))) 194 193 Q_WRITE(q, ring_size, MT_DMA_RRO_EN | q->ndesc); 195 194 else 196 195 Q_WRITE(q, ring_size, q->ndesc); 197 196 198 197 if (mt76_queue_is_npu_tx(q)) { 199 - writel(q->desc_dma, &q->regs->desc_base); 200 198 writel(q->ndesc, &q->regs->ring_size); 199 + writel(q->desc_dma, &q->regs->desc_base); 201 200 } 201 + 202 + Q_WRITE(q, desc_base, q->desc_dma); 202 203 q->head = Q_READ(q, dma_idx); 203 204 q->tail = q->head; 204 205 } ··· 666 663 if (!t) 667 664 goto free_skb; 668 665 666 + t->phy_idx = phy->band_idx; 667 + t->qid = qid; 669 668 txwi = mt76_get_txwi_ptr(dev, t); 670 669 671 670 skb->prev = skb->next = NULL; ··· 879 874 if (!buf) 880 875 break; 881 876 882 - if (!mt76_queue_is_wed_rro(q)) 877 + if (mtk_wed_device_active(&dev->mmio.wed) && 878 + mt76_queue_is_wed_rro(q)) 879 + continue; 880 + 881 + if (mt76_npu_device_active(dev) && 882 + mt76_queue_is_wed_rro(q)) 883 + continue; 884 + 885 + if (!mt76_queue_is_wed_rro_rxdmad_c(q) && 886 + !mt76_queue_is_wed_rro_ind(q)) 883 887 mt76_put_page_pool_buf(buf, false); 884 888 } while (1); 885 889 ··· 927 913 928 914 if (mtk_wed_device_active(&dev->mmio.wed) && 929 915 mt76_queue_is_wed_rro(q)) 916 + return; 917 + 918 + if (mt76_npu_device_active(dev) && 919 + mt76_queue_is_wed_rro(q)) 920 + return; 921 + 922 + if (mt76_queue_is_npu_txfree(q)) 930 923 return; 931 924 932 925 mt76_dma_sync_idx(dev, q); ··· 1188 1167 1189 1168 mt76_for_each_q_rx(dev, i) { 1190 1169 struct mt76_queue *q = 
&dev->q_rx[i]; 1191 - 1192 - if (mtk_wed_device_active(&dev->mmio.wed) && 1193 - mt76_queue_is_wed_rro(q)) 1194 - continue; 1195 1170 1196 1171 netif_napi_del(&dev->napi[i]); 1197 1172 mt76_dma_rx_cleanup(dev, q);
+3 -1
drivers/net/wireless/mediatek/mt76/dma.h
··· 174 174 static inline void 175 175 mt76_dma_reset_tx_queue(struct mt76_dev *dev, struct mt76_queue *q) 176 176 { 177 - dev->queue_ops->reset_q(dev, q, true); 177 + bool reset_idx = q && !mt76_queue_is_npu_tx(q); 178 + 179 + dev->queue_ops->reset_q(dev, q, reset_idx); 178 180 if (mtk_wed_device_active(&dev->mmio.wed)) 179 181 mt76_wed_dma_setup(dev, q, true); 180 182 }
+109 -45
drivers/net/wireless/mediatek/mt76/eeprom.c
··· 9 9 #include <linux/nvmem-consumer.h> 10 10 #include <linux/etherdevice.h> 11 11 #include "mt76.h" 12 + #include "mt76_connac.h" 13 + 14 + enum mt76_sku_type { 15 + MT76_SKU_RATE, 16 + MT76_SKU_BACKOFF, 17 + MT76_SKU_BACKOFF_BF_OFFSET, 18 + }; 12 19 13 20 static int mt76_get_of_eeprom_data(struct mt76_dev *dev, void *eep, int len) 14 21 { ··· 299 292 } 300 293 EXPORT_SYMBOL_GPL(mt76_find_channel_node); 301 294 302 - 303 295 static s8 304 296 mt76_get_txs_delta(struct device_node *np, u8 nss) 305 297 { ··· 312 306 return be32_to_cpu(val[nss - 1]); 313 307 } 314 308 309 + static inline u8 mt76_backoff_n_chains(struct mt76_dev *dev, u8 idx) 310 + { 311 + /* 0:1T1ss, 1:2T1ss, ..., 14:5T5ss */ 312 + static const u8 connac3_table[] = { 313 + 1, 2, 3, 4, 5, 2, 3, 4, 5, 3, 4, 5, 4, 5, 5}; 314 + static const u8 connac2_table[] = { 315 + 1, 2, 3, 4, 2, 3, 4, 3, 4, 4, 0, 0, 0, 0, 0}; 316 + 317 + if (idx >= ARRAY_SIZE(connac3_table)) 318 + return 0; 319 + 320 + return is_mt799x(dev) ? connac3_table[idx] : connac2_table[idx]; 321 + } 322 + 315 323 static void 316 - mt76_apply_array_limit(s8 *pwr, size_t pwr_len, const s8 *data, 317 - s8 target_power, s8 nss_delta, s8 *max_power) 324 + mt76_apply_array_limit(struct mt76_dev *dev, s8 *pwr, size_t pwr_len, 325 + const s8 *data, s8 target_power, s8 nss_delta, 326 + s8 *max_power, int n_chains, enum mt76_sku_type type) 318 327 { 319 328 int i; 320 329 ··· 337 316 return; 338 317 339 318 for (i = 0; i < pwr_len; i++) { 340 - pwr[i] = min_t(s8, target_power, data[i] + nss_delta); 319 + u8 backoff_chain_idx = i; 320 + int backoff_n_chains; 321 + s8 backoff_delta; 322 + s8 delta; 323 + 324 + switch (type) { 325 + case MT76_SKU_RATE: 326 + delta = 0; 327 + backoff_delta = 0; 328 + backoff_n_chains = 0; 329 + break; 330 + case MT76_SKU_BACKOFF_BF_OFFSET: 331 + backoff_chain_idx += 1; 332 + fallthrough; 333 + case MT76_SKU_BACKOFF: 334 + delta = mt76_tx_power_path_delta(n_chains); 335 + backoff_n_chains = mt76_backoff_n_chains(dev, 
backoff_chain_idx); 336 + backoff_delta = mt76_tx_power_path_delta(backoff_n_chains); 337 + break; 338 + default: 339 + return; 340 + } 341 + 342 + pwr[i] = min_t(s8, target_power + delta - backoff_delta, data[i] + nss_delta); 343 + 344 + /* used for padding, doesn't need to be considered */ 345 + if (data[i] >= S8_MAX - 1) 346 + continue; 347 + 348 + /* only consider backoff value for the configured chain number */ 349 + if (type != MT76_SKU_RATE && n_chains != backoff_n_chains) 350 + continue; 351 + 341 352 *max_power = max(*max_power, pwr[i]); 342 353 } 343 354 } 344 355 345 356 static void 346 - mt76_apply_multi_array_limit(s8 *pwr, size_t pwr_len, s8 pwr_num, 347 - const s8 *data, size_t len, s8 target_power, 348 - s8 nss_delta) 357 + mt76_apply_multi_array_limit(struct mt76_dev *dev, s8 *pwr, size_t pwr_len, 358 + s8 pwr_num, const s8 *data, size_t len, 359 + s8 target_power, s8 nss_delta, s8 *max_power, 360 + int n_chains, enum mt76_sku_type type) 349 361 { 362 + static const int connac2_backoff_ru_idx = 2; 350 363 int i, cur; 351 - s8 max_power = -128; 352 364 353 365 if (!data) 354 366 return; ··· 391 337 if (len < pwr_len + 1) 392 338 break; 393 339 394 - mt76_apply_array_limit(pwr + pwr_len * i, pwr_len, data + 1, 395 - target_power, nss_delta, &max_power); 340 + /* Each RU entry (RU26, RU52, RU106, BW20, ...) in the DTS 341 + * corresponds to 10 stream combinations (1T1ss, 2T1ss, 3T1ss, 342 + * 4T1ss, 2T2ss, 3T2ss, 4T2ss, 3T3ss, 4T3ss, 4T4ss). 343 + * 344 + * For beamforming tables: 345 + * - In connac2, beamforming entries for BW20~BW160 and OFDM 346 + * do not include 1T1ss. 347 + * - In connac3, beamforming entries for BW20~BW160 and RU 348 + * include 1T1ss, but OFDM beamforming does not include 1T1ss. 349 + * 350 + * Non-beamforming and RU entries for both connac2 and connac3 351 + * include 1T1ss. 
352 + */ 353 + if (!is_mt799x(dev) && type == MT76_SKU_BACKOFF && 354 + i > connac2_backoff_ru_idx) 355 + type = MT76_SKU_BACKOFF_BF_OFFSET; 356 + 357 + mt76_apply_array_limit(dev, pwr + pwr_len * i, pwr_len, data + 1, 358 + target_power, nss_delta, max_power, 359 + n_chains, type); 396 360 if (--cur > 0) 397 361 continue; 398 362 ··· 432 360 struct device_node *np; 433 361 const s8 *val; 434 362 char name[16]; 435 - u32 mcs_rates = dev->drv->mcs_rates; 436 - u32 ru_rates = ARRAY_SIZE(dest->ru[0]); 437 363 char band; 438 364 size_t len; 439 - s8 max_power = 0; 440 - s8 max_power_backoff = -127; 365 + s8 max_power = -127; 441 366 s8 txs_delta; 442 367 int n_chains = hweight16(phy->chainmask); 443 - s8 target_power_combine = target_power + mt76_tx_power_path_delta(n_chains); 444 - 445 - if (!mcs_rates) 446 - mcs_rates = 10; 447 368 448 369 memset(dest, target_power, sizeof(*dest) - sizeof(dest->path)); 449 370 memset(&dest->path, 0, sizeof(dest->path)); ··· 474 409 txs_delta = mt76_get_txs_delta(np, hweight16(phy->chainmask)); 475 410 476 411 val = mt76_get_of_array_s8(np, "rates-cck", &len, ARRAY_SIZE(dest->cck)); 477 - mt76_apply_array_limit(dest->cck, ARRAY_SIZE(dest->cck), val, 478 - target_power, txs_delta, &max_power); 412 + mt76_apply_array_limit(dev, dest->cck, ARRAY_SIZE(dest->cck), val, 413 + target_power, txs_delta, &max_power, n_chains, MT76_SKU_RATE); 479 414 480 - val = mt76_get_of_array_s8(np, "rates-ofdm", 481 - &len, ARRAY_SIZE(dest->ofdm)); 482 - mt76_apply_array_limit(dest->ofdm, ARRAY_SIZE(dest->ofdm), val, 483 - target_power, txs_delta, &max_power); 415 + val = mt76_get_of_array_s8(np, "rates-ofdm", &len, ARRAY_SIZE(dest->ofdm)); 416 + mt76_apply_array_limit(dev, dest->ofdm, ARRAY_SIZE(dest->ofdm), val, 417 + target_power, txs_delta, &max_power, n_chains, MT76_SKU_RATE); 484 418 485 - val = mt76_get_of_array_s8(np, "rates-mcs", &len, mcs_rates + 1); 486 - mt76_apply_multi_array_limit(dest->mcs[0], ARRAY_SIZE(dest->mcs[0]), 487 - 
ARRAY_SIZE(dest->mcs), val, len, 488 - target_power, txs_delta); 419 + val = mt76_get_of_array_s8(np, "rates-mcs", &len, ARRAY_SIZE(dest->mcs[0]) + 1); 420 + mt76_apply_multi_array_limit(dev, dest->mcs[0], ARRAY_SIZE(dest->mcs[0]), 421 + ARRAY_SIZE(dest->mcs), val, len, target_power, 422 + txs_delta, &max_power, n_chains, MT76_SKU_RATE); 489 423 490 - val = mt76_get_of_array_s8(np, "rates-ru", &len, ru_rates + 1); 491 - mt76_apply_multi_array_limit(dest->ru[0], ARRAY_SIZE(dest->ru[0]), 492 - ARRAY_SIZE(dest->ru), val, len, 493 - target_power, txs_delta); 424 + val = mt76_get_of_array_s8(np, "rates-ru", &len, ARRAY_SIZE(dest->ru[0]) + 1); 425 + mt76_apply_multi_array_limit(dev, dest->ru[0], ARRAY_SIZE(dest->ru[0]), 426 + ARRAY_SIZE(dest->ru), val, len, target_power, 427 + txs_delta, &max_power, n_chains, MT76_SKU_RATE); 494 428 495 - max_power_backoff = max_power; 496 429 val = mt76_get_of_array_s8(np, "paths-cck", &len, ARRAY_SIZE(dest->path.cck)); 497 - mt76_apply_array_limit(dest->path.cck, ARRAY_SIZE(dest->path.cck), val, 498 - target_power_combine, txs_delta, &max_power_backoff); 430 + mt76_apply_array_limit(dev, dest->path.cck, ARRAY_SIZE(dest->path.cck), val, 431 + target_power, txs_delta, &max_power, n_chains, MT76_SKU_BACKOFF); 499 432 500 433 val = mt76_get_of_array_s8(np, "paths-ofdm", &len, ARRAY_SIZE(dest->path.ofdm)); 501 - mt76_apply_array_limit(dest->path.ofdm, ARRAY_SIZE(dest->path.ofdm), val, 502 - target_power_combine, txs_delta, &max_power_backoff); 434 + mt76_apply_array_limit(dev, dest->path.ofdm, ARRAY_SIZE(dest->path.ofdm), val, 435 + target_power, txs_delta, &max_power, n_chains, MT76_SKU_BACKOFF); 503 436 504 437 val = mt76_get_of_array_s8(np, "paths-ofdm-bf", &len, ARRAY_SIZE(dest->path.ofdm_bf)); 505 - mt76_apply_array_limit(dest->path.ofdm_bf, ARRAY_SIZE(dest->path.ofdm_bf), val, 506 - target_power_combine, txs_delta, &max_power_backoff); 438 + mt76_apply_array_limit(dev, dest->path.ofdm_bf, ARRAY_SIZE(dest->path.ofdm_bf), val, 439 + 
target_power, txs_delta, &max_power, n_chains, 440 + MT76_SKU_BACKOFF_BF_OFFSET); 507 441 508 442 val = mt76_get_of_array_s8(np, "paths-ru", &len, ARRAY_SIZE(dest->path.ru[0]) + 1); 509 - mt76_apply_multi_array_limit(dest->path.ru[0], ARRAY_SIZE(dest->path.ru[0]), 510 - ARRAY_SIZE(dest->path.ru), val, len, 511 - target_power_combine, txs_delta); 443 + mt76_apply_multi_array_limit(dev, dest->path.ru[0], ARRAY_SIZE(dest->path.ru[0]), 444 + ARRAY_SIZE(dest->path.ru), val, len, target_power, 445 + txs_delta, &max_power, n_chains, MT76_SKU_BACKOFF); 512 446 513 447 val = mt76_get_of_array_s8(np, "paths-ru-bf", &len, ARRAY_SIZE(dest->path.ru_bf[0]) + 1); 514 - mt76_apply_multi_array_limit(dest->path.ru_bf[0], ARRAY_SIZE(dest->path.ru_bf[0]), 515 - ARRAY_SIZE(dest->path.ru_bf), val, len, 516 - target_power_combine, txs_delta); 448 + mt76_apply_multi_array_limit(dev, dest->path.ru_bf[0], ARRAY_SIZE(dest->path.ru_bf[0]), 449 + ARRAY_SIZE(dest->path.ru_bf), val, len, target_power, 450 + txs_delta, &max_power, n_chains, MT76_SKU_BACKOFF); 517 451 518 452 return max_power; 519 453 }
+228 -2
drivers/net/wireless/mediatek/mt76/mac80211.c
··· 726 726 INIT_LIST_HEAD(&dev->rxwi_cache); 727 727 dev->token_size = dev->drv->token_size; 728 728 INIT_DELAYED_WORK(&dev->scan_work, mt76_scan_work); 729 + spin_lock_init(&dev->scan_lock); 729 730 730 731 for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) 731 732 skb_queue_head_init(&dev->rx_skb[i]); ··· 971 970 return true; 972 971 } 973 972 973 + if (atomic_read(&phy->mgmt_tx_pending)) 974 + return true; 975 + 974 976 return false; 975 977 } 976 978 EXPORT_SYMBOL_GPL(mt76_has_tx_pending); ··· 1034 1030 int timeout = HZ / 5; 1035 1031 int ret; 1036 1032 1037 - set_bit(MT76_RESET, &phy->state); 1038 - 1039 1033 mt76_worker_disable(&dev->tx_worker); 1034 + mt76_txq_schedule_pending(phy); 1035 + 1036 + set_bit(MT76_RESET, &phy->state); 1040 1037 wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout); 1041 1038 mt76_update_survey(phy); 1042 1039 ··· 1721 1716 1722 1717 idr_destroy(&wcid->pktid); 1723 1718 1719 + /* Remove from sta_poll_list to prevent list corruption after reset. 1720 + * Without this, mt76_reset_device() reinitializes sta_poll_list but 1721 + * leaves wcid->poll_list with stale pointers, causing list corruption 1722 + * when mt76_wcid_add_poll() checks list_empty(). 
1723 + */ 1724 + spin_lock_bh(&dev->sta_poll_lock); 1725 + if (!list_empty(&wcid->poll_list)) 1726 + list_del_init(&wcid->poll_list); 1727 + spin_unlock_bh(&dev->sta_poll_lock); 1728 + 1724 1729 spin_lock_bh(&phy->tx_lock); 1725 1730 1726 1731 if (!list_empty(&wcid->tx_list)) ··· 2136 2121 return sel_links; 2137 2122 } 2138 2123 EXPORT_SYMBOL_GPL(mt76_select_links); 2124 + 2125 + struct mt76_offchannel_cb_data { 2126 + struct mt76_phy *phy; 2127 + bool offchannel; 2128 + }; 2129 + 2130 + static void 2131 + mt76_offchannel_send_nullfunc(struct mt76_offchannel_cb_data *data, 2132 + struct ieee80211_vif *vif, int link_id) 2133 + { 2134 + struct mt76_phy *phy = data->phy; 2135 + struct ieee80211_tx_info *info; 2136 + struct ieee80211_sta *sta = NULL; 2137 + struct ieee80211_hdr *hdr; 2138 + struct mt76_wcid *wcid; 2139 + struct sk_buff *skb; 2140 + 2141 + skb = ieee80211_nullfunc_get(phy->hw, vif, link_id, true); 2142 + if (!skb) 2143 + return; 2144 + 2145 + hdr = (struct ieee80211_hdr *)skb->data; 2146 + if (data->offchannel) 2147 + hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); 2148 + 2149 + skb->priority = 7; 2150 + skb_set_queue_mapping(skb, IEEE80211_AC_VO); 2151 + 2152 + if (!ieee80211_tx_prepare_skb(phy->hw, vif, skb, 2153 + phy->main_chandef.chan->band, 2154 + &sta)) 2155 + return; 2156 + 2157 + if (sta) 2158 + wcid = (struct mt76_wcid *)sta->drv_priv; 2159 + else 2160 + wcid = ((struct mt76_vif_link *)vif->drv_priv)->wcid; 2161 + 2162 + if (link_id >= 0) { 2163 + info = IEEE80211_SKB_CB(skb); 2164 + info->control.flags &= ~IEEE80211_TX_CTRL_MLO_LINK; 2165 + info->control.flags |= 2166 + u32_encode_bits(link_id, IEEE80211_TX_CTRL_MLO_LINK); 2167 + } 2168 + 2169 + mt76_tx(phy, sta, wcid, skb); 2170 + } 2171 + 2172 + static void 2173 + mt76_offchannel_notify_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) 2174 + { 2175 + struct mt76_offchannel_cb_data *data = _data; 2176 + struct mt76_vif_link *mlink; 2177 + struct mt76_vif_data *mvif; 2178 + int 
link_id; 2179 + 2180 + if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc) 2181 + return; 2182 + 2183 + mlink = (struct mt76_vif_link *)vif->drv_priv; 2184 + mvif = mlink->mvif; 2185 + 2186 + if (!ieee80211_vif_is_mld(vif)) { 2187 + if (mt76_vif_link_phy(mlink) == data->phy) { 2188 + if (!data->offchannel && mlink->beacon_mon_interval) 2189 + WRITE_ONCE(mlink->beacon_mon_last, jiffies); 2190 + mt76_offchannel_send_nullfunc(data, vif, -1); 2191 + } 2192 + return; 2193 + } 2194 + 2195 + for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { 2196 + if (link_id == mvif->deflink_id) 2197 + mlink = (struct mt76_vif_link *)vif->drv_priv; 2198 + else 2199 + mlink = rcu_dereference(mvif->link[link_id]); 2200 + if (!mlink) 2201 + continue; 2202 + if (mt76_vif_link_phy(mlink) != data->phy) 2203 + continue; 2204 + 2205 + if (!data->offchannel && mlink->beacon_mon_interval) 2206 + WRITE_ONCE(mlink->beacon_mon_last, jiffies); 2207 + 2208 + mt76_offchannel_send_nullfunc(data, vif, link_id); 2209 + } 2210 + } 2211 + 2212 + void mt76_offchannel_notify(struct mt76_phy *phy, bool offchannel) 2213 + { 2214 + struct mt76_offchannel_cb_data data = { 2215 + .phy = phy, 2216 + .offchannel = offchannel, 2217 + }; 2218 + 2219 + if (!phy->num_sta) 2220 + return; 2221 + 2222 + local_bh_disable(); 2223 + ieee80211_iterate_active_interfaces_atomic(phy->hw, 2224 + IEEE80211_IFACE_ITER_NORMAL, 2225 + mt76_offchannel_notify_iter, &data); 2226 + local_bh_enable(); 2227 + } 2228 + EXPORT_SYMBOL_GPL(mt76_offchannel_notify); 2229 + 2230 + struct mt76_rx_beacon_data { 2231 + struct mt76_phy *phy; 2232 + const u8 *bssid; 2233 + }; 2234 + 2235 + static void mt76_rx_beacon_iter(void *_data, u8 *mac, 2236 + struct ieee80211_vif *vif) 2237 + { 2238 + struct mt76_rx_beacon_data *data = _data; 2239 + struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv; 2240 + struct mt76_vif_data *mvif = mlink->mvif; 2241 + int link_id; 2242 + 2243 + if (vif->type != 
NL80211_IFTYPE_STATION || !vif->cfg.assoc) 2244 + return; 2245 + 2246 + for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { 2247 + struct ieee80211_bss_conf *link_conf; 2248 + 2249 + if (link_id == mvif->deflink_id) 2250 + mlink = (struct mt76_vif_link *)vif->drv_priv; 2251 + else 2252 + mlink = rcu_dereference(mvif->link[link_id]); 2253 + if (!mlink || !mlink->beacon_mon_interval) 2254 + continue; 2255 + 2256 + if (mt76_vif_link_phy(mlink) != data->phy) 2257 + continue; 2258 + 2259 + link_conf = rcu_dereference(vif->link_conf[link_id]); 2260 + if (!link_conf) 2261 + continue; 2262 + 2263 + if (!ether_addr_equal(link_conf->bssid, data->bssid) && 2264 + (!link_conf->nontransmitted || 2265 + !ether_addr_equal(link_conf->transmitter_bssid, 2266 + data->bssid))) 2267 + continue; 2268 + 2269 + WRITE_ONCE(mlink->beacon_mon_last, jiffies); 2270 + } 2271 + } 2272 + 2273 + void mt76_rx_beacon(struct mt76_phy *phy, struct sk_buff *skb) 2274 + { 2275 + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; 2276 + struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb); 2277 + struct mt76_rx_beacon_data data = { 2278 + .phy = phy, 2279 + .bssid = hdr->addr3, 2280 + }; 2281 + 2282 + mt76_scan_rx_beacon(phy->dev, phy->chandef.chan); 2283 + 2284 + if (!phy->num_sta) 2285 + return; 2286 + 2287 + if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_ONLY_MONITOR)) 2288 + return; 2289 + 2290 + ieee80211_iterate_active_interfaces_atomic(phy->hw, 2291 + IEEE80211_IFACE_ITER_RESUME_ALL, 2292 + mt76_rx_beacon_iter, &data); 2293 + } 2294 + EXPORT_SYMBOL_GPL(mt76_rx_beacon); 2295 + 2296 + static void mt76_beacon_mon_iter(void *data, u8 *mac, 2297 + struct ieee80211_vif *vif) 2298 + { 2299 + struct mt76_phy *phy = data; 2300 + struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv; 2301 + struct mt76_vif_data *mvif = mlink->mvif; 2302 + int link_id; 2303 + 2304 + if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc) 2305 + return; 2306 + 2307 + for 
(link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { 2308 + if (link_id == mvif->deflink_id) 2309 + mlink = (struct mt76_vif_link *)vif->drv_priv; 2310 + else 2311 + mlink = rcu_dereference(mvif->link[link_id]); 2312 + if (!mlink || !mlink->beacon_mon_interval) 2313 + continue; 2314 + 2315 + if (mt76_vif_link_phy(mlink) != phy) 2316 + continue; 2317 + 2318 + if (time_after(jiffies, 2319 + READ_ONCE(mlink->beacon_mon_last) + 2320 + MT76_BEACON_MON_MAX_MISS * mlink->beacon_mon_interval)) 2321 + ieee80211_beacon_loss(vif); 2322 + } 2323 + } 2324 + 2325 + void mt76_beacon_mon_check(struct mt76_phy *phy) 2326 + { 2327 + if (phy->offchannel) 2328 + return; 2329 + 2330 + ieee80211_iterate_active_interfaces_atomic(phy->hw, 2331 + IEEE80211_IFACE_ITER_RESUME_ALL, 2332 + mt76_beacon_mon_iter, phy); 2333 + } 2334 + EXPORT_SYMBOL_GPL(mt76_beacon_mon_check);
+1 -1
drivers/net/wireless/mediatek/mt76/mcu.c
··· 98 98 /* orig skb might be needed for retry, mcu_skb_send_msg consumes it */ 99 99 if (orig_skb) 100 100 skb_get(orig_skb); 101 - ret = dev->mcu_ops->mcu_skb_send_msg(dev, skb, cmd, &seq); 101 + ret = dev->mcu_ops->mcu_skb_send_msg(dev, skb, cmd, wait_resp ? &seq : NULL); 102 102 if (ret < 0) 103 103 goto out; 104 104
+46 -1
drivers/net/wireless/mediatek/mt76/mt76.h
··· 55 55 FIELD_PREP(MT_QFLAG_WED_RING, _n)) 56 56 #define MT_NPU_Q_TX(_n) __MT_NPU_Q(MT76_WED_Q_TX, _n) 57 57 #define MT_NPU_Q_RX(_n) __MT_NPU_Q(MT76_WED_Q_RX, _n) 58 + #define MT_NPU_Q_TXFREE(_n) (FIELD_PREP(MT_QFLAG_WED_TYPE, MT76_WED_Q_TXFREE) | \ 59 + FIELD_PREP(MT_QFLAG_WED_RING, _n)) 58 60 59 61 struct mt76_dev; 60 62 struct mt76_phy; ··· 364 362 }; 365 363 366 364 #define MT76_N_WCIDS 1088 365 + #define MT76_BEACON_MON_MAX_MISS 7 367 366 368 367 /* stored in ieee80211_tx_info::hw_queue */ 369 368 #define MT_TX_HW_QUEUE_PHY GENMASK(3, 2) ··· 451 448 }; 452 449 453 450 u8 qid; 451 + u8 phy_idx; 454 452 }; 455 453 456 454 struct mt76_rx_tid { ··· 544 540 u32 survey_flags; 545 541 u16 txwi_size; 546 542 u16 token_size; 547 - u8 mcs_rates; 548 543 549 544 unsigned int link_data_size; 550 545 ··· 834 831 u8 mcast_rates_idx; 835 832 u8 beacon_rates_idx; 836 833 bool offchannel; 834 + unsigned long beacon_mon_last; 835 + u16 beacon_mon_interval; 837 836 struct ieee80211_chanctx_conf *ctx; 838 837 struct mt76_wcid *wcid; 839 838 struct mt76_vif_data *mvif; ··· 863 858 spinlock_t tx_lock; 864 859 struct list_head tx_list; 865 860 struct mt76_queue *q_tx[__MT_TXQ_MAX]; 861 + 862 + atomic_t mgmt_tx_pending; 866 863 867 864 struct cfg80211_chan_def chandef; 868 865 struct cfg80211_chan_def main_chandef; ··· 1009 1002 u32 rxfilter; 1010 1003 1011 1004 struct delayed_work scan_work; 1005 + spinlock_t scan_lock; 1012 1006 struct { 1013 1007 struct cfg80211_scan_request *req; 1014 1008 struct ieee80211_channel *chan; ··· 1017 1009 struct mt76_vif_link *mlink; 1018 1010 struct mt76_phy *phy; 1019 1011 int chan_idx; 1012 + bool beacon_wait; 1013 + bool beacon_received; 1020 1014 } scan; 1021 1015 1022 1016 #ifdef CONFIG_NL80211_TESTMODE ··· 1528 1518 void mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb); 1529 1519 void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid); 1530 1520 void mt76_txq_schedule_all(struct mt76_phy *phy); 1521 + void 
mt76_txq_schedule_pending(struct mt76_phy *phy); 1531 1522 void mt76_tx_worker_run(struct mt76_dev *dev); 1532 1523 void mt76_tx_worker(struct mt76_worker *w); 1533 1524 void mt76_release_buffered_frames(struct ieee80211_hw *hw, ··· 1607 1596 int mt76_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 1608 1597 struct ieee80211_scan_request *hw_req); 1609 1598 void mt76_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif); 1599 + void mt76_scan_rx_beacon(struct mt76_dev *dev, struct ieee80211_channel *chan); 1600 + void mt76_rx_beacon(struct mt76_phy *phy, struct sk_buff *skb); 1601 + void mt76_beacon_mon_check(struct mt76_phy *phy); 1610 1602 void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 1611 1603 const u8 *mac); 1612 1604 void mt76_sw_scan_complete(struct ieee80211_hw *hw, ··· 1663 1649 int mt76_npu_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 1664 1650 struct net_device *dev, enum tc_setup_type type, 1665 1651 void *type_data); 1652 + int mt76_npu_send_txrx_addr(struct mt76_dev *dev, int ifindex, 1653 + u32 direction, u32 i_count_addr, 1654 + u32 o_status_addr, u32 o_count_addr); 1666 1655 #else 1667 1656 static inline void mt76_npu_check_ppe(struct mt76_dev *dev, 1668 1657 struct sk_buff *skb, u32 info) ··· 1721 1704 struct net_device *dev, 1722 1705 enum tc_setup_type type, 1723 1706 void *type_data) 1707 + { 1708 + return -EOPNOTSUPP; 1709 + } 1710 + 1711 + static inline int mt76_npu_send_txrx_addr(struct mt76_dev *dev, int ifindex, 1712 + u32 direction, u32 i_count_addr, 1713 + u32 o_status_addr, u32 o_count_addr) 1724 1714 { 1725 1715 return -EOPNOTSUPP; 1726 1716 } ··· 1799 1775 struct mt76_queue_entry *e); 1800 1776 int __mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef, 1801 1777 bool offchannel); 1778 + 1779 + static inline bool 1780 + mt76_offchannel_chandef(struct mt76_phy *phy, struct ieee80211_channel *chan, 1781 + struct cfg80211_chan_def *chandef) 1782 + { 
1783 + cfg80211_chandef_create(chandef, chan, NL80211_CHAN_HT20); 1784 + if (phy->main_chandef.chan != chan) 1785 + return true; 1786 + 1787 + *chandef = phy->main_chandef; 1788 + return false; 1789 + } 1802 1790 int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef, 1803 1791 bool offchannel); 1804 1792 void mt76_scan_work(struct work_struct *work); ··· 1822 1786 struct ieee80211_vif *vif); 1823 1787 void mt76_put_vif_phy_link(struct mt76_phy *phy, struct ieee80211_vif *vif, 1824 1788 struct mt76_vif_link *mlink); 1789 + void mt76_offchannel_notify(struct mt76_phy *phy, bool offchannel); 1825 1790 1826 1791 /* usb */ 1827 1792 static inline bool mt76u_urb_error(struct urb *urb) ··· 2028 1991 { 2029 1992 return mt76_queue_is_npu(q) && 2030 1993 FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX; 1994 + } 1995 + 1996 + static inline bool mt76_queue_is_npu_txfree(struct mt76_queue *q) 1997 + { 1998 + if (q->flags & MT_QFLAG_WED) 1999 + return false; 2000 + 2001 + return FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_TXFREE; 2031 2002 } 2032 2003 2033 2004 struct mt76_txwi_cache *
-15
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
··· 1167 1167 } 1168 1168 EXPORT_SYMBOL_GPL(mt7615_mac_set_rates); 1169 1169 1170 - void mt7615_mac_enable_rtscts(struct mt7615_dev *dev, 1171 - struct ieee80211_vif *vif, bool enable) 1172 - { 1173 - struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; 1174 - u32 addr; 1175 - 1176 - addr = mt7615_mac_wtbl_addr(dev, mvif->sta.wcid.idx) + 3 * 4; 1177 - 1178 - if (enable) 1179 - mt76_set(dev, addr, MT_WTBL_W3_RTS); 1180 - else 1181 - mt76_clear(dev, addr, MT_WTBL_W3_RTS); 1182 - } 1183 - EXPORT_SYMBOL_GPL(mt7615_mac_enable_rtscts); 1184 - 1185 1170 static int 1186 1171 mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid, 1187 1172 struct ieee80211_key_conf *key,
+4 -3
drivers/net/wireless/mediatek/mt76/mt7615/main.c
··· 583 583 } 584 584 } 585 585 586 - if (changed & BSS_CHANGED_ERP_CTS_PROT) 587 - mt7615_mac_enable_rtscts(dev, vif, info->use_cts_prot); 588 - 589 586 if (changed & BSS_CHANGED_BEACON_ENABLED && info->enable_beacon) { 590 587 mt7615_mcu_add_bss_info(phy, vif, NULL, true); 591 588 mt7615_mcu_sta_add(phy, vif, NULL, true); ··· 594 597 if (changed & (BSS_CHANGED_BEACON | 595 598 BSS_CHANGED_BEACON_ENABLED)) 596 599 mt7615_mcu_add_beacon(dev, hw, vif, info->enable_beacon); 600 + 601 + if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT) 602 + mt7615_mcu_set_protection(phy, vif, info->ht_operation_mode, 603 + info->use_cts_prot); 597 604 598 605 if (changed & BSS_CHANGED_PS) 599 606 mt76_connac_mcu_set_vif_ps(&dev->mt76, vif);
+47
drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
··· 2564 2564 return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_ROC), 2565 2565 &req, sizeof(req), false); 2566 2566 } 2567 + 2568 + int mt7615_mcu_set_protection(struct mt7615_phy *phy, struct ieee80211_vif *vif, 2569 + u8 ht_mode, bool use_cts_prot) 2570 + { 2571 + struct mt7615_dev *dev = phy->dev; 2572 + struct { 2573 + u8 prot_idx; 2574 + u8 band; 2575 + u8 rsv[2]; 2576 + 2577 + bool long_nav; 2578 + bool prot_mm; 2579 + bool prot_gf; 2580 + bool prot_bw40; 2581 + bool prot_rifs; 2582 + bool prot_bw80; 2583 + bool prot_bw160; 2584 + u8 prot_erp_mask; 2585 + } __packed req = { 2586 + .prot_idx = 0x2, 2587 + .band = phy != &dev->phy, 2588 + }; 2589 + 2590 + switch (ht_mode & IEEE80211_HT_OP_MODE_PROTECTION) { 2591 + case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER: 2592 + case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED: 2593 + req.prot_mm = true; 2594 + req.prot_gf = true; 2595 + fallthrough; 2596 + case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ: 2597 + req.prot_bw40 = true; 2598 + break; 2599 + } 2600 + 2601 + if (ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) 2602 + req.prot_gf = true; 2603 + 2604 + if (use_cts_prot) { 2605 + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; 2606 + u8 i = mvif->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : mvif->mt76.omac_idx; 2607 + 2608 + req.prot_erp_mask = BIT(i); 2609 + } 2610 + 2611 + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(PROTECT_CTRL), &req, 2612 + sizeof(req), true); 2613 + }
+2 -3
drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
··· 467 467 void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy); 468 468 void mt7615_mac_set_scs(struct mt7615_phy *phy, bool enable); 469 469 void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy); 470 - void mt7615_mac_enable_rtscts(struct mt7615_dev *dev, 471 - struct ieee80211_vif *vif, bool enable); 472 470 void mt7615_mac_sta_poll(struct mt7615_dev *dev); 473 471 int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi, 474 472 struct sk_buff *skb, struct mt76_wcid *wcid, ··· 521 523 int mt7615_mcu_apply_rx_dcoc(struct mt7615_phy *phy); 522 524 int mt7615_mcu_apply_tx_dpd(struct mt7615_phy *phy); 523 525 int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy); 524 - 526 + int mt7615_mcu_set_protection(struct mt7615_phy *phy, struct ieee80211_vif *vif, 527 + u8 ht_mode, bool use_cts_prot); 525 528 int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif, 526 529 struct ieee80211_channel *chan, int duration); 527 530
-2
drivers/net/wireless/mediatek/mt76/mt7615/regs.h
··· 455 455 #define MT_WTBL_RIUCR3_RATE6 GENMASK(19, 8) 456 456 #define MT_WTBL_RIUCR3_RATE7 GENMASK(31, 20) 457 457 458 - #define MT_WTBL_W3_RTS BIT(22) 459 - 460 458 #define MT_WTBL_W5_CHANGE_BW_RATE GENMASK(7, 5) 461 459 #define MT_WTBL_W5_SHORT_GI_20 BIT(8) 462 460 #define MT_WTBL_W5_SHORT_GI_40 BIT(9)
+9 -2
drivers/net/wireless/mediatek/mt76/mt76_connac.h
··· 182 182 return mt76_chip(dev) == 0x7920; 183 183 } 184 184 185 + static inline bool is_mt7902(struct mt76_dev *dev) 186 + { 187 + return mt76_chip(dev) == 0x7902; 188 + } 189 + 185 190 static inline bool is_mt7922(struct mt76_dev *dev) 186 191 { 187 192 return mt76_chip(dev) == 0x7922; 188 193 } 189 194 190 - static inline bool is_mt7921(struct mt76_dev *dev) 195 + static inline bool is_connac2(struct mt76_dev *dev) 191 196 { 192 - return mt76_chip(dev) == 0x7961 || is_mt7922(dev) || is_mt7920(dev); 197 + return mt76_chip(dev) == 0x7961 || is_mt7922(dev) || is_mt7920(dev) || 198 + is_mt7902(dev); 193 199 } 194 200 195 201 static inline bool is_mt7663(struct mt76_dev *dev) ··· 277 271 case 0x7961: 278 272 case 0x7920: 279 273 case 0x7922: 274 + case 0x7902: 280 275 case 0x7925: 281 276 case 0x7663: 282 277 case 0x7622:
+18 -10
drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
··· 173 173 174 174 txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID); 175 175 176 - if (is_mt7663(dev) || is_mt7921(dev) || is_mt7925(dev)) 176 + if (is_mt7663(dev) || is_connac2(dev) || is_mt7925(dev)) 177 177 last_mask = MT_TXD_LEN_LAST; 178 178 else 179 179 last_mask = MT_TXD_LEN_AMSDU_LAST | ··· 217 217 u32 last_mask; 218 218 int i; 219 219 220 - if (is_mt7663(dev) || is_mt7921(dev) || is_mt7925(dev)) 220 + if (is_mt7663(dev) || is_connac2(dev) || is_mt7925(dev)) 221 221 last_mask = MT_TXD_LEN_LAST; 222 222 else 223 223 last_mask = MT_TXD_LEN_MSDU_LAST; ··· 309 309 chandef = mvif->ctx ? &mvif->ctx->def : &mphy->chandef; 310 310 band = chandef->chan->band; 311 311 312 - if (is_mt7921(mphy->dev)) { 312 + if (is_connac2(mphy->dev)) { 313 313 rateidx = ffs(conf->basic_rates) - 1; 314 314 goto legacy; 315 315 } ··· 548 548 val = MT_TXD1_LONG_FORMAT | 549 549 FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) | 550 550 FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx); 551 - if (!is_mt7921(dev)) 551 + if (!is_connac2(dev)) 552 552 val |= MT_TXD1_VTA; 553 553 if (phy_idx || band_idx) 554 554 val |= MT_TXD1_TGID; ··· 557 557 txwi[2] = 0; 558 558 559 559 val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, 15); 560 - if (!is_mt7921(dev)) 560 + if (!is_connac2(dev)) 561 561 val |= MT_TXD3_SW_POWER_MGMT; 562 562 if (key) 563 563 val |= MT_TXD3_PROTECT_FRAME; ··· 599 599 txwi[6] |= cpu_to_le32(val); 600 600 txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE); 601 601 602 - if (!is_mt7921(dev)) { 602 + if (!is_connac2(dev)) { 603 603 u8 spe_idx = mt76_connac_spe_idx(mphy->antenna_mask); 604 604 605 605 if (!spe_idx) ··· 831 831 }; 832 832 struct ieee80211_radiotap_he_mu *he_mu; 833 833 834 - if (is_mt7921(dev)) { 834 + if (is_connac2(dev)) { 835 835 mu_known.flags1 |= HE_BITS(MU_FLAGS1_SIG_B_COMP_KNOWN); 836 836 mu_known.flags2 |= HE_BITS(MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN); 837 837 } ··· 1047 1047 stbc = FIELD_GET(MT_PRXV_HT_STBC, v0); 1048 1048 gi = FIELD_GET(MT_PRXV_HT_SGI, v0); 1049 1049 *mode = 
FIELD_GET(MT_PRXV_TX_MODE, v0); 1050 - if (is_mt7921(dev)) 1050 + if (is_connac2(dev)) 1051 1051 dcm = !!(idx & MT_PRXV_TX_DCM); 1052 1052 else 1053 1053 dcm = FIELD_GET(MT_PRXV_DCM, v0); ··· 1153 1153 return; 1154 1154 1155 1155 wcid = (struct mt76_wcid *)sta->drv_priv; 1156 - if (!test_and_set_bit(tid, &wcid->ampdu_state)) 1157 - ieee80211_start_tx_ba_session(sta, tid, 0); 1156 + if (!test_and_set_bit(tid, &wcid->ampdu_state)) { 1157 + if (ieee80211_start_tx_ba_session(sta, tid, 0)) 1158 + clear_bit(tid, &wcid->ampdu_state); 1159 + } 1158 1160 } 1159 1161 EXPORT_SYMBOL_GPL(mt76_connac2_tx_check_aggr); 1160 1162 ··· 1209 1207 } 1210 1208 spin_unlock_bh(&dev->token_lock); 1211 1209 idr_destroy(&dev->token); 1210 + 1211 + for (id = 0; id < __MT_MAX_BAND; id++) { 1212 + struct mt76_phy *phy = dev->phys[id]; 1213 + if (phy) 1214 + atomic_set(&phy->mgmt_tx_pending, 0); 1215 + } 1212 1216 } 1213 1217 EXPORT_SYMBOL_GPL(mt76_connac2_tx_token_put);
+29 -17
drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
··· 4 4 #include <linux/firmware.h> 5 5 #include "mt76_connac2_mac.h" 6 6 #include "mt76_connac_mcu.h" 7 + #include "mt792x_regs.h" 7 8 8 9 int mt76_connac_mcu_start_firmware(struct mt76_dev *dev, u32 addr, u32 option) 9 10 { ··· 66 65 int cmd; 67 66 68 67 if ((!is_connac_v1(dev) && addr == MCU_PATCH_ADDRESS) || 69 - (is_mt7921(dev) && addr == 0x900000) || 68 + (is_connac2(dev) && addr == 0x900000) || 70 69 (is_mt7925(dev) && (addr == 0x900000 || addr == 0xe0002800)) || 71 70 (is_mt799x(dev) && addr == 0x900000)) 72 71 cmd = MCU_CMD(PATCH_START_REQ); ··· 403 402 switch (vif->type) { 404 403 case NL80211_IFTYPE_MESH_POINT: 405 404 case NL80211_IFTYPE_AP: 406 - if (vif->p2p && !is_mt7921(dev)) 405 + if (vif->p2p && !is_connac2(dev)) 407 406 conn_type = CONNECTION_P2P_GC; 408 407 else 409 408 conn_type = CONNECTION_INFRA_STA; ··· 411 410 basic->aid = cpu_to_le16(link_sta->sta->aid); 412 411 break; 413 412 case NL80211_IFTYPE_STATION: 414 - if (vif->p2p && !is_mt7921(dev)) 413 + if (vif->p2p && !is_connac2(dev)) 415 414 conn_type = CONNECTION_P2P_GO; 416 415 else 417 416 conn_type = CONNECTION_INFRA_AP; ··· 875 874 struct sta_rec_vht *vht; 876 875 int len; 877 876 878 - len = is_mt7921(dev) ? sizeof(*vht) : sizeof(*vht) - 4; 877 + len = is_connac2(dev) ? 
sizeof(*vht) : sizeof(*vht) - 4; 879 878 tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_VHT, len); 880 879 vht = (struct sta_rec_vht *)tlv; 881 880 vht->vht_cap = cpu_to_le32(sta->deflink.vht_cap.cap); ··· 886 885 /* starec uapsd */ 887 886 mt76_connac_mcu_sta_uapsd(skb, vif, sta); 888 887 889 - if (!is_mt7921(dev)) 888 + if (!is_connac2(dev)) 890 889 return; 891 890 892 891 if (sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he) ··· 1296 1295 wtbl_hdr); 1297 1296 1298 1297 ret = mt76_connac_mcu_sta_wed_update(dev, skb); 1299 - if (ret) 1298 + if (ret) { 1299 + dev_kfree_skb(skb); 1300 1300 return ret; 1301 + } 1301 1302 1302 1303 ret = mt76_mcu_skb_send_msg(dev, skb, cmd, true); 1303 1304 if (ret) ··· 1312 1309 mt76_connac_mcu_sta_ba_tlv(skb, params, enable, tx); 1313 1310 1314 1311 ret = mt76_connac_mcu_sta_wed_update(dev, skb); 1315 - if (ret) 1312 + if (ret) { 1313 + dev_kfree_skb(skb); 1316 1314 return ret; 1315 + } 1317 1316 1318 1317 return mt76_mcu_skb_send_msg(dev, skb, cmd, true); 1319 1318 } ··· 1779 1774 req->ssid_type_ext = n_ssids ? BIT(0) : 0; 1780 1775 req->ssids_num = n_ssids; 1781 1776 1782 - duration = is_mt7921(phy->dev) ? 0 : MT76_CONNAC_SCAN_CHANNEL_TIME; 1777 + duration = is_connac2(phy->dev) ? 
0 : MT76_CONNAC_SCAN_CHANNEL_TIME; 1783 1778 /* increase channel time for passive scan */ 1784 1779 if (!sreq->n_ssids) 1785 1780 duration *= 2; ··· 1822 1817 req->ies_len = cpu_to_le16(sreq->ie_len); 1823 1818 } 1824 1819 1825 - if (is_mt7921(phy->dev)) 1820 + if (is_connac2(phy->dev)) 1826 1821 req->scan_func |= SCAN_FUNC_SPLIT_SCAN; 1827 1822 1828 1823 memcpy(req->bssid, sreq->bssid, ETH_ALEN); ··· 1898 1893 get_random_mask_addr(addr, sreq->mac_addr, 1899 1894 sreq->mac_addr_mask); 1900 1895 } 1901 - if (is_mt7921(phy->dev)) { 1896 + if (is_connac2(phy->dev)) { 1902 1897 req->mt7921.bss_idx = mvif->idx; 1903 1898 req->mt7921.delay = cpu_to_le32(sreq->delay); 1904 1899 } ··· 2038 2033 struct mt76_power_limits *limits, 2039 2034 enum nl80211_band band) 2040 2035 { 2041 - int max_power = is_mt7921(dev) ? 127 : 63; 2036 + int max_power = is_connac2(dev) ? 127 : 63; 2042 2037 int i, offset = sizeof(limits->cck); 2043 2038 2044 2039 memset(sku, max_power, MT_SKU_POWER_LIMIT); ··· 2066 2061 offset += 12; 2067 2062 } 2068 2063 2069 - if (!is_mt7921(dev)) 2064 + if (!is_connac2(dev)) 2070 2065 return; 2071 2066 2072 2067 /* he */ ··· 2122 2117 enum nl80211_band band) 2123 2118 { 2124 2119 struct mt76_dev *dev = phy->dev; 2125 - int sku_len, batch_len = is_mt7921(dev) ? 8 : 16; 2120 + int sku_len, batch_len = is_connac2(dev) ? 8 : 16; 2126 2121 static const u8 chan_list_2ghz[] = { 2127 2122 1, 2, 3, 4, 5, 6, 7, 2128 2123 8, 9, 10, 11, 12, 13, 14 ··· 2163 2158 if (!limits) 2164 2159 return -ENOMEM; 2165 2160 2166 - sku_len = is_mt7921(dev) ? sizeof(sku_tlbv) : sizeof(sku_tlbv) - 92; 2161 + sku_len = is_connac2(dev) ? 
sizeof(sku_tlbv) : sizeof(sku_tlbv) - 92; 2167 2162 tx_power = 2 * phy->hw->conf.power_level; 2168 2163 if (!tx_power) 2169 2164 tx_power = 127; ··· 2247 2242 false); 2248 2243 if (err < 0) 2249 2244 goto out; 2245 + 2246 + /* read a CR to avoid PSE buffer underflow */ 2247 + mt76_connac_mcu_reg_rr(dev, MT_PSE_BASE); 2250 2248 } 2251 2249 2252 2250 out: ··· 2772 2764 return PTR_ERR(skb); 2773 2765 2774 2766 ret = mt76_connac_mcu_sta_key_tlv(sta_key_conf, skb, key, cmd); 2775 - if (ret) 2767 + if (ret) { 2768 + dev_kfree_skb(skb); 2776 2769 return ret; 2770 + } 2777 2771 2778 2772 ret = mt76_connac_mcu_sta_wed_update(dev, skb); 2779 - if (ret) 2773 + if (ret) { 2774 + dev_kfree_skb(skb); 2780 2775 return ret; 2776 + } 2781 2777 2782 2778 return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true); 2783 2779 } ··· 3084 3072 { 3085 3073 u32 mode = DL_MODE_NEED_RSP; 3086 3074 3087 - if ((!is_mt7921(dev) && !is_mt7925(dev)) || info == PATCH_SEC_NOT_SUPPORT) 3075 + if ((!is_connac2(dev) && !is_mt7925(dev)) || info == PATCH_SEC_NOT_SUPPORT) 3088 3076 return mode; 3089 3077 3090 3078 switch (FIELD_GET(PATCH_SEC_ENC_TYPE_MASK, info)) {
+14 -1
drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
··· 628 628 __le32 flag; 629 629 } __packed; 630 630 631 + struct sta_rec_eml_op { 632 + __le16 tag; 633 + __le16 len; 634 + u8 link_bitmap; 635 + u8 link_ant_num[3]; 636 + } __packed; 637 + 631 638 /* wtbl_rec */ 632 639 633 640 struct wtbl_req_hdr { ··· 803 796 sizeof(struct sta_rec_he_6g_capa) + \ 804 797 sizeof(struct sta_rec_pn_info) + \ 805 798 sizeof(struct sta_rec_tx_proc) + \ 799 + sizeof(struct sta_rec_eml_op) + \ 806 800 sizeof(struct tlv) + \ 807 801 MT76_CONNAC_WTBL_UPDATE_MAX_SIZE) 808 802 ··· 840 832 STA_REC_PN_INFO = 0x26, 841 833 STA_REC_KEY_V3 = 0x27, 842 834 STA_REC_HDRT = 0x28, 835 + STA_REC_EML_OP = 0x29, 843 836 STA_REC_HDR_TRANS = 0x2B, 844 837 STA_REC_MAX_NUM 845 838 }; ··· 1317 1308 MCU_UNI_CMD_PER_STA_INFO = 0x6d, 1318 1309 MCU_UNI_CMD_ALL_STA_INFO = 0x6e, 1319 1310 MCU_UNI_CMD_ASSERT_DUMP = 0x6f, 1311 + MCU_UNI_CMD_EXT_EEPROM_CTRL = 0x74, 1320 1312 MCU_UNI_CMD_RADIO_STATUS = 0x80, 1313 + MCU_UNI_CMD_MLD = 0x82, 1321 1314 MCU_UNI_CMD_SDO = 0x88, 1322 1315 }; 1323 1316 ··· 1374 1363 UNI_BSS_INFO_BASIC = 0, 1375 1364 UNI_BSS_INFO_RA = 1, 1376 1365 UNI_BSS_INFO_RLM = 2, 1366 + UNI_BSS_INFO_PROTECT_INFO = 3, 1377 1367 UNI_BSS_INFO_BSS_COLOR = 4, 1378 1368 UNI_BSS_INFO_HE_BASIC = 5, 1379 1369 UNI_BSS_INFO_11V_MBSSID = 6, ··· 1395 1383 UNI_BSS_INFO_MLD = 26, 1396 1384 UNI_BSS_INFO_PM_DISABLE = 27, 1397 1385 UNI_BSS_INFO_EHT = 30, 1386 + UNI_BSS_INFO_MLD_LINK_OP = 36, 1398 1387 }; 1399 1388 1400 1389 enum { ··· 1878 1865 1879 1866 ret |= feature_set & FW_FEATURE_SET_ENCRYPT ? 1880 1867 DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV : 0; 1881 - if (is_mt7921(dev) || is_mt7925(dev)) 1868 + if (is_connac2(dev) || is_mt7925(dev)) 1882 1869 ret |= feature_set & FW_FEATURE_ENCRY_MODE ? 1883 1870 DL_CONFIG_ENCRY_MODE_SEL : 0; 1884 1871 ret |= FIELD_PREP(DL_MODE_KEY_IDX,
+1
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
··· 534 534 return; 535 535 536 536 clear_bit(MT76_RESTART, &dev->mphy.state); 537 + ieee80211_wake_queues(hw); 537 538 } 538 539 EXPORT_SYMBOL_GPL(mt76x02_reconfig_complete); 539 540
+1
drivers/net/wireless/mediatek/mt76/mt7915/init.c
··· 1294 1294 1295 1295 void mt7915_unregister_device(struct mt7915_dev *dev) 1296 1296 { 1297 + cancel_work_sync(&dev->dump_work); 1297 1298 mt7915_unregister_ext_phy(dev); 1298 1299 mt7915_coredump_unregister(dev); 1299 1300 mt7915_unregister_thermal(&dev->phy);
-13
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
··· 232 232 rcu_read_unlock(); 233 233 } 234 234 235 - void mt7915_mac_enable_rtscts(struct mt7915_dev *dev, 236 - struct ieee80211_vif *vif, bool enable) 237 - { 238 - struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 239 - u32 addr; 240 - 241 - addr = mt7915_mac_wtbl_lmac_addr(dev, mvif->sta.wcid.idx, 5); 242 - if (enable) 243 - mt76_set(dev, addr, BIT(5)); 244 - else 245 - mt76_clear(dev, addr, BIT(5)); 246 - } 247 - 248 235 static void 249 236 mt7915_wed_check_ppe(struct mt7915_dev *dev, struct mt76_queue *q, 250 237 struct mt7915_sta *msta, struct sk_buff *skb,
+6 -3
drivers/net/wireless/mediatek/mt76/mt7915/main.c
··· 68 68 if (ret) 69 69 goto out; 70 70 71 - ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b, 71 + ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, MT7915_RTS_LEN_THRES, 72 72 phy->mt76->band_idx); 73 73 if (ret) 74 74 goto out; ··· 633 633 if (set_sta == 1) 634 634 mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_PORT_SECURE, false); 635 635 636 - if (changed & BSS_CHANGED_ERP_CTS_PROT) 637 - mt7915_mac_enable_rtscts(dev, vif, info->use_cts_prot); 636 + if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT) 637 + mt7915_mcu_set_protection(phy, vif, info->ht_operation_mode, 638 + info->use_cts_prot); 638 639 639 640 if (changed & BSS_CHANGED_ERP_SLOT) { 640 641 int slottime = 9; ··· 852 851 return mt7915_mcu_add_sta(dev, vif, sta, CONN_STATE_PORT_SECURE, false); 853 852 854 853 case MT76_STA_EVENT_DISASSOC: 854 + mutex_lock(&dev->mt76.mutex); 855 855 for (i = 0; i < ARRAY_SIZE(msta->twt.flow); i++) 856 856 mt7915_mac_twt_teardown_flow(dev, msta, i); 857 + mutex_unlock(&dev->mt76.mutex); 857 858 858 859 mt7915_mcu_add_sta(dev, vif, sta, CONN_STATE_DISCONNECT, false); 859 860 msta->wcid.sta_disabled = 1;
+65 -1
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
··· 1765 1765 } 1766 1766 out: 1767 1767 ret = mt76_connac_mcu_sta_wed_update(&dev->mt76, skb); 1768 - if (ret) 1768 + if (ret) { 1769 + dev_kfree_skb(skb); 1769 1770 return ret; 1771 + } 1770 1772 1771 1773 return mt76_mcu_skb_send_msg(&dev->mt76, skb, 1772 1774 MCU_EXT_CMD(STA_REC_UPDATE), true); ··· 3954 3952 dev_kfree_skb(skb); 3955 3953 3956 3954 return ret; 3955 + } 3956 + 3957 + int mt7915_mcu_set_protection(struct mt7915_phy *phy, struct ieee80211_vif *vif, 3958 + u8 ht_mode, bool use_cts_prot) 3959 + { 3960 + struct mt7915_dev *dev = phy->dev; 3961 + int len = sizeof(struct sta_req_hdr) + sizeof(struct bss_info_prot); 3962 + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 3963 + struct bss_info_prot *prot; 3964 + struct sk_buff *skb; 3965 + struct tlv *tlv; 3966 + enum { 3967 + PROT_NONMEMBER = BIT(1), 3968 + PROT_20MHZ = BIT(2), 3969 + PROT_NONHT_MIXED = BIT(3), 3970 + PROT_LEGACY_ERP = BIT(5), 3971 + PROT_NONGF_STA = BIT(7), 3972 + }; 3973 + u32 rts_threshold; 3974 + 3975 + skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, 3976 + NULL, len); 3977 + if (IS_ERR(skb)) 3978 + return PTR_ERR(skb); 3979 + 3980 + tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_PROTECT_INFO, 3981 + sizeof(*prot)); 3982 + prot = (struct bss_info_prot *)tlv; 3983 + 3984 + switch (ht_mode & IEEE80211_HT_OP_MODE_PROTECTION) { 3985 + case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER: 3986 + prot->prot_mode = cpu_to_le32(PROT_NONMEMBER); 3987 + break; 3988 + case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ: 3989 + prot->prot_mode = cpu_to_le32(PROT_20MHZ); 3990 + break; 3991 + case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED: 3992 + prot->prot_mode = cpu_to_le32(PROT_NONHT_MIXED); 3993 + break; 3994 + } 3995 + 3996 + if (ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) 3997 + prot->prot_mode |= cpu_to_le32(PROT_NONGF_STA); 3998 + 3999 + if (use_cts_prot) 4000 + prot->prot_mode |= cpu_to_le32(PROT_LEGACY_ERP); 4001 + 4002 + /* reuse current RTS setting */ 4003 + 
rts_threshold = phy->mt76->hw->wiphy->rts_threshold; 4004 + if (rts_threshold == (u32)-1) 4005 + prot->rts_len_thres = cpu_to_le32(MT7915_RTS_LEN_THRES); 4006 + else 4007 + prot->rts_len_thres = cpu_to_le32(rts_threshold); 4008 + 4009 + prot->rts_pkt_thres = 0x2; 4010 + 4011 + prot->he_rts_thres = cpu_to_le16(vif->bss_conf.frame_time_rts_th); 4012 + if (!prot->he_rts_thres) 4013 + prot->he_rts_thres = cpu_to_le16(DEFAULT_HE_DURATION_RTS_THRES); 4014 + 4015 + return mt76_mcu_skb_send_msg(&dev->mt76, skb, 4016 + MCU_EXT_CMD(BSS_INFO_UPDATE), true); 3957 4017 } 3958 4018 3959 4019 int mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+11
drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
··· 399 399 __le16 prob_rsp_len; 400 400 } __packed __aligned(4); 401 401 402 + struct bss_info_prot { 403 + __le16 tag; 404 + __le16 len; 405 + __le32 prot_type; 406 + __le32 prot_mode; 407 + __le32 rts_len_thres; 408 + __le16 he_rts_thres; 409 + u8 rts_pkt_thres; 410 + u8 rsv[5]; 411 + } __packed; 412 + 402 413 enum { 403 414 BSS_INFO_BCN_CSA, 404 415 BSS_INFO_BCN_BCC,
+4
drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
··· 84 84 #define MT7915_CRIT_TEMP 110 85 85 #define MT7915_MAX_TEMP 120 86 86 87 + #define MT7915_RTS_LEN_THRES 0x92b 88 + 87 89 struct mt7915_vif; 88 90 struct mt7915_sta; 89 91 struct mt7915_dfs_pulse; ··· 475 473 u32 changed); 476 474 int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 477 475 int enable, u32 changed); 476 + int mt7915_mcu_set_protection(struct mt7915_phy *phy, struct ieee80211_vif *vif, 477 + u8 ht_mode, bool use_cts_prot); 478 478 int mt7915_mcu_add_obss_spr(struct mt7915_phy *phy, struct ieee80211_vif *vif, 479 479 struct ieee80211_he_obss_pd *he_obss_pd); 480 480 int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+3 -1
drivers/net/wireless/mediatek/mt76/mt7921/init.c
··· 302 302 dev->pm.idle_timeout = MT792x_PM_TIMEOUT; 303 303 dev->pm.stats.last_wake_event = jiffies; 304 304 dev->pm.stats.last_doze_event = jiffies; 305 - if (!mt76_is_usb(&dev->mt76)) { 305 + 306 + if (!mt76_is_usb(&dev->mt76) && 307 + !is_mt7902(&dev->mt76)) { 306 308 dev->pm.enable_user = true; 307 309 dev->pm.enable = true; 308 310 dev->pm.ds_enable_user = true;
+20 -9
drivers/net/wireless/mediatek/mt76/mt7921/main.c
··· 371 371 { 372 372 struct mt792x_phy *phy = &dev->phy; 373 373 374 + if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) 375 + return; 376 + 374 377 timer_delete_sync(&phy->roc_timer); 375 - cancel_work_sync(&phy->roc_work); 376 - if (test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) 377 - ieee80211_iterate_interfaces(mt76_hw(dev), 378 - IEEE80211_IFACE_ITER_RESUME_ALL, 379 - mt7921_roc_iter, (void *)phy); 378 + cancel_work(&phy->roc_work); 379 + 380 + ieee80211_iterate_interfaces(mt76_hw(dev), 381 + IEEE80211_IFACE_ITER_RESUME_ALL, 382 + mt7921_roc_iter, (void *)phy); 380 383 } 381 384 EXPORT_SYMBOL_GPL(mt7921_roc_abort_sync); 382 385 ··· 390 387 phy = (struct mt792x_phy *)container_of(work, struct mt792x_phy, 391 388 roc_work); 392 389 393 - if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) 394 - return; 395 - 396 390 mt792x_mutex_acquire(phy->dev); 391 + if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) { 392 + mt792x_mutex_release(phy->dev); 393 + return; 394 + } 397 395 ieee80211_iterate_active_interfaces(phy->mt76->hw, 398 396 IEEE80211_IFACE_ITER_RESUME_ALL, 399 397 mt7921_roc_iter, phy); ··· 800 796 } 801 797 802 798 out: 803 - mt7921_mcu_set_clc(dev, dev->mt76.alpha2, dev->country_ie_env); 799 + if (vif->bss_conf.chanreq.oper.chan->band == NL80211_BAND_6GHZ) 800 + mt7921_regd_update(dev); 804 801 } 805 802 806 803 int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, ··· 811 806 struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; 812 807 struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; 813 808 int ret, idx; 809 + 810 + if (sta->aid > MT7921_MAX_AID) 811 + return -ENOENT; 814 812 815 813 idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT792x_WTBL_STA - 1); 816 814 if (idx < 0) ··· 857 849 struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); 858 850 struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; 859 851 struct mt792x_vif *mvif = (struct mt792x_vif 
*)vif->drv_priv; 852 + 853 + if (sta->aid > MT7921_MAX_AID) 854 + return -ENOENT; 860 855 861 856 if (ev != MT76_STA_EVENT_ASSOC) 862 857 return 0;
+3
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
··· 1353 1353 u16 len = le16_to_cpu(rule->len); 1354 1354 u16 offset = len + sizeof(*rule); 1355 1355 1356 + if (buf_len < offset) 1357 + break; 1358 + 1356 1359 pos += offset; 1357 1360 buf_len -= offset; 1358 1361 if (rule->alpha2[0] != alpha2[0] ||
+16
drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
··· 7 7 #include "../mt792x.h" 8 8 #include "regs.h" 9 9 10 + #define MT7921_MAX_AID 20 11 + 10 12 #define MT7921_TX_RING_SIZE 2048 11 13 #define MT7921_TX_MCU_RING_SIZE 256 12 14 #define MT7921_TX_FWDL_RING_SIZE 128 ··· 16 14 #define MT7921_RX_RING_SIZE 1536 17 15 #define MT7921_RX_MCU_RING_SIZE 8 18 16 #define MT7921_RX_MCU_WA_RING_SIZE 512 17 + 18 + /* MT7902 Rx Ring0 is for both Rx Event and Tx Done Event */ 19 + #define MT7902_RX_MCU_RING_SIZE 512 19 20 20 21 #define MT7921_EEPROM_SIZE 3584 21 22 #define MT7921_TOKEN_SIZE 8192 ··· 120 115 MT7921_RXQ_BAND0 = 0, 121 116 MT7921_RXQ_BAND1, 122 117 MT7921_RXQ_MCU_WM = 0, 118 + }; 119 + 120 + /* MT7902 assigns its MCU-WM TXQ at index 15 */ 121 + enum mt7902_txq_id { 122 + MT7902_TXQ_MCU_WM = 15, 123 + }; 124 + 125 + struct mt7921_dma_layout { 126 + u8 mcu_wm_txq; 127 + u16 mcu_rxdone_ring_size; 128 + bool has_mcu_wa; 123 129 }; 124 130 125 131 enum {
+56 -14
drivers/net/wireless/mediatek/mt76/mt7921/pci.c
··· 26 26 .driver_data = (kernel_ulong_t)MT7922_FIRMWARE_WM }, 27 27 { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7920), 28 28 .driver_data = (kernel_ulong_t)MT7920_FIRMWARE_WM }, 29 + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7902), 30 + .driver_data = (kernel_ulong_t)MT7902_FIRMWARE_WM }, 29 31 { }, 30 32 }; 31 33 ··· 169 167 170 168 static int mt7921_dma_init(struct mt792x_dev *dev) 171 169 { 170 + struct mt7921_dma_layout layout = { 171 + /* General case: MT7921 / MT7922 /MT7920 */ 172 + .mcu_wm_txq = MT7921_TXQ_MCU_WM, 173 + .mcu_rxdone_ring_size = MT7921_RX_MCU_RING_SIZE, 174 + .has_mcu_wa = true, 175 + }; 176 + bool is_mt7902; 172 177 int ret; 178 + 179 + is_mt7902 = mt7921_l1_rr(dev, MT_HW_CHIPID) == 0x7902; 180 + 181 + /* 182 + * MT7902 special case: 183 + * - MCU-WM TXQ uses index 15 184 + * - RX Ring0 is larger and shared for event/TX-done 185 + * - MT7902 does not use the MCU_WA ring 186 + */ 187 + if (is_mt7902) { 188 + layout.mcu_wm_txq = MT7902_TXQ_MCU_WM; 189 + layout.mcu_rxdone_ring_size = MT7902_RX_MCU_RING_SIZE; 190 + layout.has_mcu_wa = false; 191 + } 173 192 174 193 mt76_dma_attach(&dev->mt76); 175 194 ··· 208 185 mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, 0x4); 209 186 210 187 /* command to WM */ 211 - ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT7921_TXQ_MCU_WM, 188 + ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, layout.mcu_wm_txq, 212 189 MT7921_TX_MCU_RING_SIZE, MT_TX_RING_BASE); 213 190 if (ret) 214 191 return ret; ··· 222 199 /* event from WM before firmware download */ 223 200 ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 224 201 MT7921_RXQ_MCU_WM, 225 - MT7921_RX_MCU_RING_SIZE, 202 + layout.mcu_rxdone_ring_size, 226 203 MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE); 227 204 if (ret) 228 205 return ret; 229 206 230 - /* Change mcu queue after firmware download */ 231 - ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA], 232 - MT7921_RXQ_MCU_WM, 233 - MT7921_RX_MCU_WA_RING_SIZE, 234 - MT_RX_BUF_SIZE, MT_WFDMA0(0x540)); 235 - 
if (ret) 236 - return ret; 207 + if (layout.has_mcu_wa) { 208 + /* Change mcu queue after firmware download */ 209 + ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA], 210 + MT7921_RXQ_MCU_WM, 211 + MT7921_RX_MCU_WA_RING_SIZE, 212 + MT_RX_BUF_SIZE, MT_WFDMA0(0x540)); 213 + if (ret) 214 + return ret; 215 + } 237 216 238 217 /* rx data */ 239 218 ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], ··· 301 276 struct mt76_bus_ops *bus_ops; 302 277 struct mt792x_dev *dev; 303 278 struct mt76_dev *mdev; 279 + void __iomem *regs; 304 280 u16 cmd, chipid; 305 281 u8 features; 306 282 int ret; 307 283 308 284 ret = pcim_enable_device(pdev); 309 - if (ret) 310 - return ret; 311 - 312 - ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev)); 313 285 if (ret) 314 286 return ret; 315 287 ··· 343 321 344 322 pci_set_drvdata(pdev, mdev); 345 323 324 + regs = pcim_iomap_region(pdev, 0, pci_name(pdev)); 325 + if (IS_ERR(regs)) 326 + return PTR_ERR(regs); 327 + 346 328 dev = container_of(mdev, struct mt792x_dev, mt76); 347 329 dev->fw_features = features; 348 330 dev->hif_ops = &mt7921_pcie_ops; 349 331 dev->irq_map = &irq_map; 350 - mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]); 332 + mt76_mmio_init(&dev->mt76, regs); 333 + 334 + if (id->device == 0x7902) { 335 + struct mt792x_irq_map *map; 336 + 337 + /* MT7902 needs a mutable copy because wm2_complete_mask differs */ 338 + map = devm_kmemdup(&pdev->dev, &irq_map, 339 + sizeof(irq_map), GFP_KERNEL); 340 + if (!map) 341 + return -ENOMEM; 342 + 343 + map->rx.wm2_complete_mask = 0; 344 + dev->irq_map = map; 345 + } 346 + 351 347 tasklet_init(&mdev->irq_tasklet, mt792x_irq_tasklet, (unsigned long)dev); 352 348 353 349 dev->phy.dev = dev; ··· 619 579 MODULE_FIRMWARE(MT7921_ROM_PATCH); 620 580 MODULE_FIRMWARE(MT7922_FIRMWARE_WM); 621 581 MODULE_FIRMWARE(MT7922_ROM_PATCH); 582 + MODULE_FIRMWARE(MT7902_FIRMWARE_WM); 583 + MODULE_FIRMWARE(MT7902_ROM_PATCH); 622 584 MODULE_AUTHOR("Sean Wang 
<sean.wang@mediatek.com>"); 623 585 MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); 624 586 MODULE_DESCRIPTION("MediaTek MT7921E (PCIe) wireless driver");
+3 -3
drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
··· 71 71 mt76_txq_schedule_all(&dev->mphy); 72 72 73 73 mt76_worker_disable(&dev->mt76.tx_worker); 74 - napi_disable(&dev->mt76.napi[MT_RXQ_MAIN]); 75 - napi_disable(&dev->mt76.napi[MT_RXQ_MCU]); 76 - napi_disable(&dev->mt76.napi[MT_RXQ_MCU_WA]); 74 + mt76_for_each_q_rx(&dev->mt76, i) { 75 + napi_disable(&dev->mt76.napi[i]); 76 + } 77 77 napi_disable(&dev->mt76.tx_napi); 78 78 79 79 mt76_connac2_tx_token_put(&dev->mt76);
+4
drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
··· 19 19 static const struct sdio_device_id mt7921s_table[] = { 20 20 { SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7901), 21 21 .driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM }, 22 + { SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7902), 23 + .driver_data = (kernel_ulong_t)MT7902_FIRMWARE_WM }, 22 24 { } /* Terminating entry */ 23 25 }; 24 26 ··· 319 317 MODULE_DEVICE_TABLE(sdio, mt7921s_table); 320 318 MODULE_FIRMWARE(MT7921_FIRMWARE_WM); 321 319 MODULE_FIRMWARE(MT7921_ROM_PATCH); 320 + MODULE_FIRMWARE(MT7902_FIRMWARE_WM); 321 + MODULE_FIRMWARE(MT7902_ROM_PATCH); 322 322 323 323 static DEFINE_SIMPLE_DEV_PM_OPS(mt7921s_pm_ops, mt7921s_suspend, mt7921s_resume); 324 324
+2
drivers/net/wireless/mediatek/mt76/mt7925/init.c
··· 91 91 92 92 mt7925_mac_init_basic_rates(dev); 93 93 94 + memzero_explicit(&dev->mt76.alpha2, sizeof(dev->mt76.alpha2)); 95 + 94 96 return 0; 95 97 } 96 98 EXPORT_SYMBOL_GPL(mt7925_mac_init);
+12 -6
drivers/net/wireless/mediatek/mt76/mt7925/mac.c
··· 804 804 txwi[5] = cpu_to_le32(val); 805 805 806 806 val = MT_TXD6_DAS | FIELD_PREP(MT_TXD6_MSDU_CNT, 1); 807 - if (!ieee80211_vif_is_mld(vif) || 808 - (q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0)) 807 + if (vif && (!ieee80211_vif_is_mld(vif) || 808 + (q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0))) 809 809 val |= MT_TXD6_DIS_MAT; 810 810 txwi[6] = cpu_to_le32(val); 811 811 txwi[7] = 0; ··· 846 846 bool is_8023; 847 847 u16 fc, tid; 848 848 849 + if (!sta) 850 + return; 851 + 849 852 link_sta = rcu_dereference(sta->link[wcid->link_id]); 850 853 if (!link_sta) 851 854 return; 852 855 853 - if (!sta || !(link_sta->ht_cap.ht_supported || link_sta->he_cap.has_he)) 856 + if (!(link_sta->ht_cap.ht_supported || link_sta->he_cap.has_he)) 854 857 return; 855 858 856 859 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; ··· 885 882 else 886 883 mlink = &msta->deflink; 887 884 888 - if (!test_and_set_bit(tid, &mlink->wcid.ampdu_state)) 889 - ieee80211_start_tx_ba_session(sta, tid, 0); 885 + if (!test_and_set_bit(tid, &mlink->wcid.ampdu_state)) { 886 + if (ieee80211_start_tx_ba_session(sta, tid, 0)) 887 + clear_bit(tid, &mlink->wcid.ampdu_state); 888 + } 890 889 } 891 890 892 891 static bool ··· 1285 1280 if (vif->type == NL80211_IFTYPE_AP) { 1286 1281 mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.deflink.wcid, 1287 1282 true, NULL); 1288 - mt7925_mcu_sta_update(dev, NULL, vif, true, 1283 + mt7925_mcu_sta_update(dev, NULL, vif, 1284 + &mvif->sta.deflink, true, 1289 1285 MT76_STA_INFO_STATE_NONE); 1290 1286 mt7925_mcu_uni_add_beacon_offload(dev, hw, vif, true); 1291 1287 }
+326 -68
drivers/net/wireless/mediatek/mt76/mt7925/main.c
··· 245 245 { 246 246 struct wiphy *wiphy = phy->mt76->hw->wiphy; 247 247 static const u8 ext_capa_sta[] = { 248 + [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, 248 249 [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, 249 250 [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, 250 251 }; ··· 439 438 if (phy->chip_cap & MT792x_CHIP_CAP_RSSI_NOTIFY_EVT_EN) 440 439 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_CQM_RSSI; 441 440 441 + INIT_WORK(&mvif->csa_work, mt7925_csa_work); 442 + timer_setup(&mvif->csa_timer, mt792x_csa_timer, 0); 443 + 442 444 out: 443 445 mt792x_mutex_release(dev); 444 446 ··· 461 457 { 462 458 struct mt792x_phy *phy = &dev->phy; 463 459 460 + if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) 461 + return; 462 + 464 463 timer_delete_sync(&phy->roc_timer); 465 - cancel_work_sync(&phy->roc_work); 466 - if (test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) 467 - ieee80211_iterate_interfaces(mt76_hw(dev), 468 - IEEE80211_IFACE_ITER_RESUME_ALL, 469 - mt7925_roc_iter, (void *)phy); 464 + 465 + cancel_work(&phy->roc_work); 466 + 467 + ieee80211_iterate_interfaces(mt76_hw(dev), 468 + IEEE80211_IFACE_ITER_RESUME_ALL, 469 + mt7925_roc_iter, (void *)phy); 470 470 } 471 471 EXPORT_SYMBOL_GPL(mt7925_roc_abort_sync); 472 472 ··· 549 541 550 542 phy->roc_grant = false; 551 543 552 - err = mt7925_mcu_set_mlo_roc(mconf, sel_links, 5, ++phy->roc_token_id); 544 + err = mt7925_mcu_set_mlo_roc(phy, mconf, sel_links, 5, ++phy->roc_token_id); 553 545 if (err < 0) { 554 546 clear_bit(MT76_STATE_ROC, &phy->mt76->state); 555 547 goto out; ··· 594 586 595 587 static int mt7925_set_link_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 596 588 struct ieee80211_vif *vif, struct ieee80211_sta *sta, 597 - struct ieee80211_key_conf *key, int link_id) 589 + struct ieee80211_key_conf *key, int link_id, 590 + struct mt792x_link_sta *mlink) 598 591 { 599 592 struct mt792x_dev *dev = mt792x_hw_dev(hw); 600 593 struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; ··· 604 595 struct 
ieee80211_bss_conf *link_conf; 605 596 struct ieee80211_link_sta *link_sta; 606 597 int idx = key->keyidx, err = 0; 607 - struct mt792x_link_sta *mlink; 608 598 struct mt792x_bss_conf *mconf; 609 599 struct mt76_wcid *wcid; 610 600 u8 *wcid_keyidx; ··· 611 603 link_conf = mt792x_vif_to_bss_conf(vif, link_id); 612 604 link_sta = sta ? mt792x_sta_to_link_sta(vif, sta, link_id) : NULL; 613 605 mconf = mt792x_vif_to_link(mvif, link_id); 614 - mlink = mt792x_sta_to_link(msta, link_id); 615 606 wcid = &mlink->wcid; 616 607 wcid_keyidx = &wcid->hw_key_idx; 617 608 ··· 678 671 struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; 679 672 struct mt792x_sta *msta = sta ? (struct mt792x_sta *)sta->drv_priv : 680 673 &mvif->sta; 674 + struct mt792x_link_sta *mlink; 681 675 int err; 682 676 683 677 /* The hardware does not support per-STA RX GTK, fallback ··· 700 692 add = key->link_id != -1 ? BIT(key->link_id) : msta->valid_links; 701 693 702 694 for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) { 703 - err = mt7925_set_link_key(hw, cmd, vif, sta, key, link_id); 695 + mlink = mt792x_sta_to_link(msta, link_id); 696 + err = mt7925_set_link_key(hw, cmd, vif, sta, key, link_id, 697 + mlink); 704 698 if (err < 0) 705 699 break; 706 700 } 707 701 } else { 708 - err = mt7925_set_link_key(hw, cmd, vif, sta, key, vif->bss_conf.link_id); 702 + mlink = mt792x_sta_to_link(msta, vif->bss_conf.link_id); 703 + err = mt7925_set_link_key(hw, cmd, vif, sta, key, 704 + vif->bss_conf.link_id, mlink); 709 705 } 710 706 711 707 mt792x_mutex_release(dev); ··· 854 842 855 843 static int mt7925_mac_link_sta_add(struct mt76_dev *mdev, 856 844 struct ieee80211_vif *vif, 857 - struct ieee80211_link_sta *link_sta) 845 + struct ieee80211_link_sta *link_sta, 846 + struct mt792x_link_sta *mlink) 858 847 { 859 848 struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); 860 849 struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; 861 850 struct ieee80211_bss_conf 
*link_conf; 862 851 struct mt792x_bss_conf *mconf; 863 852 u8 link_id = link_sta->link_id; 864 - struct mt792x_link_sta *mlink; 853 + bool wcid_published = false; 865 854 struct mt792x_sta *msta; 866 855 struct mt76_wcid *wcid; 856 + bool pm_woken = false; 867 857 int ret, idx; 868 858 869 859 msta = (struct mt792x_sta *)link_sta->sta->drv_priv; 870 - mlink = mt792x_sta_to_link(msta, link_id); 860 + 861 + if (WARN_ON_ONCE(!mlink)) 862 + return -EINVAL; 871 863 872 864 idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT792x_WTBL_STA - 1); 873 865 if (idx < 0) ··· 890 874 wcid = &mlink->wcid; 891 875 ewma_signal_init(&wcid->rssi); 892 876 rcu_assign_pointer(dev->mt76.wcid[wcid->idx], wcid); 893 - mt76_wcid_init(wcid, 0); 877 + wcid_published = true; 894 878 ewma_avg_signal_init(&mlink->avg_ack_signal); 895 879 memset(mlink->airtime_ac, 0, 896 880 sizeof(msta->deflink.airtime_ac)); 897 881 898 882 ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm); 899 883 if (ret) 900 - return ret; 884 + goto out_wcid; 885 + pm_woken = true; 901 886 902 887 mt7925_mac_wtbl_update(dev, idx, 903 888 MT_WTBL_UPDATE_ADM_COUNT_CLEAR); ··· 907 890 908 891 /* should update bss info before STA add */ 909 892 if (vif->type == NL80211_IFTYPE_STATION && !link_sta->sta->tdls) { 910 - if (ieee80211_vif_is_mld(vif)) 911 - mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx, 912 - link_conf, link_sta, link_sta != mlink->pri_link); 913 - else 914 - mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx, 915 - link_conf, link_sta, false); 893 + struct mt792x_link_sta *mlink_bc; 894 + 895 + mlink_bc = mt792x_sta_to_link(&mvif->sta, mconf->link_id); 896 + 897 + if (ieee80211_vif_is_mld(vif)) { 898 + ret = mt7925_mcu_add_bss_info_sta(&dev->phy, mconf->mt76.ctx, 899 + link_conf, link_sta, 900 + mlink_bc->wcid.idx, mlink->wcid.idx, 901 + link_sta != mlink->pri_link); 902 + if (ret) 903 + goto out_pm; 904 + } else { 905 + ret = mt7925_mcu_add_bss_info_sta(&dev->phy, mconf->mt76.ctx, 906 + link_conf, link_sta, 907 + 
mlink_bc->wcid.idx, mlink->wcid.idx, 908 + false); 909 + if (ret) 910 + goto out_pm; 911 + } 916 912 } 917 913 918 914 if (ieee80211_vif_is_mld(vif) && 919 915 link_sta == mlink->pri_link) { 920 - ret = mt7925_mcu_sta_update(dev, link_sta, vif, true, 916 + ret = mt7925_mcu_sta_update(dev, link_sta, vif, 917 + mlink, true, 921 918 MT76_STA_INFO_STATE_NONE); 922 919 if (ret) 923 - return ret; 920 + goto out_pm; 924 921 } else if (ieee80211_vif_is_mld(vif) && 925 922 link_sta != mlink->pri_link) { 926 - ret = mt7925_mcu_sta_update(dev, mlink->pri_link, vif, 927 - true, MT76_STA_INFO_STATE_ASSOC); 928 - if (ret) 929 - return ret; 923 + struct mt792x_link_sta *pri_mlink; 924 + struct mt76_wcid *pri_wcid; 930 925 931 - ret = mt7925_mcu_sta_update(dev, link_sta, vif, true, 926 + /* alternative lookup via def_wcid */ 927 + pri_wcid = mlink->wcid.def_wcid; 928 + 929 + pri_mlink = pri_wcid ? 930 + container_of(pri_wcid, struct mt792x_link_sta, wcid) : 931 + NULL; 932 + 933 + if (WARN_ON_ONCE(!pri_mlink)) { 934 + ret = -EINVAL; 935 + goto out_pm; 936 + } 937 + 938 + ret = mt7925_mcu_sta_update(dev, mlink->pri_link, vif, 939 + pri_mlink, true, 932 940 MT76_STA_INFO_STATE_ASSOC); 933 941 if (ret) 934 - return ret; 942 + goto out_pm; 943 + 944 + ret = mt7925_mcu_sta_update(dev, link_sta, vif, 945 + mlink, true, 946 + MT76_STA_INFO_STATE_ASSOC); 947 + if (ret) 948 + goto out_pm; 935 949 } else { 936 - ret = mt7925_mcu_sta_update(dev, link_sta, vif, true, 950 + ret = mt7925_mcu_sta_update(dev, link_sta, vif, 951 + mlink, true, 937 952 MT76_STA_INFO_STATE_NONE); 938 953 if (ret) 939 - return ret; 954 + goto out_pm; 940 955 } 941 956 942 957 mt76_connac_power_save_sched(&dev->mphy, &dev->pm); 943 958 944 959 return 0; 960 + 961 + out_pm: 962 + if (pm_woken) 963 + mt76_connac_power_save_sched(&dev->mphy, &dev->pm); 964 + out_wcid: 965 + if (wcid_published) { 966 + u16 idx = wcid->idx; 967 + 968 + rcu_assign_pointer(dev->mt76.wcid[idx], NULL); 969 + mt76_wcid_cleanup(mdev, wcid); 970 
+ mt76_wcid_mask_clear(mdev->wcid_mask, wcid->idx); 971 + } 972 + return ret; 973 + } 974 + 975 + /* 976 + * Host-only unwind for sta_add_links() failures. 977 + * 978 + * If add_links fail due to MCU/firmware timeouts; calling the full remove 979 + * path would send more firmware commands and may hang again. So only rollback 980 + * host-published state here (msta->link/valid_links, dev->mt76.wcid[idx]) and 981 + * free mlink objects (RCU-safe). Firmware state is left for reset/recovery. 982 + */ 983 + static void 984 + mt7925_mac_sta_unwind_links_host(struct mt792x_dev *dev, 985 + struct ieee80211_sta *sta, 986 + unsigned long links) 987 + { 988 + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; 989 + unsigned int link_id; 990 + 991 + for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) { 992 + struct mt792x_link_sta *mlink; 993 + u16 idx; 994 + 995 + mlink = rcu_replace_pointer(msta->link[link_id], NULL, 996 + lockdep_is_held(&dev->mt76.mutex)); 997 + if (!mlink) 998 + continue; 999 + 1000 + msta->valid_links &= ~BIT(link_id); 1001 + if (msta->deflink_id == link_id) 1002 + msta->deflink_id = IEEE80211_LINK_UNSPECIFIED; 1003 + 1004 + idx = mlink->wcid.idx; 1005 + rcu_assign_pointer(dev->mt76.wcid[idx], NULL); 1006 + mt76_wcid_cleanup(&dev->mt76, &mlink->wcid); 1007 + mt76_wcid_mask_clear(dev->mt76.wcid_mask, idx); 1008 + 1009 + if (mlink != &msta->deflink) 1010 + kfree_rcu(mlink, rcu_head); 1011 + } 945 1012 } 946 1013 947 1014 static int ··· 1033 932 struct ieee80211_sta *sta, unsigned long new_links) 1034 933 { 1035 934 struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; 935 + unsigned long added_links = 0; 1036 936 unsigned int link_id; 1037 937 int err = 0; 1038 938 1039 939 for_each_set_bit(link_id, &new_links, IEEE80211_MLD_MAX_NUM_LINKS) { 1040 940 struct ieee80211_link_sta *link_sta; 1041 941 struct mt792x_link_sta *mlink; 942 + bool is_deflink = false; 1042 943 1043 944 if (msta->deflink_id == IEEE80211_LINK_UNSPECIFIED) 
{ 1044 945 mlink = &msta->deflink; 1045 - msta->deflink_id = link_id; 946 + is_deflink = true; 1046 947 } else { 1047 - mlink = devm_kzalloc(dev->mt76.dev, sizeof(*mlink), GFP_KERNEL); 948 + mlink = kzalloc(sizeof(*mlink), GFP_KERNEL); 1048 949 if (!mlink) { 1049 950 err = -ENOMEM; 1050 951 break; 1051 952 } 1052 953 } 1053 954 1054 - msta->valid_links |= BIT(link_id); 1055 - rcu_assign_pointer(msta->link[link_id], mlink); 1056 955 mlink->sta = msta; 1057 956 mlink->pri_link = &sta->deflink; 1058 957 mlink->wcid.def_wcid = &msta->deflink.wcid; 1059 958 1060 959 link_sta = mt792x_sta_to_link_sta(vif, sta, link_id); 1061 - mt7925_mac_link_sta_add(&dev->mt76, vif, link_sta); 960 + err = mt7925_mac_link_sta_add(&dev->mt76, vif, link_sta, mlink); 961 + if (err) { 962 + if (!is_deflink) 963 + kfree_rcu(mlink, rcu_head); 964 + break; 965 + } 966 + 967 + if (is_deflink) 968 + msta->deflink_id = link_id; 969 + 970 + rcu_assign_pointer(msta->link[link_id], mlink); 971 + msta->valid_links |= BIT(link_id); 972 + 973 + added_links |= BIT(link_id); 1062 974 } 975 + 976 + if (err && added_links) 977 + mt7925_mac_sta_unwind_links_host(dev, sta, added_links); 1063 978 1064 979 return err; 1065 980 } ··· 1098 981 1099 982 err = mt7925_mac_sta_add_links(dev, vif, sta, sta->valid_links); 1100 983 } else { 1101 - err = mt7925_mac_link_sta_add(mdev, vif, &sta->deflink); 984 + err = mt7925_mac_link_sta_add(mdev, vif, &sta->deflink, 985 + &msta->deflink); 1102 986 } 1103 987 1104 988 return err; ··· 1148 1030 struct mt792x_link_sta *mlink; 1149 1031 struct mt792x_sta *msta; 1150 1032 1033 + mt792x_mutex_acquire(dev); 1034 + 1151 1035 msta = (struct mt792x_sta *)link_sta->sta->drv_priv; 1152 1036 mlink = mt792x_sta_to_link(msta, link_sta->link_id); 1153 - 1154 - mt792x_mutex_acquire(dev); 1155 1037 1156 1038 if (ieee80211_vif_is_mld(vif)) { 1157 1039 link_conf = mt792x_vif_to_bss_conf(vif, msta->deflink_id); ··· 1173 1055 MT_WTBL_UPDATE_ADM_COUNT_CLEAR); 1174 1056 memset(mlink->airtime_ac, 
0, sizeof(mlink->airtime_ac)); 1175 1057 1176 - mt7925_mcu_sta_update(dev, link_sta, vif, true, MT76_STA_INFO_STATE_ASSOC); 1058 + mt7925_mcu_sta_update(dev, link_sta, vif, mlink, true, 1059 + MT76_STA_INFO_STATE_ASSOC); 1177 1060 1178 1061 mt792x_mutex_release(dev); 1179 1062 } ··· 1202 1083 1203 1084 static void mt7925_mac_link_sta_remove(struct mt76_dev *mdev, 1204 1085 struct ieee80211_vif *vif, 1205 - struct ieee80211_link_sta *link_sta) 1086 + struct ieee80211_link_sta *link_sta, 1087 + struct mt792x_link_sta *mlink) 1206 1088 { 1207 1089 struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); 1090 + struct mt76_wcid *wcid = &mlink->wcid; 1208 1091 struct ieee80211_bss_conf *link_conf; 1209 1092 u8 link_id = link_sta->link_id; 1210 - struct mt792x_link_sta *mlink; 1211 - struct mt792x_sta *msta; 1212 - 1213 - msta = (struct mt792x_sta *)link_sta->sta->drv_priv; 1214 - mlink = mt792x_sta_to_link(msta, link_id); 1093 + u16 idx = wcid->idx; 1215 1094 1216 1095 mt7925_roc_abort_sync(dev); 1217 1096 1218 - mt76_connac_free_pending_tx_skbs(&dev->pm, &mlink->wcid); 1097 + mt76_connac_free_pending_tx_skbs(&dev->pm, wcid); 1219 1098 mt76_connac_pm_wake(&dev->mphy, &dev->pm); 1220 1099 1221 - mt7925_mcu_sta_update(dev, link_sta, vif, false, 1100 + mt7925_mcu_sta_update(dev, link_sta, vif, mlink, false, 1222 1101 MT76_STA_INFO_STATE_NONE); 1223 1102 mt7925_mac_wtbl_update(dev, mlink->wcid.idx, 1224 1103 MT_WTBL_UPDATE_ADM_COUNT_CLEAR); ··· 1240 1123 list_del_init(&mlink->wcid.poll_list); 1241 1124 spin_unlock_bh(&mdev->sta_poll_lock); 1242 1125 1126 + rcu_assign_pointer(dev->mt76.wcid[idx], NULL); 1127 + mt76_wcid_cleanup(mdev, wcid); 1128 + mt76_wcid_mask_clear(mdev->wcid_mask, idx); 1129 + 1243 1130 mt76_connac_power_save_sched(&dev->mphy, &dev->pm); 1244 1131 } 1245 1132 ··· 1253 1132 { 1254 1133 struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; 1255 1134 struct mt76_dev *mdev = &dev->mt76; 1256 - struct mt76_wcid *wcid; 1257 1135 unsigned 
int link_id; 1258 1136 1259 1137 /* clean up bss before starec */ ··· 1291 1171 if (!link_sta) 1292 1172 continue; 1293 1173 1294 - mlink = mt792x_sta_to_link(msta, link_id); 1174 + mlink = rcu_replace_pointer(msta->link[link_id], NULL, 1175 + lockdep_is_held(&mdev->mutex)); 1295 1176 if (!mlink) 1296 1177 continue; 1297 1178 1298 - mt7925_mac_link_sta_remove(&dev->mt76, vif, link_sta); 1299 - 1300 - wcid = &mlink->wcid; 1301 - rcu_assign_pointer(msta->link[link_id], NULL); 1302 1179 msta->valid_links &= ~BIT(link_id); 1303 1180 mlink->sta = NULL; 1304 1181 mlink->pri_link = NULL; 1305 1182 1306 - if (link_sta != mlink->pri_link) { 1307 - mt76_wcid_cleanup(mdev, wcid); 1308 - mt76_wcid_mask_clear(mdev->wcid_mask, wcid->idx); 1309 - } 1183 + mt7925_mac_link_sta_remove(&dev->mt76, vif, link_sta, mlink); 1184 + 1185 + if (mlink != &msta->deflink) 1186 + kfree_rcu(mlink, rcu_head); 1310 1187 1311 1188 if (msta->deflink_id == link_id) 1312 1189 msta->deflink_id = IEEE80211_LINK_UNSPECIFIED; ··· 1440 1323 void mt7925_scan_work(struct work_struct *work) 1441 1324 { 1442 1325 struct mt792x_phy *phy; 1326 + struct mt792x_dev *dev; 1327 + struct mt76_connac_pm *pm; 1443 1328 1444 1329 phy = (struct mt792x_phy *)container_of(work, struct mt792x_phy, 1445 1330 scan_work.work); 1331 + 1332 + dev = phy->dev; 1333 + pm = &dev->pm; 1334 + 1335 + if (pm->suspended) 1336 + return; 1446 1337 1447 1338 while (true) { 1448 1339 struct sk_buff *skb; ··· 1669 1544 valid = ieee80211_vif_is_mld(vif) ? 
mvif->valid_links : BIT(0); 1670 1545 1671 1546 for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) { 1547 + struct mt792x_bss_conf *mconf; 1672 1548 struct mt792x_link_sta *mlink; 1673 1549 1550 + mconf = mt792x_vif_to_link(mvif, i); 1674 1551 mlink = mt792x_sta_to_link(msta, i); 1675 1552 1676 1553 if (enabled) ··· 1683 1556 if (!mlink->wcid.sta) 1684 1557 continue; 1685 1558 1686 - mt7925_mcu_wtbl_update_hdr_trans(dev, vif, sta, i); 1559 + mt7925_mcu_wtbl_update_hdr_trans(dev, vif, mconf, mlink); 1687 1560 } 1688 1561 1689 1562 mt792x_mutex_release(dev); ··· 1843 1716 if (err) 1844 1717 goto out; 1845 1718 1846 - err = mt7925_mcu_sta_update(dev, NULL, vif, true, 1719 + err = mt7925_mcu_sta_update(dev, NULL, vif, 1720 + &mvif->sta.deflink, true, 1847 1721 MT76_STA_INFO_STATE_NONE); 1848 1722 out: 1849 1723 mt792x_mutex_release(dev); ··· 1877 1749 mt7925_add_chanctx(struct ieee80211_hw *hw, 1878 1750 struct ieee80211_chanctx_conf *ctx) 1879 1751 { 1752 + struct mt792x_dev *dev = mt792x_hw_dev(hw); 1753 + 1754 + dev->new_ctx = ctx; 1755 + 1880 1756 return 0; 1881 1757 } 1882 1758 ··· 1888 1756 mt7925_remove_chanctx(struct ieee80211_hw *hw, 1889 1757 struct ieee80211_chanctx_conf *ctx) 1890 1758 { 1759 + struct mt792x_dev *dev = mt792x_hw_dev(hw); 1760 + 1761 + if (dev->new_ctx == ctx) 1762 + dev->new_ctx = NULL; 1763 + 1891 1764 } 1892 1765 1893 1766 static void ··· 1987 1850 mt792x_mutex_acquire(dev); 1988 1851 1989 1852 if (changed & BSS_CHANGED_ASSOC) { 1990 - mt7925_mcu_sta_update(dev, NULL, vif, true, 1853 + mt7925_mcu_sta_update(dev, NULL, vif, 1854 + &mvif->sta.deflink, true, 1991 1855 MT76_STA_INFO_STATE_ASSOC); 1992 1856 mt7925_mcu_set_beacon_filter(dev, vif, vif->cfg.assoc); 1993 1857 ··· 2032 1894 struct mt792x_phy *phy = mt792x_hw_phy(hw); 2033 1895 struct mt792x_dev *dev = mt792x_hw_dev(hw); 2034 1896 struct mt792x_bss_conf *mconf; 2035 - struct ieee80211_bss_conf *link_conf; 2036 1897 2037 1898 mconf = mt792x_vif_to_link(mvif, info->link_id); 
2038 - link_conf = mt792x_vif_to_bss_conf(vif, mconf->link_id); 2039 1899 2040 1900 mt792x_mutex_acquire(dev); 2041 1901 ··· 2074 1938 ieee80211_queue_delayed_work(hw, &dev->mlo_pm_work, 5 * HZ); 2075 1939 mvif->mlo_pm_state = MT792x_MLO_CHANGED_PS; 2076 1940 } 2077 - 2078 - if (changed & IEEE80211_CHANCTX_CHANGE_PUNCTURING) 2079 - mt7925_mcu_set_eht_pp(mvif->phy->mt76, &mconf->mt76, 2080 - link_conf, NULL); 2081 1941 2082 1942 if (changed & BSS_CHANGED_CQM) 2083 1943 mt7925_mcu_set_rssimonitor(dev, vif); ··· 2276 2144 mctx->bss_conf = NULL; 2277 2145 mconf->mt76.ctx = NULL; 2278 2146 mutex_unlock(&dev->mt76.mutex); 2147 + 2148 + if (link_conf->csa_active) { 2149 + timer_delete_sync(&mvif->csa_timer); 2150 + cancel_work_sync(&mvif->csa_work); 2151 + } 2279 2152 } 2280 2153 2281 2154 static void mt7925_rfkill_poll(struct ieee80211_hw *hw) ··· 2293 2156 mt792x_mutex_release(phy->dev); 2294 2157 2295 2158 wiphy_rfkill_set_hw_state(hw->wiphy, ret == 0); 2159 + } 2160 + 2161 + static int mt7925_switch_vif_chanctx(struct ieee80211_hw *hw, 2162 + struct ieee80211_vif_chanctx_switch *vifs, 2163 + int n_vifs, 2164 + enum ieee80211_chanctx_switch_mode mode) 2165 + { 2166 + return mt7925_assign_vif_chanctx(hw, vifs->vif, vifs->link_conf, 2167 + vifs->new_ctx); 2168 + } 2169 + 2170 + void mt7925_csa_work(struct work_struct *work) 2171 + { 2172 + struct mt792x_vif *mvif; 2173 + struct mt792x_dev *dev; 2174 + struct ieee80211_vif *vif; 2175 + struct ieee80211_bss_conf *link_conf; 2176 + struct mt792x_bss_conf *mconf; 2177 + u8 link_id, roc_rtype; 2178 + int ret = 0; 2179 + 2180 + mvif = (struct mt792x_vif *)container_of(work, struct mt792x_vif, 2181 + csa_work); 2182 + dev = mvif->phy->dev; 2183 + vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv); 2184 + 2185 + if (ieee80211_vif_is_mld(vif)) 2186 + return; 2187 + 2188 + if (!dev->new_ctx) 2189 + return; 2190 + 2191 + link_id = 0; 2192 + mconf = &mvif->bss_conf; 2193 + link_conf = &vif->bss_conf; 2194 + roc_rtype = 
MT7925_ROC_REQ_JOIN; 2195 + 2196 + mt792x_mutex_acquire(dev); 2197 + ret = mt7925_set_roc(mvif->phy, mconf, dev->new_ctx->def.chan, 2198 + 4000, roc_rtype); 2199 + mt792x_mutex_release(dev); 2200 + if (!ret) { 2201 + mt792x_mutex_acquire(dev); 2202 + ret = mt7925_mcu_set_chctx(mvif->phy->mt76, &mconf->mt76, link_conf, 2203 + dev->new_ctx); 2204 + mt792x_mutex_release(dev); 2205 + 2206 + mt7925_abort_roc(mvif->phy, mconf); 2207 + } 2208 + 2209 + ieee80211_chswitch_done(vif, !ret, link_id); 2210 + } 2211 + 2212 + static int mt7925_pre_channel_switch(struct ieee80211_hw *hw, 2213 + struct ieee80211_vif *vif, 2214 + struct ieee80211_channel_switch *chsw) 2215 + { 2216 + if (ieee80211_vif_is_mld(vif)) 2217 + return -EOPNOTSUPP; 2218 + 2219 + if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc) 2220 + return -EOPNOTSUPP; 2221 + 2222 + if (!cfg80211_chandef_usable(hw->wiphy, &chsw->chandef, 2223 + IEEE80211_CHAN_DISABLED)) 2224 + return -EOPNOTSUPP; 2225 + 2226 + return 0; 2227 + } 2228 + 2229 + static void mt7925_channel_switch(struct ieee80211_hw *hw, 2230 + struct ieee80211_vif *vif, 2231 + struct ieee80211_channel_switch *chsw) 2232 + { 2233 + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; 2234 + u16 beacon_interval; 2235 + 2236 + if (ieee80211_vif_is_mld(vif)) 2237 + return; 2238 + 2239 + beacon_interval = vif->bss_conf.beacon_int; 2240 + 2241 + mvif->csa_timer.expires = TU_TO_EXP_TIME(beacon_interval * chsw->count); 2242 + add_timer(&mvif->csa_timer); 2243 + } 2244 + 2245 + static void mt7925_abort_channel_switch(struct ieee80211_hw *hw, 2246 + struct ieee80211_vif *vif, 2247 + struct ieee80211_bss_conf *link_conf) 2248 + { 2249 + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; 2250 + 2251 + timer_delete_sync(&mvif->csa_timer); 2252 + cancel_work_sync(&mvif->csa_work); 2253 + } 2254 + 2255 + static void mt7925_channel_switch_rx_beacon(struct ieee80211_hw *hw, 2256 + struct ieee80211_vif *vif, 2257 + struct ieee80211_channel_switch 
*chsw) 2258 + { 2259 + struct mt792x_dev *dev = mt792x_hw_dev(hw); 2260 + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; 2261 + u16 beacon_interval; 2262 + 2263 + if (ieee80211_vif_is_mld(vif)) 2264 + return; 2265 + 2266 + beacon_interval = vif->bss_conf.beacon_int; 2267 + 2268 + if (cfg80211_chandef_identical(&chsw->chandef, 2269 + &dev->new_ctx->def) && 2270 + chsw->count) { 2271 + mod_timer(&mvif->csa_timer, 2272 + TU_TO_EXP_TIME(beacon_interval * chsw->count)); 2273 + } 2296 2274 } 2297 2275 2298 2276 const struct ieee80211_ops mt7925_ops = { ··· 2473 2221 .change_vif_links = mt7925_change_vif_links, 2474 2222 .change_sta_links = mt7925_change_sta_links, 2475 2223 .rfkill_poll = mt7925_rfkill_poll, 2224 + 2225 + .switch_vif_chanctx = mt7925_switch_vif_chanctx, 2226 + .pre_channel_switch = mt7925_pre_channel_switch, 2227 + .channel_switch = mt7925_channel_switch, 2228 + .abort_channel_switch = mt7925_abort_channel_switch, 2229 + .channel_switch_rx_beacon = mt7925_channel_switch_rx_beacon, 2476 2230 }; 2477 2231 EXPORT_SYMBOL_GPL(mt7925_ops); 2478 2232
+106 -88
drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
··· 1066 1066 static void 1067 1067 mt7925_mcu_sta_hdr_trans_tlv(struct sk_buff *skb, 1068 1068 struct ieee80211_vif *vif, 1069 - struct ieee80211_link_sta *link_sta) 1069 + struct mt792x_link_sta *mlink) 1070 1070 { 1071 - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; 1072 1071 struct sta_rec_hdr_trans *hdr_trans; 1073 1072 struct mt76_wcid *wcid; 1074 1073 struct tlv *tlv; ··· 1081 1082 else 1082 1083 hdr_trans->from_ds = true; 1083 1084 1084 - if (link_sta) { 1085 - struct mt792x_sta *msta = (struct mt792x_sta *)link_sta->sta->drv_priv; 1086 - struct mt792x_link_sta *mlink; 1087 - 1088 - mlink = mt792x_sta_to_link(msta, link_sta->link_id); 1089 - wcid = &mlink->wcid; 1090 - } else { 1091 - wcid = &mvif->sta.deflink.wcid; 1092 - } 1093 - 1094 - if (!wcid) 1085 + if (WARN_ON_ONCE(!mlink)) 1095 1086 return; 1087 + 1088 + wcid = &mlink->wcid; 1096 1089 1097 1090 hdr_trans->dis_rx_hdr_tran = !test_bit(MT_WCID_FLAG_HDR_TRANS, &wcid->flags); 1098 1091 if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags)) { ··· 1095 1104 1096 1105 int mt7925_mcu_wtbl_update_hdr_trans(struct mt792x_dev *dev, 1097 1106 struct ieee80211_vif *vif, 1098 - struct ieee80211_sta *sta, 1099 - int link_id) 1107 + struct mt792x_bss_conf *mconf, 1108 + struct mt792x_link_sta *mlink) 1100 1109 { 1101 - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; 1102 - struct ieee80211_link_sta *link_sta = sta ? &sta->deflink : NULL; 1103 - struct mt792x_link_sta *mlink; 1104 - struct mt792x_bss_conf *mconf; 1105 - struct mt792x_sta *msta; 1106 1110 struct sk_buff *skb; 1107 - 1108 - msta = sta ? 
(struct mt792x_sta *)sta->drv_priv : &mvif->sta; 1109 - 1110 - mlink = mt792x_sta_to_link(msta, link_id); 1111 - link_sta = mt792x_sta_to_link_sta(vif, sta, link_id); 1112 - mconf = mt792x_vif_to_link(mvif, link_id); 1113 1111 1114 1112 skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mconf->mt76, 1115 1113 &mlink->wcid, ··· 1106 1126 if (IS_ERR(skb)) 1107 1127 return PTR_ERR(skb); 1108 1128 1109 - /* starec hdr trans */ 1110 - mt7925_mcu_sta_hdr_trans_tlv(skb, vif, link_sta); 1129 + mt7925_mcu_sta_hdr_trans_tlv(skb, vif, mlink); 1111 1130 return mt76_mcu_skb_send_msg(&dev->mt76, skb, 1112 1131 MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true); 1113 1132 } ··· 1267 1288 return PTR_ERR(skb); 1268 1289 1269 1290 ret = mt7925_mcu_sta_key_tlv(wcid, sta_key_conf, skb, key, cmd, msta); 1270 - if (ret) 1291 + if (ret) { 1292 + dev_kfree_skb(skb); 1271 1293 return ret; 1294 + } 1272 1295 1273 1296 return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true); 1274 1297 } 1275 1298 1276 - int mt7925_mcu_set_mlo_roc(struct mt792x_bss_conf *mconf, u16 sel_links, 1277 - int duration, u8 token_id) 1299 + int mt7925_mcu_set_mlo_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf, 1300 + u16 sel_links, int duration, u8 token_id) 1278 1301 { 1279 1302 struct mt792x_vif *mvif = mconf->vif; 1280 1303 struct ieee80211_vif *vif = container_of((void *)mvif, ··· 1311 1330 .roc[1].len = cpu_to_le16(sizeof(struct roc_acquire_tlv)) 1312 1331 }; 1313 1332 1333 + struct wiphy *wiphy = phy->mt76->hw->wiphy; 1334 + 1314 1335 if (!mconf || hweight16(vif->valid_links) < 2 || 1315 1336 hweight16(sel_links) != 2) 1316 1337 return -EPERM; ··· 1335 1352 is_AG_band |= links[i].chan->band == NL80211_BAND_2GHZ; 1336 1353 } 1337 1354 1338 - if (vif->cfg.eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP) 1355 + if (!(wiphy->iftype_ext_capab[0].mld_capa_and_ops & 1356 + IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS)) 1339 1357 type = is_AG_band ? 
MT7925_ROC_REQ_MLSR_AG : 1340 1358 MT7925_ROC_REQ_MLSR_AA; 1341 1359 else ··· 1705 1721 static void 1706 1722 mt7925_mcu_sta_amsdu_tlv(struct sk_buff *skb, 1707 1723 struct ieee80211_vif *vif, 1708 - struct ieee80211_link_sta *link_sta) 1724 + struct ieee80211_link_sta *link_sta, 1725 + struct mt792x_link_sta *mlink) 1709 1726 { 1710 - struct mt792x_sta *msta = (struct mt792x_sta *)link_sta->sta->drv_priv; 1711 - struct mt792x_link_sta *mlink; 1712 1727 struct sta_rec_amsdu *amsdu; 1713 1728 struct tlv *tlv; 1714 1729 ··· 1723 1740 amsdu->max_amsdu_num = 8; 1724 1741 amsdu->amsdu_en = true; 1725 1742 1726 - mlink = mt792x_sta_to_link(msta, link_sta->link_id); 1727 1743 mlink->wcid.amsdu = true; 1728 1744 1729 1745 switch (link_sta->agg.max_amsdu_len) { ··· 1893 1911 1894 1912 static void 1895 1913 mt7925_mcu_sta_mld_tlv(struct sk_buff *skb, 1896 - struct ieee80211_vif *vif, struct ieee80211_sta *sta) 1914 + struct ieee80211_vif *vif, 1915 + struct ieee80211_sta *sta, 1916 + struct mt792x_bss_conf *mconf, 1917 + struct mt792x_link_sta *mlink) 1897 1918 { 1898 1919 struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; 1899 1920 struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; 1900 - unsigned long valid = mvif->valid_links; 1901 - struct mt792x_bss_conf *mconf; 1902 - struct mt792x_link_sta *mlink; 1921 + struct mt792x_dev *dev = mvif->phy->dev; 1922 + struct mt792x_bss_conf *mconf_pri; 1903 1923 struct sta_rec_mld *mld; 1904 1924 struct tlv *tlv; 1905 - int i, cnt = 0; 1925 + u8 cnt = 0; 1926 + 1927 + /* Primary link always uses driver's deflink WCID. */ 1928 + mconf_pri = (msta->deflink_id != IEEE80211_LINK_UNSPECIFIED) ? 1929 + mt792x_vif_to_link(mvif, msta->deflink_id) : NULL; 1930 + 1931 + /* If caller is operating on deflink, reuse its mconf as primary. 
*/ 1932 + if (!mconf_pri && mlink == &msta->deflink) 1933 + mconf_pri = mconf; 1934 + 1935 + if (!mconf_pri) { 1936 + dev_warn_ratelimited(dev->mt76.dev, 1937 + "mt7925: MLD_TLV_LINK skip (no primary mconf) sta=%pM\n", 1938 + sta->addr); 1939 + return; 1940 + } 1906 1941 1907 1942 tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_MLD, sizeof(*mld)); 1908 1943 mld = (struct sta_rec_mld *)tlv; 1909 1944 memcpy(mld->mac_addr, sta->addr, ETH_ALEN); 1945 + 1910 1946 mld->primary_id = cpu_to_le16(msta->deflink.wcid.idx); 1911 1947 mld->wlan_id = cpu_to_le16(msta->deflink.wcid.idx); 1912 - mld->link_num = min_t(u8, hweight16(mvif->valid_links), 2); 1913 1948 1914 - for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) { 1915 - if (cnt == mld->link_num) 1916 - break; 1949 + /* Always encode primary link first. */ 1950 + mld->link[cnt].wlan_id = cpu_to_le16(msta->deflink.wcid.idx); 1951 + mld->link[cnt++].bss_idx = mconf_pri->mt76.idx; 1917 1952 1918 - mconf = mt792x_vif_to_link(mvif, i); 1919 - mlink = mt792x_sta_to_link(msta, i); 1953 + /* Optionally encode the currently-updated secondary link. 
*/ 1954 + if (mlink && mlink != &msta->deflink && mconf) { 1955 + mld->secondary_id = cpu_to_le16(mlink->wcid.idx); 1920 1956 mld->link[cnt].wlan_id = cpu_to_le16(mlink->wcid.idx); 1921 1957 mld->link[cnt++].bss_idx = mconf->mt76.idx; 1922 - 1923 - if (mlink != &msta->deflink) 1924 - mld->secondary_id = cpu_to_le16(mlink->wcid.idx); 1925 1958 } 1959 + 1960 + mld->link_num = cnt; 1926 1961 } 1927 1962 1928 1963 static void ··· 1960 1961 struct mt792x_vif *mvif = (struct mt792x_vif *)info->vif->drv_priv; 1961 1962 struct mt76_dev *dev = phy->dev; 1962 1963 struct mt792x_bss_conf *mconf; 1964 + struct mt792x_link_sta *mlink; 1963 1965 struct sk_buff *skb; 1964 1966 int conn_state; 1965 1967 1966 1968 mconf = mt792x_vif_to_link(mvif, info->wcid->link_id); 1969 + mlink = container_of(info->wcid, struct mt792x_link_sta, wcid); 1967 1970 1968 1971 skb = __mt76_connac_mcu_alloc_sta_req(dev, &mconf->mt76, info->wcid, 1969 1972 MT7925_STA_UPDATE_MAX_SIZE); ··· 1983 1982 mt7925_mcu_sta_ht_tlv(skb, info->link_sta); 1984 1983 mt7925_mcu_sta_vht_tlv(skb, info->link_sta); 1985 1984 mt76_connac_mcu_sta_uapsd(skb, info->vif, info->link_sta->sta); 1986 - mt7925_mcu_sta_amsdu_tlv(skb, info->vif, info->link_sta); 1985 + mt7925_mcu_sta_amsdu_tlv(skb, info->vif, info->link_sta, mlink); 1987 1986 mt7925_mcu_sta_he_tlv(skb, info->link_sta); 1988 1987 mt7925_mcu_sta_he_6g_tlv(skb, info->link_sta); 1989 1988 mt7925_mcu_sta_eht_tlv(skb, info->link_sta); ··· 1994 1993 info->state); 1995 1994 1996 1995 if (info->state != MT76_STA_INFO_STATE_NONE) { 1997 - mt7925_mcu_sta_mld_tlv(skb, info->vif, info->link_sta->sta); 1996 + mt7925_mcu_sta_mld_tlv(skb, info->vif, 1997 + info->link_sta->sta, 1998 + mconf, mlink); 1999 + 1998 2000 mt7925_mcu_sta_eht_mld_tlv(skb, info->vif, info->link_sta->sta); 1999 2001 } 2000 2002 } ··· 2007 2003 mt76_connac_mcu_add_tlv(skb, STA_REC_MLD_OFF, 2008 2004 sizeof(struct tlv)); 2009 2005 } else { 2010 - mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->link_sta); 2006 
+ if (!info->link_sta) 2007 + mlink = &mvif->sta.deflink; 2008 + 2009 + mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, mlink); 2011 2010 } 2012 2011 2013 2012 return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true); ··· 2018 2011 2019 2012 int mt7925_mcu_sta_update(struct mt792x_dev *dev, 2020 2013 struct ieee80211_link_sta *link_sta, 2021 - struct ieee80211_vif *vif, bool enable, 2014 + struct ieee80211_vif *vif, 2015 + struct mt792x_link_sta *mlink, 2016 + bool enable, 2022 2017 enum mt76_sta_info_state state) 2023 2018 { 2024 2019 struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; ··· 2035 2026 .offload_fw = true, 2036 2027 .rcpi = to_rcpi(rssi), 2037 2028 }; 2038 - struct mt792x_sta *msta; 2039 - struct mt792x_link_sta *mlink; 2040 2029 2041 - if (link_sta) { 2042 - msta = (struct mt792x_sta *)link_sta->sta->drv_priv; 2043 - mlink = mt792x_sta_to_link(msta, link_sta->link_id); 2044 - } 2045 - info.wcid = link_sta ? &mlink->wcid : &mvif->sta.deflink.wcid; 2030 + info.wcid = &mlink->wcid; 2046 2031 info.newly = state != MT76_STA_INFO_STATE_ASSOC; 2047 2032 2048 2033 return mt7925_mcu_sta_cmd(&dev->mphy, &info); ··· 2473 2470 struct ieee80211_bss_conf *link_conf, 2474 2471 struct ieee80211_link_sta *link_sta, 2475 2472 struct ieee80211_chanctx_conf *ctx, 2476 - struct mt76_phy *phy, u16 wlan_idx, 2473 + struct mt76_phy *phy, 2474 + u16 bmc_tx_wlan_idx, 2475 + u16 sta_wlan_idx, 2477 2476 bool enable) 2478 2477 { 2479 2478 struct ieee80211_vif *vif = link_conf->vif; ··· 2484 2479 &link_conf->chanreq.oper; 2485 2480 enum nl80211_band band = chandef->chan->band; 2486 2481 struct mt76_connac_bss_basic_tlv *basic_req; 2487 - struct mt792x_link_sta *mlink; 2488 2482 struct tlv *tlv; 2489 2483 int conn_type; 2490 2484 u8 idx; ··· 2507 2503 basic_req->phymode = mt76_connac_get_phy_mode(phy, vif, band, link_sta); 2508 2504 basic_req->bcn_interval = cpu_to_le16(link_conf->beacon_int); 2509 2505 basic_req->dtim_period = link_conf->dtim_period; 2510 - 
basic_req->bmc_tx_wlan_idx = cpu_to_le16(wlan_idx); 2506 + basic_req->bmc_tx_wlan_idx = cpu_to_le16(bmc_tx_wlan_idx); 2511 2507 basic_req->link_idx = mconf->mt76.idx; 2512 - 2513 - if (link_sta) { 2514 - struct mt792x_sta *msta; 2515 - 2516 - msta = (struct mt792x_sta *)link_sta->sta->drv_priv; 2517 - mlink = mt792x_sta_to_link(msta, link_sta->link_id); 2518 - 2519 - } else { 2520 - mlink = &mconf->vif->sta.deflink; 2521 - } 2522 - 2523 - basic_req->sta_idx = cpu_to_le16(mlink->wcid.idx); 2508 + basic_req->sta_idx = cpu_to_le16(sta_wlan_idx); 2524 2509 basic_req->omac_idx = mconf->mt76.omac_idx; 2525 2510 basic_req->band_idx = mconf->mt76.band_idx; 2526 2511 basic_req->wmm_idx = mconf->mt76.wmm_idx; ··· 2816 2823 &dev_req, sizeof(dev_req), true); 2817 2824 } 2818 2825 2819 - int mt7925_mcu_add_bss_info(struct mt792x_phy *phy, 2820 - struct ieee80211_chanctx_conf *ctx, 2821 - struct ieee80211_bss_conf *link_conf, 2822 - struct ieee80211_link_sta *link_sta, 2823 - int enable) 2826 + int mt7925_mcu_add_bss_info_sta(struct mt792x_phy *phy, 2827 + struct ieee80211_chanctx_conf *ctx, 2828 + struct ieee80211_bss_conf *link_conf, 2829 + struct ieee80211_link_sta *link_sta, 2830 + u16 bmc_tx_wlan_idx, 2831 + u16 sta_wlan_idx, 2832 + int enable) 2824 2833 { 2825 - struct mt792x_vif *mvif = (struct mt792x_vif *)link_conf->vif->drv_priv; 2826 2834 struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf); 2827 2835 struct mt792x_dev *dev = phy->dev; 2828 - struct mt792x_link_sta *mlink_bc; 2829 2836 struct sk_buff *skb; 2830 2837 2831 2838 skb = __mt7925_mcu_alloc_bss_req(&dev->mt76, &mconf->mt76, ··· 2833 2840 if (IS_ERR(skb)) 2834 2841 return PTR_ERR(skb); 2835 2842 2836 - mlink_bc = mt792x_sta_to_link(&mvif->sta, mconf->link_id); 2837 - 2838 2843 /* bss_basic must be first */ 2839 2844 mt7925_mcu_bss_basic_tlv(skb, link_conf, link_sta, ctx, phy->mt76, 2840 - mlink_bc->wcid.idx, enable); 2845 + bmc_tx_wlan_idx, sta_wlan_idx, enable); 2841 2846 
mt7925_mcu_bss_sec_tlv(skb, link_conf); 2842 2847 mt7925_mcu_bss_bmc_tlv(skb, phy, ctx, link_conf); 2843 2848 mt7925_mcu_bss_qos_tlv(skb, link_conf); ··· 2854 2863 2855 2864 return mt76_mcu_skb_send_msg(&dev->mt76, skb, 2856 2865 MCU_UNI_CMD(BSS_INFO_UPDATE), true); 2866 + } 2867 + 2868 + int mt7925_mcu_add_bss_info(struct mt792x_phy *phy, 2869 + struct ieee80211_chanctx_conf *ctx, 2870 + struct ieee80211_bss_conf *link_conf, 2871 + struct ieee80211_link_sta *link_sta, 2872 + int enable) 2873 + { 2874 + struct mt792x_vif *mvif = (struct mt792x_vif *)link_conf->vif->drv_priv; 2875 + struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf); 2876 + struct mt792x_link_sta *mlink_bc; 2877 + struct mt792x_link_sta *mlink; 2878 + 2879 + mlink_bc = mt792x_sta_to_link(&mvif->sta, mconf->link_id); 2880 + 2881 + if (link_sta) { 2882 + struct mt792x_sta *msta = (void *)link_sta->sta->drv_priv; 2883 + 2884 + mlink = mt792x_sta_to_link(msta, link_sta->link_id); 2885 + if (WARN_ON(!mlink)) 2886 + return -EINVAL; 2887 + } else { 2888 + mlink = &mconf->vif->sta.deflink; 2889 + } 2890 + 2891 + return mt7925_mcu_add_bss_info_sta(phy, ctx, link_conf, link_sta, 2892 + mlink_bc->wcid.idx, mlink->wcid.idx, enable); 2857 2893 } 2858 2894 2859 2895 int mt7925_mcu_set_dbdc(struct mt76_phy *phy, bool enable) ··· 3393 3375 u8 rsvd[64]; 3394 3376 } __packed req = { 3395 3377 .tag = cpu_to_le16(0x3), 3396 - .len = cpu_to_le16(sizeof(req) - 4), 3397 3378 3398 3379 .idx = idx, 3399 3380 .env = env_cap, ··· 3421 3404 memcpy(req.type, rule->type, 2); 3422 3405 3423 3406 req.size = cpu_to_le16(seg->len); 3407 + req.len = cpu_to_le16(sizeof(req) + seg->len - 4); 3424 3408 dev->phy.clc_chan_conf = clc->ver == 1 ? 
0xff : rule->flag; 3425 3409 skb = __mt76_mcu_msg_alloc(&dev->mt76, &req, 3426 3410 le16_to_cpu(req.size) + sizeof(req), ··· 3745 3727 memcpy(tx_power_tlv->alpha2, dev->alpha2, sizeof(dev->alpha2)); 3746 3728 tx_power_tlv->n_chan = num_ch; 3747 3729 tx_power_tlv->tag = cpu_to_le16(0x1); 3748 - tx_power_tlv->len = cpu_to_le16(sizeof(*tx_power_tlv)); 3730 + tx_power_tlv->len = cpu_to_le16(msg_len); 3749 3731 3750 3732 switch (band) { 3751 3733 case NL80211_BAND_2GHZ:
+7
drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
··· 693 693 struct ieee80211_bss_conf *link_conf, 694 694 struct ieee80211_link_sta *link_sta, 695 695 int enable); 696 + int mt7925_mcu_add_bss_info_sta(struct mt792x_phy *phy, 697 + struct ieee80211_chanctx_conf *ctx, 698 + struct ieee80211_bss_conf *link_conf, 699 + struct ieee80211_link_sta *link_sta, 700 + u16 bmc_tx_wlan_idx, 701 + u16 sta_wlan_idx, 702 + int enable); 696 703 int mt7925_mcu_set_timing(struct mt792x_phy *phy, 697 704 struct ieee80211_bss_conf *link_conf); 698 705 int mt7925_mcu_set_deep_sleep(struct mt792x_dev *dev, bool enable);
+8 -5
drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
··· 250 250 bool enable); 251 251 int mt7925_mcu_sta_update(struct mt792x_dev *dev, 252 252 struct ieee80211_link_sta *link_sta, 253 - struct ieee80211_vif *vif, bool enable, 253 + struct ieee80211_vif *vif, 254 + struct mt792x_link_sta *mlink, 255 + bool enable, 254 256 enum mt76_sta_info_state state); 255 257 int mt7925_mcu_set_chan_info(struct mt792x_phy *phy, u16 tag); 256 258 int mt7925_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_bss_conf *bss_conf); ··· 300 298 void mt7925_mlo_pm_work(struct work_struct *work); 301 299 void mt7925_scan_work(struct work_struct *work); 302 300 void mt7925_roc_work(struct work_struct *work); 301 + void mt7925_csa_work(struct work_struct *work); 303 302 int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev, 304 303 struct ieee80211_bss_conf *link_conf); 305 304 void mt7925_coredump_work(struct work_struct *work); ··· 352 349 int mt7925_mcu_regval(struct mt792x_dev *dev, u32 regidx, u32 *val, bool set); 353 350 int mt7925_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2, 354 351 enum environment_cap env_cap); 355 - int mt7925_mcu_set_mlo_roc(struct mt792x_bss_conf *mconf, u16 sel_links, 356 - int duration, u8 token_id); 352 + int mt7925_mcu_set_mlo_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf, 353 + u16 sel_links, int duration, u8 token_id); 357 354 int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf, 358 355 struct ieee80211_channel *chan, int duration, 359 356 enum mt7925_roc_req type, u8 token_id); ··· 370 367 int mt7925_mcu_set_rts_thresh(struct mt792x_phy *phy, u32 val); 371 368 int mt7925_mcu_wtbl_update_hdr_trans(struct mt792x_dev *dev, 372 369 struct ieee80211_vif *vif, 373 - struct ieee80211_sta *sta, 374 - int link_id); 370 + struct mt792x_bss_conf *mconf, 371 + struct mt792x_link_sta *mlink); 375 372 int mt7925_mcu_wf_rf_pin_ctrl(struct mt792x_phy *phy); 376 373 377 374 int mt7925_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+2 -1
drivers/net/wireless/mediatek/mt76/mt7925/regd.c
··· 232 232 dev->regd_user) 233 233 return -EINVAL; 234 234 235 - if (mdev->alpha2[0] != '0' && mdev->alpha2[1] != '0') 235 + if ((mdev->alpha2[0] && mdev->alpha2[0] != '0') && 236 + (mdev->alpha2[1] && mdev->alpha2[1] != '0')) 236 237 return 0; 237 238 238 239 /* do not need to update the same country twice */
+7
drivers/net/wireless/mediatek/mt76/mt792x.h
··· 41 41 #define MT792x_MCU_INIT_RETRY_COUNT 10 42 42 #define MT792x_WFSYS_INIT_RETRY_COUNT 2 43 43 44 + #define MT7902_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7902_1.bin" 44 45 #define MT7920_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7961_1a.bin" 45 46 #define MT7921_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7961_1.bin" 46 47 #define MT7922_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7922_1.bin" 47 48 #define MT7925_FIRMWARE_WM "mediatek/mt7925/WIFI_RAM_CODE_MT7925_1_1.bin" 48 49 50 + #define MT7902_ROM_PATCH "mediatek/WIFI_MT7902_patch_mcu_1_1_hdr.bin" 49 51 #define MT7920_ROM_PATCH "mediatek/WIFI_MT7961_patch_mcu_1a_2_hdr.bin" 50 52 #define MT7921_ROM_PATCH "mediatek/WIFI_MT7961_patch_mcu_1_2_hdr.bin" 51 53 #define MT7922_ROM_PATCH "mediatek/WIFI_MT7922_patch_mcu_1_1_hdr.bin" ··· 97 95 98 96 struct mt792x_link_sta { 99 97 struct mt76_wcid wcid; /* must be first */ 98 + struct rcu_head rcu_head; 100 99 101 100 u32 airtime_ac[8]; 102 101 ··· 451 448 static inline char *mt792x_ram_name(struct mt792x_dev *dev) 452 449 { 453 450 switch (mt76_chip(&dev->mt76)) { 451 + case 0x7902: 452 + return MT7902_FIRMWARE_WM; 454 453 case 0x7920: 455 454 return MT7920_FIRMWARE_WM; 456 455 case 0x7922: ··· 467 462 static inline char *mt792x_patch_name(struct mt792x_dev *dev) 468 463 { 469 464 switch (mt76_chip(&dev->mt76)) { 465 + case 0x7902: 466 + return MT7902_ROM_PATCH; 470 467 case 0x7920: 471 468 return MT7920_ROM_PATCH; 472 469 case 0x7922:
+10 -4
drivers/net/wireless/mediatek/mt76/mt792x_core.c
··· 151 151 cancel_work_sync(&dev->reset_work); 152 152 mt76_connac_free_pending_tx_skbs(&dev->pm, NULL); 153 153 154 - if (is_mt7921(&dev->mt76)) { 154 + if (is_connac2(&dev->mt76)) { 155 155 mt792x_mutex_acquire(dev); 156 156 mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, false, false); 157 157 mt792x_mutex_release(dev); ··· 691 691 ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID); 692 692 ieee80211_hw_set(hw, SUPPORTS_ONLY_HE_MULTI_BSSID); 693 693 694 - if (is_mt7921(&dev->mt76)) { 695 - ieee80211_hw_set(hw, CHANCTX_STA_CSA); 696 - } 694 + ieee80211_hw_set(hw, CHANCTX_STA_CSA); 695 + 697 696 698 697 if (dev->pm.enable) 699 698 ieee80211_hw_set(hw, CONNECTION_MONITOR); ··· 925 926 int mt792x_load_firmware(struct mt792x_dev *dev) 926 927 { 927 928 int ret; 929 + 930 + mt76_connac_mcu_restart(&dev->mt76); 931 + 932 + if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC_FW_STATE, 933 + MT_TOP_MISC2_FW_PWR_ON, 1000)) 934 + dev_warn(dev->mt76.dev, 935 + "MCU is not ready for firmware download\n"); 928 936 929 937 ret = mt76_connac2_load_patch(&dev->mt76, mt792x_patch_name(dev)); 930 938 if (ret)
+17 -1
drivers/net/wireless/mediatek/mt76/mt792x_dma.c
··· 103 103 mt76_wr(dev, MT_WFDMA0_TX_RING3_EXT_CTRL, PREFETCH(0x0400, 0x10)); 104 104 mt76_wr(dev, MT_WFDMA0_TX_RING15_EXT_CTRL, PREFETCH(0x0500, 0x4)); 105 105 mt76_wr(dev, MT_WFDMA0_TX_RING16_EXT_CTRL, PREFETCH(0x0540, 0x4)); 106 + } else if (is_mt7902(&dev->mt76)) { 107 + /* rx ring */ 108 + mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL, PREFETCH(0x0000, 0x4)); 109 + mt76_wr(dev, MT_WFDMA0_RX_RING1_EXT_CTRL, PREFETCH(0x0040, 0x4)); 110 + mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL, PREFETCH(0x0080, 0x4)); 111 + mt76_wr(dev, MT_WFDMA0_RX_RING3_EXT_CTRL, PREFETCH(0x00c0, 0x4)); 112 + /* tx ring */ 113 + mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, PREFETCH(0x0100, 0x4)); 114 + mt76_wr(dev, MT_WFDMA0_TX_RING1_EXT_CTRL, PREFETCH(0x0140, 0x4)); 115 + mt76_wr(dev, MT_WFDMA0_TX_RING2_EXT_CTRL, PREFETCH(0x0180, 0x4)); 116 + mt76_wr(dev, MT_WFDMA0_TX_RING3_EXT_CTRL, PREFETCH(0x01c0, 0x4)); 117 + mt76_wr(dev, MT_WFDMA0_TX_RING4_EXT_CTRL, PREFETCH(0x0200, 0x4)); 118 + mt76_wr(dev, MT_WFDMA0_TX_RING5_EXT_CTRL, PREFETCH(0x0240, 0x4)); 119 + mt76_wr(dev, MT_WFDMA0_TX_RING6_EXT_CTRL, PREFETCH(0x0280, 0x4)); 120 + mt76_wr(dev, MT_WFDMA0_TX_RING15_EXT_CTRL, PREFETCH(0x02c0, 0x4)); 121 + mt76_wr(dev, MT_WFDMA0_TX_RING16_EXT_CTRL, PREFETCH(0x0300, 0x4)); 106 122 } else { 107 123 /* rx ring */ 108 124 mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL, PREFETCH(0x0, 0x4)); ··· 372 356 373 357 int mt792x_wfsys_reset(struct mt792x_dev *dev) 374 358 { 375 - u32 addr = is_mt7921(&dev->mt76) ? 0x18000140 : 0x7c000140; 359 + u32 addr = is_connac2(&dev->mt76) ? 0x18000140 : 0x7c000140; 376 360 377 361 mt76_clear(dev, addr, WFSYS_SW_RST_B); 378 362 msleep(50);
+1 -1
drivers/net/wireless/mediatek/mt76/mt792x_mac.c
··· 375 375 } 376 376 377 377 if (!mt792x_mcu_fw_pmctrl(dev)) { 378 - cancel_delayed_work_sync(&mphy->mac_work); 378 + cancel_delayed_work(&mphy->mac_work); 379 379 return; 380 380 } 381 381 out:
+6
drivers/net/wireless/mediatek/mt76/mt792x_regs.h
··· 25 25 #define MT_PLE_AC_QEMPTY(_n) MT_PLE(0x500 + 0x40 * (_n)) 26 26 #define MT_PLE_AMSDU_PACK_MSDU_CNT(n) MT_PLE(0x10e0 + ((n) << 2)) 27 27 28 + #define MT_PSE_BASE 0x820c8000 29 + 28 30 /* TMAC: band 0(0x21000), band 1(0xa1000) */ 29 31 #define MT_WF_TMAC_BASE(_band) ((_band) ? 0x820f4000 : 0x820e4000) 30 32 #define MT_WF_TMAC(_band, ofs) (MT_WF_TMAC_BASE(_band) + (ofs)) ··· 391 389 #define MT_CBTOP_RGU(ofs) (0x70002000 + (ofs)) 392 390 #define MT_CBTOP_RGU_WF_SUBSYS_RST MT_CBTOP_RGU(0x600) 393 391 #define MT_CBTOP_RGU_WF_SUBSYS_RST_WF_WHOLE_PATH BIT(0) 392 + 393 + #define MT7925_CBTOP_RGU_WF_SUBSYS_RST 0x70028600 394 + #define MT7925_WFSYS_INIT_DONE_ADDR 0x184c1604 395 + #define MT7925_WFSYS_INIT_DONE 0x00001d1e 394 396 395 397 #define MT_HW_BOUND 0x70010020 396 398 #define MT_HW_CHIPID 0x70010200
+43 -8
drivers/net/wireless/mediatek/mt76/mt792x_usb.c
··· 206 206 mt792xu_uhw_wr(&dev->mt76, MT_SSUSB_EPCTL_CSR_EP_RST_OPT, val); 207 207 } 208 208 209 + struct mt792xu_wfsys_desc { 210 + u32 rst_reg; 211 + u32 done_reg; 212 + u32 done_mask; 213 + u32 done_val; 214 + u32 delay_ms; 215 + bool need_status_sel; 216 + }; 217 + 218 + static const struct mt792xu_wfsys_desc mt7921_wfsys_desc = { 219 + .rst_reg = MT_CBTOP_RGU_WF_SUBSYS_RST, 220 + .done_reg = MT_UDMA_CONN_INFRA_STATUS, 221 + .done_mask = MT_UDMA_CONN_WFSYS_INIT_DONE, 222 + .done_val = MT_UDMA_CONN_WFSYS_INIT_DONE, 223 + .delay_ms = 0, 224 + .need_status_sel = true, 225 + }; 226 + 227 + static const struct mt792xu_wfsys_desc mt7925_wfsys_desc = { 228 + .rst_reg = MT7925_CBTOP_RGU_WF_SUBSYS_RST, 229 + .done_reg = MT7925_WFSYS_INIT_DONE_ADDR, 230 + .done_mask = U32_MAX, 231 + .done_val = MT7925_WFSYS_INIT_DONE, 232 + .delay_ms = 20, 233 + .need_status_sel = false, 234 + }; 235 + 209 236 int mt792xu_dma_init(struct mt792x_dev *dev, bool resume) 210 237 { 211 238 int err; ··· 263 236 264 237 int mt792xu_wfsys_reset(struct mt792x_dev *dev) 265 238 { 239 + const struct mt792xu_wfsys_desc *desc = is_mt7925(&dev->mt76) ? 
240 + &mt7925_wfsys_desc : 241 + &mt7921_wfsys_desc; 266 242 u32 val; 267 243 int i; 268 244 269 245 mt792xu_epctl_rst_opt(dev, false); 270 246 271 - val = mt792xu_uhw_rr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST); 247 + val = mt792xu_uhw_rr(&dev->mt76, desc->rst_reg); 272 248 val |= MT_CBTOP_RGU_WF_SUBSYS_RST_WF_WHOLE_PATH; 273 - mt792xu_uhw_wr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST, val); 249 + mt792xu_uhw_wr(&dev->mt76, desc->rst_reg, val); 274 250 275 - usleep_range(10, 20); 251 + if (desc->delay_ms) 252 + msleep(desc->delay_ms); 253 + else 254 + usleep_range(10, 20); 276 255 277 - val = mt792xu_uhw_rr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST); 256 + val = mt792xu_uhw_rr(&dev->mt76, desc->rst_reg); 278 257 val &= ~MT_CBTOP_RGU_WF_SUBSYS_RST_WF_WHOLE_PATH; 279 - mt792xu_uhw_wr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST, val); 258 + mt792xu_uhw_wr(&dev->mt76, desc->rst_reg, val); 280 259 281 - mt792xu_uhw_wr(&dev->mt76, MT_UDMA_CONN_INFRA_STATUS_SEL, 0); 260 + if (desc->need_status_sel) 261 + mt792xu_uhw_wr(&dev->mt76, MT_UDMA_CONN_INFRA_STATUS_SEL, 0); 262 + 282 263 for (i = 0; i < MT792x_WFSYS_INIT_RETRY_COUNT; i++) { 283 - val = mt792xu_uhw_rr(&dev->mt76, MT_UDMA_CONN_INFRA_STATUS); 284 - if (val & MT_UDMA_CONN_WFSYS_INIT_DONE) 264 + val = mt792xu_uhw_rr(&dev->mt76, desc->done_reg); 265 + if ((val & desc->done_mask) == desc->done_val) 285 266 break; 286 267 287 268 msleep(100);
+30 -6
drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
··· 226 226 #define RADAR_BACKGROUND 2 227 227 struct mt7996_dev *dev = data; 228 228 struct mt7996_phy *phy = mt7996_band_phy(dev, NL80211_BAND_5GHZ); 229 - int rdd_idx; 229 + struct cfg80211_chan_def *chandef; 230 + int rdd_idx, ret; 230 231 231 232 if (!phy || !val || val > RADAR_BACKGROUND) 232 233 return -EINVAL; 233 234 234 - if (val == RADAR_BACKGROUND && !dev->rdd2_phy) { 235 - dev_err(dev->mt76.dev, "Background radar is not enabled\n"); 236 - return -EINVAL; 235 + if (test_bit(MT76_SCANNING, &phy->mt76->state)) 236 + return -EBUSY; 237 + 238 + if (val == RADAR_BACKGROUND) { 239 + if (!dev->rdd2_phy || !cfg80211_chandef_valid(&dev->rdd2_chandef)) { 240 + dev_err(dev->mt76.dev, "Background radar is not enabled\n"); 241 + return -EINVAL; 242 + } 243 + chandef = &dev->rdd2_chandef; 244 + } else { 245 + chandef = &phy->mt76->chandef; 237 246 } 238 247 239 248 rdd_idx = mt7996_get_rdd_idx(phy, val == RADAR_BACKGROUND); ··· 250 241 dev_err(dev->mt76.dev, "No RDD found\n"); 251 242 return -EINVAL; 252 243 } 244 + 245 + ret = cfg80211_chandef_dfs_required(dev->mt76.hw->wiphy, chandef, 246 + NL80211_IFTYPE_AP); 247 + if (ret <= 0) 248 + return ret; 253 249 254 250 return mt7996_mcu_rdd_cmd(dev, RDD_RADAR_EMULATE, rdd_idx, 0); 255 251 } ··· 640 626 { 641 627 struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; 642 628 struct mt7996_vif *mvif = msta->vif; 643 - struct mt7996_dev *dev = mvif->deflink.phy->dev; 629 + struct mt7996_phy *phy = mt7996_vif_link_phy(&mvif->deflink); 644 630 struct ieee80211_link_sta *link_sta; 645 631 struct seq_file *s = data; 646 632 struct ieee80211_vif *vif; 633 + struct mt7996_dev *dev; 647 634 unsigned int link_id; 648 635 636 + if (!phy) 637 + return; 638 + 649 639 vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv); 640 + dev = phy->dev; 650 641 651 642 rcu_read_lock(); 652 643 ··· 998 979 #define LONG_PREAMBLE 1 999 980 struct ieee80211_link_sta *link_sta = file->private_data; 1000 981 struct mt7996_sta *msta = 
(struct mt7996_sta *)link_sta->sta->drv_priv; 1001 - struct mt7996_dev *dev = msta->vif->deflink.phy->dev; 982 + struct mt7996_phy *link_phy = mt7996_vif_link_phy(&msta->vif->deflink); 1002 983 struct mt7996_sta_link *msta_link; 1003 984 struct ra_rate phy = {}; 985 + struct mt7996_dev *dev; 1004 986 char buf[100]; 1005 987 int ret; 1006 988 u16 gi, ltf; 989 + 990 + if (!link_phy) 991 + return -EINVAL; 1007 992 1008 993 if (count >= sizeof(buf)) 1009 994 return -EINVAL; ··· 1031 1008 * spe - off: 0, on: 1 1032 1009 * ltf - 1xltf: 0, 2xltf: 1, 4xltf: 2 1033 1010 */ 1011 + dev = link_phy->dev; 1034 1012 if (sscanf(buf, "%hhu %hhu %hhu %hhu %hu %hhu %hhu %hhu %hhu %hu", 1035 1013 &phy.mode, &phy.bw, &phy.mcs, &phy.nss, &gi, 1036 1014 &phy.preamble, &phy.stbc, &phy.ldpc, &phy.spe, &ltf) != 10) {
+130 -78
drivers/net/wireless/mediatek/mt76/mt7996/dma.c
··· 128 128 129 129 /* data tx queue */ 130 130 if (is_mt7996(&dev->mt76)) { 131 - TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7996_TXQ_BAND0); 132 131 if (dev->hif2) { 133 - /* default bn1:ring19 bn2:ring21 */ 134 - TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, 135 - MT7996_TXQ_BAND1); 136 - TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND2, 137 - MT7996_TXQ_BAND2); 132 + if (mt76_npu_device_active(&dev->mt76)) { 133 + TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND2, 134 + MT7996_TXQ_BAND2); 135 + TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND0, 136 + MT7996_TXQ_BAND0); 137 + TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND1, 138 + MT7996_TXQ_BAND1); 139 + } else { 140 + /* default bn1:ring19 bn2:ring21 */ 141 + TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, 142 + MT7996_TXQ_BAND0); 143 + TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, 144 + MT7996_TXQ_BAND1); 145 + TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND2, 146 + MT7996_TXQ_BAND2); 147 + } 138 148 } else { 139 149 /* single pcie bn0/1:ring18 bn2:ring19 */ 150 + TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, 151 + MT7996_TXQ_BAND0); 140 152 TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND1, 141 153 MT7996_TXQ_BAND1); 142 154 } ··· 362 350 if (!mt7996_has_wa(dev) || mt76_npu_device_active(&dev->mt76)) 363 351 irq_mask &= ~(MT_INT_RX(MT_RXQ_MAIN_WA) | 364 352 MT_INT_RX(MT_RXQ_BAND1_WA)); 353 + if (is_mt7996(&dev->mt76) && mt76_npu_device_active(&dev->mt76)) 354 + irq_mask &= ~(MT_INT_RX(MT_RXQ_TXFREE_BAND0) | 355 + MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND2)); 365 356 irq_mask = reset ? 
MT_INT_MCU_CMD : irq_mask; 366 357 367 358 mt7996_irq_enable(dev, irq_mask); ··· 445 430 MT_WFDMA_HOST_CONFIG_BAND1_PCIE1 | 446 431 MT_WFDMA_HOST_CONFIG_BAND2_PCIE1); 447 432 448 - if (is_mt7996(&dev->mt76)) 449 - mt76_set(dev, MT_WFDMA_HOST_CONFIG, 450 - MT_WFDMA_HOST_CONFIG_BAND2_PCIE1); 451 - else 433 + if (is_mt7996(&dev->mt76)) { 434 + if (mt76_npu_device_active(&dev->mt76)) 435 + mt76_set(dev, MT_WFDMA_HOST_CONFIG, 436 + MT_WFDMA_HOST_CONFIG_BAND0_PCIE1); 437 + else 438 + mt76_set(dev, MT_WFDMA_HOST_CONFIG, 439 + MT_WFDMA_HOST_CONFIG_BAND2_PCIE1); 440 + } else { 452 441 mt76_set(dev, MT_WFDMA_HOST_CONFIG, 453 442 MT_WFDMA_HOST_CONFIG_BAND1_PCIE1); 443 + } 454 444 455 445 /* AXI read outstanding number */ 456 446 mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL, 457 447 MT_WFDMA_AXI_R2A_CTRL_OUTSTAND_MASK, 0x14); 458 448 459 - if (dev->hif2->speed < PCIE_SPEED_5_0GT || 460 - (dev->hif2->speed == PCIE_SPEED_5_0GT && 461 - dev->hif2->width < PCIE_LNK_X2)) { 462 - mt76_rmw(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs, 463 - WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK, 464 - FIELD_PREP(WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK, 465 - 0x1)); 466 - mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL2, 467 - MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK, 468 - FIELD_PREP(MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK, 469 - 0x1)); 470 - } else if (dev->hif2->speed < PCIE_SPEED_8_0GT || 471 - (dev->hif2->speed == PCIE_SPEED_8_0GT && 472 - dev->hif2->width < PCIE_LNK_X2)) { 473 - mt76_rmw(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs, 474 - WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK, 475 - FIELD_PREP(WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK, 476 - 0x2)); 477 - mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL2, 478 - MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK, 479 - FIELD_PREP(MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK, 480 - 0x2)); 449 + if (!is_mt7996(&dev->mt76) || 450 + !mt76_npu_device_active(&dev->mt76)) { 451 + if (dev->hif2->speed < PCIE_SPEED_5_0GT || 452 + (dev->hif2->speed == PCIE_SPEED_5_0GT && 453 + dev->hif2->width < PCIE_LNK_X2)) { 454 + mt76_rmw(dev, 455 + 
WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs, 456 + WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK, 457 + FIELD_PREP(WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK, 458 + 0x1)); 459 + mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL2, 460 + MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK, 461 + FIELD_PREP(MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK, 462 + 0x1)); 463 + } else if (dev->hif2->speed < PCIE_SPEED_8_0GT || 464 + (dev->hif2->speed == PCIE_SPEED_8_0GT && 465 + dev->hif2->width < PCIE_LNK_X2)) { 466 + mt76_rmw(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs, 467 + WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK, 468 + FIELD_PREP(WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK, 469 + 0x2)); 470 + mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL2, 471 + MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK, 472 + FIELD_PREP(MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK, 473 + 0x2)); 474 + } 481 475 } 482 476 483 477 /* WFDMA rx threshold */ ··· 521 497 int mt7996_dma_rro_init(struct mt7996_dev *dev) 522 498 { 523 499 struct mt76_dev *mdev = &dev->mt76; 524 - u32 irq_mask; 500 + u32 size; 525 501 int ret; 526 502 527 503 if (dev->mt76.hwrro_mode == MT76_HWRRO_V3_1) { ··· 548 524 mt76_queue_reset(dev, &mdev->q_rx[MT_RXQ_RRO_RXDMAD_C], 549 525 true); 550 526 } 551 - goto start_hw_rro; 527 + 528 + return 0; 552 529 } 553 530 554 531 /* ind cmd */ ··· 570 545 if (mtk_wed_device_active(&mdev->mmio.wed) && 571 546 mtk_wed_get_rx_capa(&mdev->mmio.wed)) 572 547 mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].wed = &mdev->mmio.wed; 548 + 549 + size = is_mt7996(mdev) && mt76_npu_device_active(mdev) 550 + ? 
MT7996_NPU_RX_RING_SIZE / 4 : MT7996_RX_RING_SIZE; 573 551 ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0], 574 552 MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND0), 575 - MT7996_RX_RING_SIZE, 576 - MT7996_RX_MSDU_PAGE_SIZE, 553 + size, MT7996_RX_MSDU_PAGE_SIZE, 577 554 MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND0)); 578 555 if (ret) 579 556 return ret; ··· 587 560 if (mtk_wed_device_active(&mdev->mmio.wed) && 588 561 mtk_wed_get_rx_capa(&mdev->mmio.wed)) 589 562 mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].wed = &mdev->mmio.wed; 563 + 564 + size = is_mt7996(mdev) && mt76_npu_device_active(mdev) 565 + ? MT7996_NPU_RX_RING_SIZE / 2 : MT7996_RX_RING_SIZE; 590 566 ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1], 591 567 MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND1), 592 - MT7996_RX_RING_SIZE, 593 - MT7996_RX_MSDU_PAGE_SIZE, 568 + size, MT7996_RX_MSDU_PAGE_SIZE, 594 569 MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND1)); 595 570 if (ret) 596 571 return ret; ··· 605 576 if (mtk_wed_device_active(&mdev->mmio.wed) && 606 577 mtk_wed_get_rx_capa(&mdev->mmio.wed)) 607 578 mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].wed = &mdev->mmio.wed; 579 + 580 + size = is_mt7996(mdev) && mt76_npu_device_active(mdev) 581 + ? 
MT7996_NPU_RX_RING_SIZE : MT7996_RX_RING_SIZE; 608 582 ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2], 609 583 MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND2), 610 - MT7996_RX_RING_SIZE, 611 - MT7996_RX_MSDU_PAGE_SIZE, 584 + size, MT7996_RX_MSDU_PAGE_SIZE, 612 585 MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND2)); 613 586 if (ret) 614 587 return ret; 615 588 } 616 589 617 - start_hw_rro: 618 - if (mtk_wed_device_active(&mdev->mmio.wed)) { 619 - irq_mask = mdev->mmio.irqmask | 590 + return 0; 591 + } 592 + 593 + void mt7996_dma_rro_start(struct mt7996_dev *dev) 594 + { 595 + u32 irq_mask; 596 + 597 + if (mtk_wed_device_active(&dev->mt76.mmio.wed)) { 598 + irq_mask = dev->mt76.mmio.irqmask | 620 599 MT_INT_TX_DONE_BAND2; 621 600 622 601 mt76_wr(dev, MT_INT_MASK_CSR, irq_mask); 623 - mtk_wed_device_start_hw_rro(&mdev->mmio.wed, irq_mask, false); 602 + mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, irq_mask, 603 + false); 624 604 mt7996_irq_enable(dev, irq_mask); 625 - } else { 626 - if (is_mt7996(&dev->mt76)) { 627 - mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND1, 628 - mt76_dma_rx_poll); 629 - mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND2, 630 - mt76_dma_rx_poll); 631 - mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND2, 632 - mt76_dma_rx_poll); 633 - } else { 634 - mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND1, 635 - mt76_dma_rx_poll); 636 - } 637 - 638 - mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND0, mt76_dma_rx_poll); 639 - if (dev->mt76.hwrro_mode == MT76_HWRRO_V3_1) { 640 - mt76_queue_rx_init(dev, MT_RXQ_RRO_RXDMAD_C, 641 - mt76_dma_rx_poll); 642 - } else { 643 - mt76_queue_rx_init(dev, MT_RXQ_RRO_IND, 644 - mt76_dma_rx_poll); 645 - mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND0, 646 - mt76_dma_rx_poll); 647 - } 648 - 649 - if (!mt76_npu_device_active(&dev->mt76)) 650 - mt7996_irq_enable(dev, MT_INT_RRO_RX_DONE); 605 + return; 651 606 } 652 607 653 - return 0; 608 + if (is_mt7996(&dev->mt76)) { 609 + mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND1, 610 + mt76_dma_rx_poll); 611 + 
mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND2, 612 + mt76_dma_rx_poll); 613 + mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND2, 614 + mt76_dma_rx_poll); 615 + } else { 616 + mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND1, 617 + mt76_dma_rx_poll); 618 + } 619 + 620 + mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND0, mt76_dma_rx_poll); 621 + if (dev->mt76.hwrro_mode == MT76_HWRRO_V3_1) { 622 + mt76_queue_rx_init(dev, MT_RXQ_RRO_RXDMAD_C, 623 + mt76_dma_rx_poll); 624 + } else { 625 + mt76_queue_rx_init(dev, MT_RXQ_RRO_IND, 626 + mt76_dma_rx_poll); 627 + mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND0, 628 + mt76_dma_rx_poll); 629 + } 630 + 631 + if (!mt76_npu_device_active(&dev->mt76)) 632 + mt7996_irq_enable(dev, MT_INT_RRO_RX_DONE); 654 633 } 655 634 656 635 int mt7996_dma_init(struct mt7996_dev *dev) ··· 679 642 mt7996_dma_disable(dev, true); 680 643 681 644 /* init tx queue */ 682 - ret = mt7996_init_tx_queues(&dev->phy, 683 - MT_TXQ_ID(dev->mphy.band_idx), 684 - MT7996_TX_RING_SIZE, 685 - MT_TXQ_RING_BASE(0), 686 - wed); 645 + if (is_mt7996(&dev->mt76) && mt76_npu_device_active(&dev->mt76)) 646 + ret = mt7996_init_tx_queues(&dev->phy, MT_TXQ_ID(0), 647 + MT7996_NPU_TX_RING_SIZE, 648 + MT_TXQ_RING_BASE(0) + hif1_ofs, 649 + NULL); 650 + else 651 + ret = mt7996_init_tx_queues(&dev->phy, 652 + MT_TXQ_ID(dev->mphy.band_idx), 653 + MT7996_TX_RING_SIZE, 654 + MT_TXQ_RING_BASE(0), wed); 687 655 if (ret) 688 656 return ret; 689 657 ··· 756 714 (is_mt7992(&dev->mt76)))) { 757 715 dev->mt76.q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE; 758 716 dev->mt76.q_rx[MT_RXQ_MAIN_WA].wed = wed; 717 + } else if (is_mt7992(&dev->mt76) && 718 + mt76_npu_device_active(&dev->mt76)) { 719 + dev->mt76.q_rx[MT_RXQ_MAIN_WA].flags = MT_NPU_Q_TXFREE(0); 759 720 } 760 721 761 722 if (mt7996_has_wa(dev)) { ··· 891 846 /* tx free notify event from WA for band0 */ 892 847 dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE; 893 848 dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed; 849 + } else if 
(mt76_npu_device_active(&dev->mt76)) { 850 + dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_NPU_Q_TXFREE(0); 894 851 } 895 852 896 853 ret = mt76_queue_alloc(dev, ··· 906 859 } 907 860 908 861 if (mt7996_band_valid(dev, MT_BAND2)) { 862 + u32 size; 863 + 909 864 /* rx rro data queue for band2 */ 910 865 dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags = 911 866 MT_WED_RRO_Q_DATA(1) | MT_QFLAG_WED_RRO_EN; 912 867 if (mtk_wed_device_active(wed) && 913 868 mtk_wed_get_rx_capa(wed)) 914 869 dev->mt76.q_rx[MT_RXQ_RRO_BAND2].wed = wed; 870 + 871 + size = is_mt7996(&dev->mt76) && 872 + mt76_npu_device_active(&dev->mt76) 873 + ? MT7996_NPU_RX_RING_SIZE : MT7996_RX_RING_SIZE; 915 874 ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND2], 916 875 MT_RXQ_ID(MT_RXQ_RRO_BAND2), 917 - MT7996_RX_RING_SIZE, 918 - MT7996_RX_BUF_SIZE, 876 + size, MT7996_RX_BUF_SIZE, 919 877 MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND2) + hif1_ofs); 920 878 if (ret) 921 879 return ret;
+37 -27
drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
··· 33 33 if (dev->var.fem == MT7996_FEM_INT) 34 34 return MT7992_EEPROM_DEFAULT_23_INT; 35 35 return MT7992_EEPROM_DEFAULT_23; 36 + case MT7992_VAR_TYPE_24: 37 + return MT7992_EEPROM_DEFAULT_24; 36 38 case MT7992_VAR_TYPE_44: 37 39 default: 38 40 if (dev->var.fem == MT7996_FEM_INT) ··· 155 153 156 154 dev_warn(dev->mt76.dev, "eeprom load fail, use default bin\n"); 157 155 memcpy(eeprom, fw->data, MT7996_EEPROM_SIZE); 158 - dev->flash_mode = true; 156 + dev->eeprom_mode = EEPROM_MODE_DEFAULT_BIN; 159 157 160 158 out: 161 159 release_firmware(fw); ··· 165 163 166 164 static int mt7996_eeprom_load(struct mt7996_dev *dev) 167 165 { 166 + u32 eeprom_blk_size, block_num; 168 167 bool use_default = false; 169 - int ret; 168 + int ret, i; 170 169 171 170 ret = mt76_eeprom_init(&dev->mt76, MT7996_EEPROM_SIZE); 172 171 if (ret < 0) 173 172 return ret; 174 173 175 174 if (ret && !mt7996_check_eeprom(dev)) { 176 - dev->flash_mode = true; 175 + dev->eeprom_mode = EEPROM_MODE_FLASH; 177 176 goto out; 178 177 } 179 178 180 - if (!dev->flash_mode) { 181 - u32 eeprom_blk_size = MT7996_EEPROM_BLOCK_SIZE; 182 - u32 block_num = DIV_ROUND_UP(MT7996_EEPROM_SIZE, eeprom_blk_size); 179 + memset(dev->mt76.eeprom.data, 0, MT7996_EEPROM_SIZE); 180 + if (mt7996_has_ext_eeprom(dev)) { 181 + /* external eeprom mode */ 182 + dev->eeprom_mode = EEPROM_MODE_EXT; 183 + eeprom_blk_size = MT7996_EXT_EEPROM_BLOCK_SIZE; 184 + } else { 183 185 u8 free_block_num; 184 - int i; 185 186 186 - memset(dev->mt76.eeprom.data, 0, MT7996_EEPROM_SIZE); 187 - ret = mt7996_mcu_get_eeprom_free_block(dev, &free_block_num); 187 + /* efuse mode */ 188 + dev->eeprom_mode = EEPROM_MODE_EFUSE; 189 + eeprom_blk_size = MT7996_EEPROM_BLOCK_SIZE; 190 + ret = mt7996_mcu_get_efuse_free_block(dev, &free_block_num); 188 191 if (ret < 0) 189 192 return ret; 190 193 ··· 198 191 use_default = true; 199 192 goto out; 200 193 } 194 + } 201 195 202 - /* check if eeprom data from fw is valid */ 203 - if (mt7996_mcu_get_eeprom(dev, 0, 
NULL, 0) || 204 - mt7996_check_eeprom(dev)) { 196 + /* check if eeprom data from fw is valid */ 197 + if (mt7996_mcu_get_eeprom(dev, 0, NULL, eeprom_blk_size, 198 + dev->eeprom_mode) || 199 + mt7996_check_eeprom(dev)) { 200 + use_default = true; 201 + goto out; 202 + } 203 + 204 + /* read eeprom data from fw */ 205 + block_num = DIV_ROUND_UP(MT7996_EEPROM_SIZE, eeprom_blk_size); 206 + for (i = 1; i < block_num; i++) { 207 + u32 len = eeprom_blk_size; 208 + 209 + if (i == block_num - 1) 210 + len = MT7996_EEPROM_SIZE % eeprom_blk_size; 211 + ret = mt7996_mcu_get_eeprom(dev, i * eeprom_blk_size, 212 + NULL, len, dev->eeprom_mode); 213 + if (ret && ret != -EINVAL) { 205 214 use_default = true; 206 215 goto out; 207 - } 208 - 209 - /* read eeprom data from fw */ 210 - for (i = 1; i < block_num; i++) { 211 - u32 len = eeprom_blk_size; 212 - 213 - if (i == block_num - 1) 214 - len = MT7996_EEPROM_SIZE % eeprom_blk_size; 215 - ret = mt7996_mcu_get_eeprom(dev, i * eeprom_blk_size, 216 - NULL, len); 217 - if (ret && ret != -EINVAL) { 218 - use_default = true; 219 - goto out; 220 - } 221 216 } 222 217 } 223 218 ··· 394 385 return false; 395 386 break; 396 387 case MT7992_DEVICE_ID: 397 - if (dev->var.type == MT7992_VAR_TYPE_23) 388 + if (dev->var.type == MT7992_VAR_TYPE_23 || 389 + dev->var.type == MT7992_VAR_TYPE_24) 398 390 return false; 399 391 break; 400 392 case MT7990_DEVICE_ID: {
+91 -19
drivers/net/wireless/mediatek/mt76/mt7996/init.c
··· 34 34 BIT(NL80211_CHAN_WIDTH_40) | 35 35 BIT(NL80211_CHAN_WIDTH_80) | 36 36 BIT(NL80211_CHAN_WIDTH_160), 37 + .beacon_int_min_gcd = 100, 38 + }; 39 + 40 + static const struct ieee80211_iface_combination if_comb_global_7992 = { 41 + .limits = &if_limits_global, 42 + .n_limits = 1, 43 + .max_interfaces = 32, 44 + .num_different_channels = MT7996_MAX_RADIOS - 1, 45 + .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | 46 + BIT(NL80211_CHAN_WIDTH_20) | 47 + BIT(NL80211_CHAN_WIDTH_40) | 48 + BIT(NL80211_CHAN_WIDTH_80) | 49 + BIT(NL80211_CHAN_WIDTH_160), 50 + .beacon_int_min_gcd = 100, 37 51 }; 38 52 39 53 static const struct ieee80211_iface_limit if_limits[] = { ··· 99 85 .extended_capabilities_mask = if_types_ext_capa_ap, 100 86 .extended_capabilities_len = sizeof(if_types_ext_capa_ap), 101 87 .mld_capa_and_ops = 88 + FIELD_PREP_CONST(IEEE80211_MLD_CAP_OP_FREQ_SEP_TYPE_IND, 1) | 102 89 FIELD_PREP_CONST(IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS, 103 90 MT7996_MAX_RADIOS - 1), 104 91 }, ··· 500 485 hw->vif_data_size = sizeof(struct mt7996_vif); 501 486 hw->chanctx_data_size = sizeof(struct mt76_chanctx); 502 487 503 - wiphy->iface_combinations = &if_comb_global; 488 + wiphy->iface_combinations = is_mt7996(&dev->mt76) ? 
&if_comb_global : 489 + &if_comb_global_7992; 504 490 wiphy->n_iface_combinations = 1; 505 491 506 492 wiphy->radio = dev->radios; ··· 537 521 ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD); 538 522 ieee80211_hw_set(hw, NO_VIRTUAL_MONITOR); 539 523 ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID); 524 + ieee80211_hw_set(hw, CHANCTX_STA_CSA); 540 525 541 526 hw->max_tx_fragments = 4; 527 + wiphy->txq_memory_limit = 32 << 20; /* 32 MiB */ 542 528 543 529 /* init led callbacks */ 544 530 if (IS_ENABLED(CONFIG_MT76_LEDS)) { ··· 610 592 void mt7996_mac_init(struct mt7996_dev *dev) 611 593 { 612 594 #define HIF_TXD_V2_1 0x21 613 - int i; 595 + int i, rx_path_type; 614 596 615 597 mt76_clear(dev, MT_MDP_DCR2, MT_MDP_DCR2_RX_TRANS_SHORT); 616 598 ··· 624 606 } 625 607 626 608 /* rro module init */ 627 - if (dev->hif2) 609 + if (dev->hif2) { 610 + if (mt76_npu_device_active(&dev->mt76)) 611 + rx_path_type = is_mt7996(&dev->mt76) ? 6 : 8; 612 + else 613 + rx_path_type = is_mt7996(&dev->mt76) ? 2 : 7; 628 614 mt7996_mcu_set_rro(dev, UNI_RRO_SET_PLATFORM_TYPE, 629 - is_mt7996(&dev->mt76) ? 
2 : 7); 630 - else 615 + rx_path_type); 616 + } else { 631 617 mt7996_mcu_set_rro(dev, UNI_RRO_SET_PLATFORM_TYPE, 0); 618 + } 632 619 633 620 if (mt7996_has_hwrro(dev)) { 634 621 u16 timeout; ··· 691 668 return 0; 692 669 693 670 if (dev->hif2 && 694 - ((is_mt7996(&dev->mt76) && band == MT_BAND2) || 695 - (is_mt7992(&dev->mt76) && band == MT_BAND1))) { 671 + ((is_mt7992(&dev->mt76) && band == MT_BAND1) || 672 + (is_mt7996(&dev->mt76) && band == MT_BAND2 && 673 + !mt76_npu_device_active(&dev->mt76)))) { 696 674 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0); 697 675 wed = &dev->mt76.mmio.wed_hif2; 698 676 } ··· 733 709 /* init wiphy according to mphy and phy */ 734 710 mt7996_init_wiphy_band(mphy->hw, phy); 735 711 736 - if (is_mt7996(&dev->mt76) && !dev->hif2 && band == MT_BAND1) { 712 + if (is_mt7996(&dev->mt76) && 713 + ((band == MT_BAND1 && !dev->hif2) || 714 + (band == MT_BAND2 && mt76_npu_device_active(&dev->mt76)))) { 737 715 int i; 738 716 739 717 for (i = 0; i <= MT_TXQ_PSD; i++) 740 - mphy->q_tx[i] = dev->mt76.phys[MT_BAND0]->q_tx[0]; 718 + mphy->q_tx[i] = dev->mt76.phys[band - 1]->q_tx[0]; 741 719 } else { 742 - ret = mt7996_init_tx_queues(mphy->priv, MT_TXQ_ID(band), 743 - MT7996_TX_RING_SIZE, 720 + int size = is_mt7996(&dev->mt76) && 721 + mt76_npu_device_active(&dev->mt76) 722 + ? 
MT7996_NPU_TX_RING_SIZE / 2 : MT7996_TX_RING_SIZE; 723 + 724 + ret = mt7996_init_tx_queues(mphy->priv, MT_TXQ_ID(band), size, 744 725 MT_TXQ_RING_BASE(band) + hif1_ofs, 745 726 wed); 746 727 if (ret) ··· 785 756 mt7996_mcu_set_eeprom(dev); 786 757 mt7996_mac_init(dev); 787 758 mt7996_txbf_init(dev); 759 + 760 + if (!is_mt7990(&dev->mt76)) 761 + mt7996_mcu_set_dup_wtbl(dev); 788 762 } 789 763 790 764 void mt7996_wfsys_reset(struct mt7996_dev *dev) 791 765 { 792 - mt76_set(dev, MT_WF_SUBSYS_RST, 0x1); 766 + if (!is_mt7990(&dev->mt76)) { 767 + mt76_set(dev, MT_WF_SUBSYS_RST, 0x1); 768 + msleep(20); 769 + 770 + mt76_clear(dev, MT_WF_SUBSYS_RST, 0x1); 771 + msleep(20); 772 + 773 + return; 774 + } 775 + 776 + if (!dev->recovery.hw_full_reset) 777 + return; 778 + 779 + mt76_set(dev, MT_WF_SUBSYS_RST, 780 + MT_WF_SUBSYS_RST_WHOLE_PATH_RST_REVERT | 781 + MT_WF_SUBSYS_RST_BYPASS_WFDMA_SLP_PROT | 782 + MT_WF_SUBSYS_RST_BYPASS_WFDMA2_SLP_PROT); 783 + mt76_rmw(dev, MT_WF_SUBSYS_RST, 784 + MT_WF_SUBSYS_RST_WHOLE_PATH_RST_REVERT_CYCLE, 785 + u32_encode_bits(0x20, MT_WF_SUBSYS_RST_WHOLE_PATH_RST_REVERT_CYCLE)); 786 + mt76_clear(dev, MT_WF_L05_RST, MT_WF_L05_RST_WF_RST_MASK); 787 + mt76_set(dev, MT_WF_SUBSYS_RST, MT_WF_SUBSYS_RST_WHOLE_PATH_RST); 793 788 msleep(20); 794 789 795 - mt76_clear(dev, MT_WF_SUBSYS_RST, 0x1); 796 - msleep(20); 790 + if (mt76_poll(dev, MT_WF_L05_RST, MT_WF_L05_RST_WF_RST_MASK, 0x1a, 1000)) 791 + return; 792 + 793 + dev_err(dev->mt76.dev, "wfsys reset fail\n"); 797 794 } 798 795 799 796 static void mt7996_rro_hw_init_v3(struct mt7996_dev *dev) ··· 897 842 } 898 843 } else { 899 844 /* set emul 3.0 function */ 900 - mt76_wr(dev, MT_RRO_3_0_EMU_CONF, 901 - MT_RRO_3_0_EMU_CONF_EN_MASK); 845 + mt76_set(dev, MT_RRO_3_0_EMU_CONF, MT_RRO_3_0_EMU_CONF_EN_MASK); 902 846 903 847 mt76_wr(dev, MT_RRO_ADDR_ARRAY_BASE0, 904 848 dev->wed_rro.addr_elem[0].phy_addr); ··· 989 935 addr++; 990 936 } 991 937 938 + if (is_mt7996(&dev->mt76) && 939 + 
mt76_npu_device_active(&dev->mt76)) 940 + mt76_npu_send_txrx_addr(&dev->mt76, 0, i, 941 + dev->wed_rro.addr_elem[i].phy_addr, 942 + 0, 0); 943 + 992 944 #ifdef CONFIG_NET_MEDIATEK_SOC_WED 993 945 if (mtk_wed_device_active(&dev->mt76.mmio.wed) && 994 946 mtk_wed_get_rx_capa(&dev->mt76.mmio.wed)) { ··· 1054 994 addr->data = cpu_to_le32(val); 1055 995 addr++; 1056 996 } 997 + 998 + if (is_mt7996(&dev->mt76) && mt76_npu_device_active(&dev->mt76)) 999 + mt76_npu_send_txrx_addr(&dev->mt76, 1, 0, 1000 + dev->wed_rro.session.phy_addr, 0, 0); 1057 1001 1058 1002 mt7996_rro_hw_init(dev); 1059 1003 ··· 1145 1081 list); 1146 1082 list_del_init(&e->list); 1147 1083 1148 - if (mt76_npu_device_active(&dev->mt76)) 1084 + if (mt76_npu_device_active(&dev->mt76)) { 1085 + if (is_mt7996(&dev->mt76)) 1086 + mt76_npu_send_txrx_addr(&dev->mt76, 3, e->id, 1087 + 0, 0, 0); 1149 1088 goto reset_session; 1089 + } 1150 1090 1151 1091 for (i = 0; i < MT7996_RRO_WINDOW_MAX_LEN; i++) { 1152 1092 void *ptr = dev->wed_rro.session.ptr; ··· 1197 1129 else if (u32_get_bits(val, MT_PAD_GPIO_ADIE_COMB_7992)) 1198 1130 var_type = MT7992_VAR_TYPE_44; 1199 1131 else 1200 - return -EINVAL; 1132 + var_type = MT7992_VAR_TYPE_24; 1201 1133 break; 1202 1134 case MT7990_DEVICE_ID: 1203 1135 var_type = MT7990_VAR_TYPE_23; ··· 1231 1163 if (ret) 1232 1164 return ret; 1233 1165 1234 - ret = mt7996_mcu_get_eeprom(dev, MT7976C_EFUSE_OFFSET, buf, sizeof(buf)); 1166 + ret = mt7996_mcu_get_eeprom(dev, MT7976C_EFUSE_OFFSET, buf, sizeof(buf), 1167 + EEPROM_MODE_EFUSE); 1235 1168 if (ret && ret != -EINVAL) 1236 1169 return ret; 1237 1170 ··· 1765 1696 if (ret) 1766 1697 return ret; 1767 1698 1699 + mt7996_dma_rro_start(dev); 1700 + 1768 1701 ret = mt76_register_device(&dev->mt76, true, mt76_rates, 1769 1702 ARRAY_SIZE(mt76_rates)); 1770 1703 if (ret) ··· 1797 1726 1798 1727 void mt7996_unregister_device(struct mt7996_dev *dev) 1799 1728 { 1729 + cancel_work_sync(&dev->dump_work); 1800 1730 
cancel_work_sync(&dev->wed_rro.work); 1801 1731 mt7996_unregister_phy(mt7996_phy3(dev)); 1802 1732 mt7996_unregister_phy(mt7996_phy2(dev));
+50 -111
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
··· 13 13 14 14 #define to_rssi(field, rcpi) ((FIELD_GET(field, rcpi) - 220) / 2) 15 15 16 - static const struct mt7996_dfs_radar_spec etsi_radar_specs = { 17 - .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, 18 - .radar_pattern = { 19 - [5] = { 1, 0, 6, 32, 28, 0, 990, 5010, 17, 1, 1 }, 20 - [6] = { 1, 0, 9, 32, 28, 0, 615, 5010, 27, 1, 1 }, 21 - [7] = { 1, 0, 15, 32, 28, 0, 240, 445, 27, 1, 1 }, 22 - [8] = { 1, 0, 12, 32, 28, 0, 240, 510, 42, 1, 1 }, 23 - [9] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 }, 24 - [10] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 }, 25 - [11] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 }, 26 - [12] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 }, 27 - }, 28 - }; 29 - 30 - static const struct mt7996_dfs_radar_spec fcc_radar_specs = { 31 - .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, 32 - .radar_pattern = { 33 - [0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 }, 34 - [1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 }, 35 - [2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 }, 36 - [3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 }, 37 - [4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 }, 38 - }, 39 - }; 40 - 41 - static const struct mt7996_dfs_radar_spec jp_radar_specs = { 42 - .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, 43 - .radar_pattern = { 44 - [0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 }, 45 - [1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 }, 46 - [2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 }, 47 - [3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 }, 48 - [4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 }, 49 - [13] = { 1, 0, 7, 32, 28, 0, 3836, 3856, 14, 1, 1 }, 50 - [14] = { 1, 0, 6, 32, 28, 0, 615, 5010, 110, 1, 1 }, 51 - [15] = { 1, 1, 0, 0, 0, 0, 15, 5010, 110, 0, 0, 12, 32, 28 }, 52 - }, 53 - }; 54 - 55 16 static struct mt76_wcid *mt7996_rx_get_wcid(struct mt7996_dev *dev, 56 17 u16 idx, u8 band_idx) 57 18 { ··· 488 527 
!(csum_status & (BIT(0) | BIT(2) | BIT(3)))) 489 528 skb->ip_summed = CHECKSUM_UNNECESSARY; 490 529 491 - if (rxd1 & MT_RXD3_NORMAL_FCS_ERR) 530 + if (rxd3 & MT_RXD3_NORMAL_FCS_ERR) 492 531 status->flag |= RX_FLAG_FAILED_FCS_CRC; 493 532 494 533 if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR) ··· 661 700 662 701 hdr = mt76_skb_get_hdr(skb); 663 702 fc = hdr->frame_control; 703 + if (ieee80211_is_beacon(fc)) 704 + mt76_rx_beacon(mphy, skb); 664 705 if (ieee80211_is_data_qos(fc)) { 665 706 u8 *qos = ieee80211_get_qos_ctl(hdr); 666 707 ··· 1102 1139 * req 1103 1140 */ 1104 1141 if (le32_to_cpu(ptr[7]) & MT_TXD7_MAC_TXD) { 1105 - u32 val; 1142 + u32 val, mac_txp_size = sizeof(struct mt76_connac_hw_txp); 1106 1143 1107 1144 ptr = (__le32 *)(txwi + MT_TXD_SIZE); 1108 - memset((void *)ptr, 0, sizeof(struct mt76_connac_fw_txp)); 1145 + memset((void *)ptr, 0, mac_txp_size); 1109 1146 1110 1147 val = FIELD_PREP(MT_TXP0_TOKEN_ID0, id) | 1111 1148 MT_TXP0_TOKEN_ID0_VALID_MASK; ··· 1124 1161 tx_info->buf[1].addr >> 32); 1125 1162 #endif 1126 1163 ptr[3] = cpu_to_le32(val); 1164 + 1165 + tx_info->buf[0].len = MT_TXD_SIZE + mac_txp_size; 1127 1166 } else { 1128 1167 struct mt76_connac_txp_common *txp; 1129 1168 ··· 1235 1270 if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA))) 1236 1271 return; 1237 1272 1238 - if (!test_and_set_bit(tid, &wcid->ampdu_state)) 1239 - ieee80211_start_tx_ba_session(link_sta->sta, tid, 0); 1273 + if (!test_and_set_bit(tid, &wcid->ampdu_state) && 1274 + ieee80211_start_tx_ba_session(link_sta->sta, tid, 0)) 1275 + clear_bit(tid, &wcid->ampdu_state); 1240 1276 } 1241 1277 1242 1278 static void ··· 2169 2203 2170 2204 for_each_vif_active_link(vif, link_conf, link_id) { 2171 2205 struct mt7996_vif_link *link; 2206 + struct mt7996_phy *link_phy; 2172 2207 2173 2208 link = mt7996_vif_link(dev, vif, link_id); 2174 - if (!link || link->phy != phy) 2209 + if (!link) 2210 + continue; 2211 + 2212 + link_phy = mt7996_vif_link_phy(link); 2213 + if 
(link_phy != phy) 2175 2214 continue; 2176 2215 2177 2216 mt7996_mcu_add_beacon(dev->mt76.hw, vif, link_conf, ··· 2219 2248 } 2220 2249 spin_unlock_bh(&dev->mt76.token_lock); 2221 2250 idr_destroy(&dev->mt76.token); 2251 + 2252 + for (id = 0; id < __MT_MAX_BAND; id++) { 2253 + struct mt76_phy *phy = dev->mt76.phys[id]; 2254 + if (phy) 2255 + atomic_set(&phy->mgmt_tx_pending, 0); 2256 + } 2222 2257 } 2223 2258 2224 2259 static int ··· 2372 2395 struct mt7996_dev *dev = data; 2373 2396 int i; 2374 2397 2375 - for (i = 0; i < ARRAY_SIZE(msta->link); i++) { 2376 - struct mt7996_sta_link *msta_link = NULL; 2377 - 2378 - msta_link = rcu_replace_pointer(msta->link[i], msta_link, 2379 - lockdep_is_held(&dev->mt76.mutex)); 2380 - if (!msta_link) 2381 - continue; 2382 - 2383 - mt7996_mac_sta_deinit_link(dev, msta_link); 2384 - 2385 - if (msta->deflink_id == i) { 2386 - msta->deflink_id = IEEE80211_LINK_UNSPECIFIED; 2387 - continue; 2388 - } 2389 - 2390 - kfree_rcu(msta_link, rcu_head); 2391 - } 2398 + for (i = 0; i < ARRAY_SIZE(msta->link); i++) 2399 + mt7996_mac_sta_remove_link(dev, sta, i, true); 2392 2400 } 2393 2401 2394 2402 static void ··· 2506 2544 if (mtk_wed_device_active(&dev->mt76.mmio.wed)) 2507 2545 mtk_wed_device_stop(&dev->mt76.mmio.wed); 2508 2546 2547 + mt7996_npu_hw_stop(dev); 2509 2548 ieee80211_stop_queues(mt76_hw(dev)); 2510 2549 2511 2550 set_bit(MT76_RESET, &dev->mphy.state); ··· 2527 2564 mt76_queue_is_wed_rro(&dev->mt76.q_rx[i])) 2528 2565 continue; 2529 2566 2567 + if (mt76_npu_device_active(&dev->mt76) && 2568 + mt76_queue_is_wed_rro(&dev->mt76.q_rx[i])) 2569 + continue; 2570 + 2571 + if (mt76_queue_is_npu_txfree(&dev->mt76.q_rx[i])) 2572 + continue; 2573 + 2530 2574 napi_disable(&dev->mt76.napi[i]); 2531 2575 } 2532 2576 napi_disable(&dev->mt76.tx_napi); 2533 2577 2534 2578 mutex_lock(&dev->mt76.mutex); 2535 - 2536 - mt7996_npu_hw_stop(dev); 2537 2579 2538 2580 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED); 2539 2581 ··· 2559 2591 
mt7996_dma_start(dev, false, false); 2560 2592 2561 2593 if (!is_mt7996(&dev->mt76) && dev->mt76.hwrro_mode == MT76_HWRRO_V3) 2562 - mt76_wr(dev, MT_RRO_3_0_EMU_CONF, MT_RRO_3_0_EMU_CONF_EN_MASK); 2594 + mt76_set(dev, MT_RRO_3_0_EMU_CONF, MT_RRO_3_0_EMU_CONF_EN_MASK); 2563 2595 2564 2596 if (mtk_wed_device_active(&dev->mt76.mmio.wed)) { 2565 2597 u32 wed_irq_mask = MT_INT_TX_DONE_BAND2 | ··· 2578 2610 MT_INT_TX_RX_DONE_EXT); 2579 2611 } 2580 2612 2613 + __mt7996_npu_hw_init(dev); 2614 + 2581 2615 clear_bit(MT76_MCU_RESET, &dev->mphy.state); 2582 2616 mt7996_for_each_phy(dev, phy) 2583 2617 clear_bit(MT76_RESET, &phy->mt76->state); ··· 2587 2617 mt76_for_each_q_rx(&dev->mt76, i) { 2588 2618 if (mtk_wed_device_active(&dev->mt76.mmio.wed) && 2589 2619 mt76_queue_is_wed_rro(&dev->mt76.q_rx[i])) 2620 + continue; 2621 + 2622 + if (mt76_npu_device_active(&dev->mt76) && 2623 + mt76_queue_is_wed_rro(&dev->mt76.q_rx[i])) 2624 + continue; 2625 + 2626 + if (mt76_queue_is_npu_txfree(&dev->mt76.q_rx[i])) 2590 2627 continue; 2591 2628 2592 2629 napi_enable(&dev->mt76.napi[i]); ··· 2615 2638 mt7996_update_beacons(dev); 2616 2639 2617 2640 mutex_unlock(&dev->mt76.mutex); 2618 - 2619 - mt7996_npu_hw_init(dev); 2620 2641 2621 2642 mt7996_for_each_phy(dev, phy) 2622 2643 ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work, ··· 2710 2735 mt7996_irq_disable(dev, MT_INT_MCU_CMD); 2711 2736 queue_work(dev->mt76.wq, &dev->dump_work); 2712 2737 return; 2738 + } 2739 + 2740 + if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA) { 2741 + set_bit(MT76_MCU_RESET, &dev->mphy.state); 2742 + wake_up(&dev->mt76.mcu.wait); 2713 2743 } 2714 2744 2715 2745 queue_work(dev->mt76.wq, &dev->reset_work); ··· 2935 2955 2936 2956 mutex_unlock(&mphy->dev->mutex); 2937 2957 2958 + mt76_beacon_mon_check(mphy); 2938 2959 mt76_tx_status_check(mphy->dev, false); 2939 2960 2940 2961 ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, ··· 2955 2974 2956 2975 static int mt7996_dfs_start_rdd(struct 
mt7996_dev *dev, int rdd_idx) 2957 2976 { 2958 - int err, region; 2977 + int region; 2959 2978 2960 2979 switch (dev->mt76.region) { 2961 2980 case NL80211_DFS_ETSI: ··· 2970 2989 break; 2971 2990 } 2972 2991 2973 - err = mt7996_mcu_rdd_cmd(dev, RDD_START, rdd_idx, region); 2974 - if (err < 0) 2975 - return err; 2976 - 2977 - return mt7996_mcu_rdd_cmd(dev, RDD_DET_MODE, rdd_idx, 1); 2992 + return mt7996_mcu_rdd_cmd(dev, RDD_START, rdd_idx, region); 2978 2993 } 2979 2994 2980 2995 static int mt7996_dfs_start_radar_detector(struct mt7996_phy *phy) ··· 2992 3015 return err; 2993 3016 } 2994 3017 2995 - static int 2996 - mt7996_dfs_init_radar_specs(struct mt7996_phy *phy) 2997 - { 2998 - const struct mt7996_dfs_radar_spec *radar_specs; 2999 - struct mt7996_dev *dev = phy->dev; 3000 - int err, i; 3001 - 3002 - switch (dev->mt76.region) { 3003 - case NL80211_DFS_FCC: 3004 - radar_specs = &fcc_radar_specs; 3005 - err = mt7996_mcu_set_fcc5_lpn(dev, 8); 3006 - if (err < 0) 3007 - return err; 3008 - break; 3009 - case NL80211_DFS_ETSI: 3010 - radar_specs = &etsi_radar_specs; 3011 - break; 3012 - case NL80211_DFS_JP: 3013 - radar_specs = &jp_radar_specs; 3014 - break; 3015 - default: 3016 - return -EINVAL; 3017 - } 3018 - 3019 - for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) { 3020 - err = mt7996_mcu_set_radar_th(dev, i, 3021 - &radar_specs->radar_pattern[i]); 3022 - if (err < 0) 3023 - return err; 3024 - } 3025 - 3026 - return mt7996_mcu_set_pulse_th(dev, &radar_specs->pulse_th); 3027 - } 3028 - 3029 3018 int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy) 3030 3019 { 3031 3020 struct mt7996_dev *dev = phy->dev; ··· 3011 3068 goto stop; 3012 3069 3013 3070 if (prev_state <= MT_DFS_STATE_DISABLED) { 3014 - err = mt7996_dfs_init_radar_specs(phy); 3015 - if (err < 0) 3016 - return err; 3017 - 3018 3071 err = mt7996_dfs_start_radar_detector(phy); 3019 3072 if (err < 0) 3020 3073 return err;
-5
drivers/net/wireless/mediatek/mt76/mt7996/mac.h
··· 37 37 u32 min_stgpr_diff; 38 38 } __packed; 39 39 40 - struct mt7996_dfs_radar_spec { 41 - struct mt7996_dfs_pulse pulse_th; 42 - struct mt7996_dfs_pattern radar_pattern[16]; 43 - }; 44 - 45 40 #endif
+334 -105
drivers/net/wireless/mediatek/mt76/mt7996/main.c
··· 56 56 57 57 mutex_lock(&dev->mt76.mutex); 58 58 ret = mt7996_mcu_set_hdr_trans(dev, true); 59 - if (!ret && is_mt7992(&dev->mt76)) { 59 + if (!ret && !is_mt7996(&dev->mt76)) { 60 60 u8 queue = mt76_connac_lmac_mapping(IEEE80211_AC_VI); 61 61 62 62 ret = mt7996_mcu_cp_support(dev, queue); ··· 79 79 80 80 mutex_lock(&dev->mt76.mutex); 81 81 82 + mt7996_mcu_rdd_resume_tx(phy); 82 83 mt7996_mcu_set_radio_en(phy, false); 83 84 84 85 clear_bit(MT76_STATE_RUNNING, &phy->mt76->state); ··· 239 238 link_conf = &vif->bss_conf; 240 239 241 240 if (cmd == SET_KEY && !sta && !link->mt76.cipher) { 241 + struct mt7996_phy *phy = mt7996_vif_link_phy(link); 242 + 242 243 link->mt76.cipher = 243 244 mt76_connac_mcu_get_cipher(key->cipher); 244 - mt7996_mcu_add_bss_info(link->phy, vif, link_conf, 245 - &link->mt76, msta_link, true); 245 + if (phy) 246 + mt7996_mcu_add_bss_info(phy, vif, link_conf, 247 + &link->mt76, msta_link, true); 246 248 } 247 249 248 250 if (cmd == SET_KEY) ··· 304 300 .cmd = SET_KEY, 305 301 .link_id = link_conf->link_id, 306 302 }; 307 - struct mt76_txq *mtxq; 308 303 int mld_idx, idx, ret; 304 + 305 + if ((mvif->mt76.valid_links & BIT(link_conf->link_id)) && 306 + !mlink->offchannel) { 307 + if (vif->type == NL80211_IFTYPE_AP) 308 + return mt7996_mcu_mld_link_oper(dev, link_conf, link, 309 + true); 310 + return 0; 311 + } 309 312 310 313 mlink->idx = __ffs64(~dev->mt76.vif_mask); 311 314 if (mlink->idx >= mt7996_max_interface_num(dev)) ··· 327 316 return -ENOSPC; 328 317 329 318 link->mld_idx = mld_idx; 330 - link->phy = phy; 331 319 mlink->omac_idx = idx; 332 320 mlink->band_idx = band_idx; 333 321 mlink->wmm_idx = vif->type == NL80211_IFTYPE_AP ? 
0 : 3; ··· 353 343 mt7996_mac_wtbl_update(dev, idx, 354 344 MT_WTBL_UPDATE_ADM_COUNT_CLEAR); 355 345 356 - if (vif->txq) { 357 - mtxq = (struct mt76_txq *)vif->txq->drv_priv; 358 - mtxq->wcid = idx; 359 - } 360 - 361 346 if (vif->type != NL80211_IFTYPE_AP && 362 347 (!mlink->omac_idx || mlink->omac_idx > 3)) 363 348 vif->offload_flags = 0; ··· 375 370 376 371 ieee80211_iter_keys(mphy->hw, vif, mt7996_key_iter, &it); 377 372 378 - if (!mlink->wcid->offchannel && 379 - mvif->mt76.deflink_id == IEEE80211_LINK_UNSPECIFIED) 380 - mvif->mt76.deflink_id = link_conf->link_id; 373 + if (!mlink->wcid->offchannel) { 374 + if (vif->txq && 375 + mvif->mt76.deflink_id == IEEE80211_LINK_UNSPECIFIED) { 376 + struct mt76_txq *mtxq; 377 + 378 + mtxq = (struct mt76_txq *)vif->txq->drv_priv; 379 + mvif->mt76.deflink_id = link_conf->link_id; 380 + mtxq->wcid = idx; 381 + } 382 + mvif->mt76.valid_links |= BIT(link_conf->link_id); 383 + } 384 + 385 + if (vif->type == NL80211_IFTYPE_STATION) { 386 + vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; 387 + 388 + if (vif->cfg.assoc && link_conf->beacon_int) { 389 + mlink->beacon_mon_interval = 390 + msecs_to_jiffies(ieee80211_tu_to_usec( 391 + link_conf->beacon_int) / 1000); 392 + WRITE_ONCE(mlink->beacon_mon_last, jiffies); 393 + } 394 + } 381 395 382 396 return 0; 397 + } 398 + 399 + static void mt7996_vif_link_destroy(struct mt7996_phy *phy, 400 + struct mt7996_vif_link *link, 401 + struct ieee80211_vif *vif, 402 + struct ieee80211_bss_conf *link_conf) 403 + { 404 + struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; 405 + struct mt7996_sta_link *msta_link = &link->msta_link; 406 + unsigned int link_id = msta_link->wcid.link_id; 407 + struct mt76_vif_link *mlink = &link->mt76; 408 + struct mt7996_key_iter_data it = { 409 + .cmd = SET_KEY, 410 + .link_id = link_id, 411 + }; 412 + struct mt7996_dev *dev = phy->dev; 413 + int idx = msta_link->wcid.idx; 414 + 415 + if (!link_conf) 416 + link_conf = &vif->bss_conf; 417 + 418 + if 
(!mlink->wcid->offchannel) 419 + ieee80211_iter_keys(phy->mt76->hw, vif, mt7996_key_iter, &it); 420 + 421 + mt7996_mcu_add_sta(dev, link_conf, NULL, link, NULL, 422 + CONN_STATE_DISCONNECT, false); 423 + mt7996_mcu_add_bss_info(phy, vif, link_conf, mlink, msta_link, false); 424 + mt7996_mcu_add_dev_info(phy, vif, link_conf, mlink, false); 425 + 426 + rcu_assign_pointer(dev->mt76.wcid[idx], NULL); 427 + 428 + dev->mt76.vif_mask &= ~BIT_ULL(mlink->idx); 429 + dev->mld_idx_mask &= ~BIT_ULL(link->mld_idx); 430 + phy->omac_mask &= ~BIT_ULL(mlink->omac_idx); 431 + if (!mlink->wcid->offchannel) 432 + mvif->mt76.valid_links &= ~BIT(link_id); 433 + 434 + spin_lock_bh(&dev->mt76.sta_poll_lock); 435 + if (!list_empty(&msta_link->wcid.poll_list)) 436 + list_del_init(&msta_link->wcid.poll_list); 437 + spin_unlock_bh(&dev->mt76.sta_poll_lock); 438 + 439 + mt76_wcid_cleanup(&dev->mt76, &msta_link->wcid); 440 + 441 + if (mlink != (struct mt76_vif_link *)vif->drv_priv && 442 + !mlink->wcid->offchannel) { 443 + rcu_assign_pointer(mlink->mvif->link[link_id], NULL); 444 + kfree_rcu(mlink, rcu_head); 445 + } 383 446 } 384 447 385 448 void mt7996_vif_link_remove(struct mt76_phy *mphy, struct ieee80211_vif *vif, ··· 457 384 struct mt7996_vif_link *link = container_of(mlink, struct mt7996_vif_link, mt76); 458 385 struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; 459 386 struct mt7996_sta_link *msta_link = &link->msta_link; 387 + unsigned int link_id = msta_link->wcid.link_id; 460 388 struct mt7996_phy *phy = mphy->priv; 461 - struct mt7996_dev *dev = phy->dev; 462 - struct mt7996_key_iter_data it = { 463 - .cmd = SET_KEY, 464 - .link_id = link_conf->link_id, 465 - }; 466 - int idx = msta_link->wcid.idx; 467 389 468 - if (!mlink->wcid->offchannel) 469 - ieee80211_iter_keys(mphy->hw, vif, mt7996_key_iter, &it); 390 + /* Hw requires to destroy active links tearing down the interface, so 391 + * postpone it removing the interface. 
392 + */ 393 + if (mlink->wcid->offchannel) { 394 + mt7996_vif_link_destroy(phy, link, vif, link_conf); 395 + } else { 396 + if (vif->type == NL80211_IFTYPE_AP) { 397 + mt7996_mcu_mld_reconf_stop_link(phy->dev, vif, 398 + BIT(link_id)); 399 + mt7996_mcu_mld_link_oper(phy->dev, link_conf, link, 400 + false); 401 + } 470 402 471 - mt7996_mcu_add_sta(dev, link_conf, NULL, link, NULL, 472 - CONN_STATE_DISCONNECT, false); 473 - mt7996_mcu_add_bss_info(phy, vif, link_conf, mlink, msta_link, false); 403 + if (vif->txq && mvif->mt76.deflink_id == link_id) { 404 + struct ieee80211_bss_conf *iter; 405 + struct mt76_txq *mtxq; 474 406 475 - mt7996_mcu_add_dev_info(phy, vif, link_conf, mlink, false); 407 + mvif->mt76.deflink_id = IEEE80211_LINK_UNSPECIFIED; 408 + mtxq = (struct mt76_txq *)vif->txq->drv_priv; 409 + /* Primary link will be removed, look for a new one */ 410 + for_each_vif_active_link(vif, iter, link_id) { 411 + if (link_id == msta_link->wcid.link_id) 412 + continue; 476 413 477 - rcu_assign_pointer(dev->mt76.wcid[idx], NULL); 414 + link = mt7996_vif_link(phy->dev, vif, link_id); 415 + if (!link) 416 + continue; 478 417 479 - if (!mlink->wcid->offchannel && 480 - mvif->mt76.deflink_id == link_conf->link_id) { 481 - struct ieee80211_bss_conf *iter; 482 - unsigned int link_id; 483 - 484 - mvif->mt76.deflink_id = IEEE80211_LINK_UNSPECIFIED; 485 - for_each_vif_active_link(vif, iter, link_id) { 486 - if (link_id != IEEE80211_LINK_UNSPECIFIED) { 418 + mtxq->wcid = link->msta_link.wcid.idx; 487 419 mvif->mt76.deflink_id = link_id; 488 420 break; 489 421 } 490 422 } 491 423 } 492 - 493 - dev->mt76.vif_mask &= ~BIT_ULL(mlink->idx); 494 - dev->mld_idx_mask &= ~BIT_ULL(link->mld_idx); 495 - phy->omac_mask &= ~BIT_ULL(mlink->omac_idx); 496 - 497 - spin_lock_bh(&dev->mt76.sta_poll_lock); 498 - if (!list_empty(&msta_link->wcid.poll_list)) 499 - list_del_init(&msta_link->wcid.poll_list); 500 - spin_unlock_bh(&dev->mt76.sta_poll_lock); 501 - 502 - mt76_wcid_cleanup(&dev->mt76, 
&msta_link->wcid); 503 424 } 504 425 505 426 static void mt7996_phy_set_rxfilter(struct mt7996_phy *phy) ··· 539 472 540 473 mt76_rmw_field(dev, MT_DMA_DCR0(phy->mt76->band_idx), 541 474 MT_DMA_DCR0_RXD_G5_EN, enabled); 475 + mt76_rmw_field(dev, MT_MDP_DCR0, 476 + MT_MDP_DCR0_RX_HDR_TRANS_EN, !enabled); 542 477 mt7996_phy_set_rxfilter(phy); 543 478 mt7996_mcu_set_sniffer_mode(phy, enabled); 544 479 } ··· 599 530 static void mt7996_remove_interface(struct ieee80211_hw *hw, 600 531 struct ieee80211_vif *vif) 601 532 { 533 + struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; 534 + unsigned long rem_links = mvif->mt76.valid_links; 602 535 struct mt7996_dev *dev = mt7996_hw_dev(hw); 603 536 struct mt7996_radio_data rdata = {}; 537 + unsigned int link_id; 604 538 int i; 539 + 540 + /* Remove all active links */ 541 + for_each_set_bit(link_id, &rem_links, IEEE80211_MLD_MAX_NUM_LINKS) { 542 + struct mt7996_vif_link *link; 543 + struct mt7996_phy *phy; 544 + 545 + link = mt7996_vif_link(dev, vif, link_id); 546 + if (!link) 547 + continue; 548 + 549 + phy = __mt7996_phy(dev, link->msta_link.wcid.phy_idx); 550 + if (!phy) 551 + continue; 552 + 553 + mt7996_vif_link_destroy(phy, link, vif, NULL); 554 + } 605 555 606 556 ieee80211_iterate_active_interfaces_mtx(hw, 0, mt7996_remove_iter, 607 557 &rdata); ··· 899 811 900 812 for_each_vif_active_link(vif, link_conf, link_id) { 901 813 struct mt7996_vif_link *link; 814 + struct mt7996_phy *phy; 902 815 903 816 link = mt7996_vif_link(dev, vif, link_id); 904 817 if (!link) 905 818 continue; 906 819 907 - if (!link->phy) 820 + if (vif->type == NL80211_IFTYPE_STATION) { 821 + link->mt76.beacon_mon_interval = 822 + msecs_to_jiffies(ieee80211_tu_to_usec( 823 + link_conf->beacon_int) / 1000); 824 + WRITE_ONCE(link->mt76.beacon_mon_last, jiffies); 825 + } 826 + 827 + phy = mt7996_vif_link_phy(link); 828 + if (!phy) 908 829 continue; 909 830 910 - mt7996_mcu_add_bss_info(link->phy, vif, link_conf, 831 + 
mt7996_mcu_add_bss_info(phy, vif, link_conf, 911 832 &link->mt76, &link->msta_link, 912 833 true); 913 834 mt7996_mcu_add_sta(dev, link_conf, NULL, link, NULL, 914 835 CONN_STATE_PORT_SECURE, 915 836 !!(changed & BSS_CHANGED_BSSID)); 837 + } 838 + } 839 + 840 + if ((changed & BSS_CHANGED_ASSOC) && !vif->cfg.assoc && 841 + vif->type == NL80211_IFTYPE_STATION) { 842 + struct ieee80211_bss_conf *link_conf; 843 + unsigned long link_id; 844 + 845 + for_each_vif_active_link(vif, link_conf, link_id) { 846 + struct mt7996_vif_link *link; 847 + 848 + link = mt7996_vif_link(dev, vif, link_id); 849 + if (link) 850 + link->mt76.beacon_mon_interval = 0; 916 851 } 917 852 } 918 853 ··· 974 863 CONN_STATE_PORT_SECURE, 975 864 !!(changed & BSS_CHANGED_BSSID)); 976 865 } 866 + 867 + if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT) 868 + mt7996_mcu_set_protection(phy, link, info->ht_operation_mode, 869 + info->use_cts_prot); 977 870 978 871 if (changed & BSS_CHANGED_ERP_SLOT) { 979 872 int slottime = info->use_short_slot ? 
9 : 20; ··· 1042 927 struct cfg80211_chan_def *chandef) 1043 928 { 1044 929 struct mt7996_dev *dev = mt7996_hw_dev(hw); 930 + struct mt7996_phy *phy = mt7996_band_phy(dev, chandef->chan->band); 931 + struct ieee80211_bss_conf *link_conf; 932 + unsigned int link_id; 1045 933 1046 934 mutex_lock(&dev->mt76.mutex); 1047 - mt7996_mcu_add_beacon(hw, vif, &vif->bss_conf, vif->bss_conf.enable_beacon); 935 + 936 + for_each_vif_active_link(vif, link_conf, link_id) { 937 + struct mt7996_vif_link *link; 938 + struct mt7996_phy *link_phy; 939 + 940 + link = mt7996_vif_link(dev, vif, link_id); 941 + if (!link) 942 + continue; 943 + 944 + link_phy = mt7996_vif_link_phy(link); 945 + if (link_phy != phy) 946 + continue; 947 + 948 + /* Reset beacon when channel switch triggered during CAC to let 949 + * FW correctly perform CSA countdown 950 + */ 951 + if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->mt76->chandef, 952 + vif->type)) 953 + mt7996_mcu_add_beacon(hw, vif, link_conf, false); 954 + 955 + mt7996_mcu_add_beacon(hw, vif, link_conf, true); 956 + break; 957 + } 958 + 1048 959 mutex_unlock(&dev->mt76.mutex); 960 + } 961 + 962 + static int 963 + mt7996_post_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 964 + struct ieee80211_bss_conf *link_conf) 965 + { 966 + struct cfg80211_chan_def *chandef = &link_conf->chanreq.oper; 967 + struct mt7996_dev *dev = mt7996_hw_dev(hw); 968 + struct mt7996_phy *phy = mt7996_band_phy(dev, chandef->chan->band); 969 + struct mt7996_vif_link *link; 970 + int ret = -EINVAL; 971 + 972 + mutex_lock(&dev->mt76.mutex); 973 + 974 + link = mt7996_vif_conf_link(dev, vif, link_conf); 975 + if (!link) 976 + goto out; 977 + 978 + ret = mt7996_mcu_update_bss_rfch(phy, link); 979 + if (ret) 980 + goto out; 981 + 982 + ieee80211_iterate_stations_mtx(hw, mt7996_mcu_update_sta_rec_bw, link); 983 + 984 + ret = mt7996_mcu_rdd_resume_tx(phy); 985 + 986 + out: 987 + mutex_unlock(&dev->mt76.mutex); 988 + 989 + return ret; 990 + } 991 + 992 + static 
void 993 + mt7996_sta_init_txq_wcid(struct ieee80211_sta *sta, int idx) 994 + { 995 + int i; 996 + 997 + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) { 998 + struct mt76_txq *mtxq; 999 + 1000 + if (!sta->txq[i]) 1001 + continue; 1002 + 1003 + mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv; 1004 + mtxq->wcid = idx; 1005 + } 1049 1006 } 1050 1007 1051 1008 static int ··· 1128 941 { 1129 942 struct ieee80211_sta *sta = link_sta->sta; 1130 943 struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; 1131 - struct mt7996_phy *phy = link->phy; 944 + struct mt7996_phy *phy = mt7996_vif_link_phy(link); 1132 945 struct mt7996_sta_link *msta_link; 1133 946 int idx; 947 + 948 + if (!phy) 949 + return -EINVAL; 1134 950 1135 951 idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7996_WTBL_STA); 1136 952 if (idx < 0) 1137 953 return -ENOSPC; 1138 954 1139 955 if (msta->deflink_id == IEEE80211_LINK_UNSPECIFIED) { 1140 - int i; 1141 - 1142 956 msta_link = &msta->deflink; 1143 957 msta->deflink_id = link_id; 1144 958 msta->seclink_id = msta->deflink_id; 1145 - 1146 - for (i = 0; i < ARRAY_SIZE(sta->txq); i++) { 1147 - struct mt76_txq *mtxq; 1148 - 1149 - if (!sta->txq[i]) 1150 - continue; 1151 - 1152 - mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv; 1153 - mtxq->wcid = idx; 1154 - } 959 + mt7996_sta_init_txq_wcid(sta, idx); 1155 960 } else { 1156 961 msta_link = kzalloc_obj(*msta_link); 1157 962 if (!msta_link) ··· 1179 1000 return 0; 1180 1001 } 1181 1002 1182 - void mt7996_mac_sta_deinit_link(struct mt7996_dev *dev, 1183 - struct mt7996_sta_link *msta_link) 1003 + void mt7996_mac_sta_remove_link(struct mt7996_dev *dev, 1004 + struct ieee80211_sta *sta, 1005 + unsigned int link_id, bool flush) 1184 1006 { 1007 + struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; 1008 + struct mt7996_sta_link *msta_link; 1009 + 1010 + msta_link = mt76_dereference(msta->link[link_id], &dev->mt76); 1011 + if (!msta_link) 1012 + return; 1013 + 1185 1014 
spin_lock_bh(&dev->mt76.sta_poll_lock); 1186 1015 if (!list_empty(&msta_link->wcid.poll_list)) 1187 1016 list_del_init(&msta_link->wcid.poll_list); ··· 1198 1011 spin_unlock_bh(&dev->mt76.sta_poll_lock); 1199 1012 1200 1013 mt76_wcid_cleanup(&dev->mt76, &msta_link->wcid); 1201 - mt76_wcid_mask_clear(dev->mt76.wcid_mask, msta_link->wcid.idx); 1202 - } 1203 1014 1204 - static void 1205 - mt7996_mac_sta_remove_links(struct mt7996_dev *dev, struct ieee80211_vif *vif, 1206 - struct ieee80211_sta *sta, unsigned long links) 1207 - { 1208 - struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; 1209 - struct mt76_dev *mdev = &dev->mt76; 1210 - unsigned int link_id; 1211 - 1212 - for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) { 1213 - struct mt7996_sta_link *msta_link = NULL; 1214 - struct mt7996_vif_link *link; 1215 - struct mt76_phy *mphy; 1216 - 1217 - msta_link = rcu_replace_pointer(msta->link[link_id], msta_link, 1218 - lockdep_is_held(&mdev->mutex)); 1219 - if (!msta_link) 1220 - continue; 1015 + if (msta_link->wcid.link_valid) { 1016 + struct mt7996_phy *phy; 1221 1017 1222 1018 mt7996_mac_wtbl_update(dev, msta_link->wcid.idx, 1223 1019 MT_WTBL_UPDATE_ADM_COUNT_CLEAR); 1224 1020 1225 - mt7996_mac_sta_deinit_link(dev, msta_link); 1226 - link = mt7996_vif_link(dev, vif, link_id); 1227 - if (!link) 1228 - continue; 1021 + phy = __mt7996_phy(dev, msta_link->wcid.phy_idx); 1022 + if (phy) 1023 + phy->mt76->num_sta--; 1229 1024 1230 - mphy = mt76_vif_link_phy(&link->mt76); 1231 - if (!mphy) 1232 - continue; 1233 - 1234 - mphy->num_sta--; 1235 1025 if (msta->deflink_id == link_id) { 1236 1026 msta->deflink_id = IEEE80211_LINK_UNSPECIFIED; 1237 - continue; 1238 - } else if (msta->seclink_id == link_id) { 1239 - msta->seclink_id = IEEE80211_LINK_UNSPECIFIED; 1240 - } 1027 + if (msta->seclink_id == link_id) { 1028 + /* no secondary link available */ 1029 + msta->seclink_id = msta->deflink_id; 1030 + } else { 1031 + struct mt7996_sta_link *msta_seclink; 
1241 1032 1242 - kfree_rcu(msta_link, rcu_head); 1033 + /* switch to the secondary link */ 1034 + msta_seclink = mt76_dereference( 1035 + msta->link[msta->seclink_id], 1036 + &dev->mt76); 1037 + if (msta_seclink) { 1038 + msta->deflink_id = msta->seclink_id; 1039 + mt7996_sta_init_txq_wcid(sta, 1040 + msta_seclink->wcid.idx); 1041 + } 1042 + } 1043 + } else if (msta->seclink_id == link_id) { 1044 + msta->seclink_id = msta->deflink_id; 1045 + } 1046 + msta_link->wcid.link_valid = false; 1243 1047 } 1048 + 1049 + if (flush) { 1050 + rcu_assign_pointer(msta->link[link_id], NULL); 1051 + rcu_assign_pointer(dev->mt76.wcid[msta_link->wcid.idx], NULL); 1052 + mt76_wcid_mask_clear(dev->mt76.wcid_mask, msta_link->wcid.idx); 1053 + if (msta_link != &msta->deflink) 1054 + kfree_rcu(msta_link, rcu_head); 1055 + } 1056 + } 1057 + 1058 + static void 1059 + mt7996_mac_sta_remove_links(struct mt7996_dev *dev, struct ieee80211_vif *vif, 1060 + struct ieee80211_sta *sta, unsigned long links, 1061 + bool flush) 1062 + { 1063 + unsigned int link_id; 1064 + 1065 + for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) 1066 + mt7996_mac_sta_remove_link(dev, sta, link_id, flush); 1244 1067 } 1245 1068 1246 1069 static int ··· 1264 1067 for_each_set_bit(link_id, &new_links, IEEE80211_MLD_MAX_NUM_LINKS) { 1265 1068 struct ieee80211_bss_conf *link_conf; 1266 1069 struct ieee80211_link_sta *link_sta; 1070 + struct mt7996_sta_link *msta_link; 1267 1071 struct mt7996_vif_link *link; 1268 1072 struct mt76_phy *mphy; 1269 1073 1270 - if (rcu_access_pointer(msta->link[link_id])) 1074 + msta_link = mt76_dereference(msta->link[link_id], &dev->mt76); 1075 + if (msta_link) { 1076 + msta_link->wcid.link_valid = true; 1271 1077 continue; 1078 + } 1272 1079 1273 1080 link_conf = link_conf_dereference_protected(vif, link_id); 1274 1081 if (!link_conf) { ··· 1309 1108 return 0; 1310 1109 1311 1110 error_unlink: 1312 - mt7996_mac_sta_remove_links(dev, vif, sta, new_links); 1111 + 
mt7996_mac_sta_remove_links(dev, vif, sta, new_links, true); 1313 1112 1314 1113 return err; 1315 1114 } ··· 1326 1125 1327 1126 mutex_lock(&dev->mt76.mutex); 1328 1127 1329 - mt7996_mac_sta_remove_links(dev, vif, sta, rem); 1128 + mt7996_mac_sta_remove_links(dev, vif, sta, rem, false); 1330 1129 ret = mt7996_mac_sta_add_links(dev, vif, sta, add); 1331 1130 1332 1131 mutex_unlock(&dev->mt76.mutex); ··· 1435 1234 mt7996_mac_sta_remove(struct mt7996_dev *dev, struct ieee80211_vif *vif, 1436 1235 struct ieee80211_sta *sta) 1437 1236 { 1438 - unsigned long links = sta->valid_links ? sta->valid_links : BIT(0); 1237 + struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; 1238 + int i; 1439 1239 1440 1240 mutex_lock(&dev->mt76.mutex); 1441 - mt7996_mac_sta_remove_links(dev, vif, sta, links); 1241 + for (i = 0; i < ARRAY_SIZE(msta->link); i++) 1242 + mt7996_mac_sta_remove_link(dev, sta, i, true); 1442 1243 mutex_unlock(&dev->mt76.mutex); 1443 1244 } 1444 1245 ··· 1692 1489 1693 1490 u64 __mt7996_get_tsf(struct ieee80211_hw *hw, struct mt7996_vif_link *link) 1694 1491 { 1492 + struct mt7996_phy *phy = mt7996_vif_link_phy(link); 1695 1493 struct mt7996_dev *dev = mt7996_hw_dev(hw); 1696 - struct mt7996_phy *phy = link->phy; 1697 1494 union { 1698 1495 u64 t64; 1699 1496 u32 t32[2]; ··· 1752 1549 1753 1550 n = link->mt76.omac_idx > HW_BSSID_MAX ? 
HW_BSSID_0 1754 1551 : link->mt76.omac_idx; 1755 - phy = link->phy; 1552 + phy = mt7996_vif_link_phy(link); 1756 1553 if (!phy) 1757 1554 goto unlock; 1758 1555 ··· 1786 1583 if (!link) 1787 1584 goto unlock; 1788 1585 1789 - phy = link->phy; 1586 + phy = mt7996_vif_link_phy(link); 1790 1587 if (!phy) 1791 1588 goto unlock; 1792 1589 ··· 1916 1713 struct mt7996_sta_link *msta_link) 1917 1714 { 1918 1715 struct mt7996_sta *msta = msta_link->sta; 1919 - struct mt7996_dev *dev = msta->vif->deflink.phy->dev; 1716 + struct mt7996_phy *phy = mt7996_vif_link_phy(&msta->vif->deflink); 1717 + struct mt7996_dev *dev; 1920 1718 u32 *changed = data; 1921 1719 1720 + if (!phy) 1721 + return; 1722 + 1723 + dev = phy->dev; 1922 1724 spin_lock_bh(&dev->mt76.sta_poll_lock); 1923 1725 1924 1726 msta_link->changed |= *changed; ··· 2412 2204 path->mtk_wdma.wdma_idx = wed->wdma_idx; 2413 2205 else 2414 2206 #endif 2207 + if (is_mt7996(&dev->mt76) && mt76_npu_device_active(&dev->mt76) && 2208 + msta_link->wcid.phy_idx == MT_BAND2) 2209 + path->mtk_wdma.wdma_idx = 1; 2210 + else 2415 2211 path->mtk_wdma.wdma_idx = link->mt76.band_idx; 2416 2212 path->mtk_wdma.bss = link->mt76.idx; 2417 2213 path->mtk_wdma.queue = 0; ··· 2487 2275 MT7996_WATCHDOG_TIME); 2488 2276 } 2489 2277 2278 + static int 2279 + mt7996_set_eml_op_mode(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 2280 + struct ieee80211_sta *sta, 2281 + struct ieee80211_eml_params *eml_params) 2282 + { 2283 + struct mt7996_dev *dev = mt7996_hw_dev(hw); 2284 + int ret; 2285 + 2286 + mutex_lock(&dev->mt76.mutex); 2287 + ret = mt7996_mcu_set_emlsr_mode(dev, vif, sta, eml_params); 2288 + mutex_unlock(&dev->mt76.mutex); 2289 + 2290 + return ret; 2291 + } 2292 + 2490 2293 const struct ieee80211_ops mt7996_ops = { 2491 2294 .add_chanctx = mt76_add_chanctx, 2492 2295 .remove_chanctx = mt76_remove_chanctx, ··· 2533 2306 .release_buffered_frames = mt76_release_buffered_frames, 2534 2307 .get_txpower = mt7996_get_txpower, 2535 2308 
.channel_switch_beacon = mt7996_channel_switch_beacon, 2309 + .post_channel_switch = mt7996_post_channel_switch, 2536 2310 .get_stats = mt7996_get_stats, 2537 2311 .get_et_sset_count = mt7996_get_et_sset_count, 2538 2312 .get_et_stats = mt7996_get_et_stats, ··· 2565 2337 .change_vif_links = mt7996_change_vif_links, 2566 2338 .change_sta_links = mt7996_mac_sta_change_links, 2567 2339 .reconfig_complete = mt7996_reconfig_complete, 2340 + .set_eml_op_mode = mt7996_set_eml_op_mode, 2568 2341 };
+685 -138
drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
··· 18 18 case MT7992_VAR_TYPE_23: \ 19 19 _fw = MT7992_##name##_23; \ 20 20 break; \ 21 + case MT7992_VAR_TYPE_24: \ 22 + _fw = MT7992_##name##_24; \ 23 + break; \ 21 24 default: \ 22 25 _fw = MT7992_##name; \ 23 26 } \ ··· 128 125 struct mt7996_vif_link *link, 129 126 __le16 *he_mcs, u16 mcs_map) 130 127 { 128 + struct mt76_phy *mphy = mt76_vif_link_phy(&link->mt76); 131 129 int nss, max_nss = link_sta->rx_nss > 3 ? 4 : link_sta->rx_nss; 132 - enum nl80211_band band = link->phy->mt76->chandef.chan->band; 133 - const u16 *mask = link->bitrate_mask.control[band].he_mcs; 130 + enum nl80211_band band; 131 + const u16 *mask; 132 + 133 + if (!mphy) 134 + return; 135 + 136 + band = mphy->chandef.chan->band; 137 + mask = link->bitrate_mask.control[band].he_mcs; 134 138 135 139 for (nss = 0; nss < max_nss; nss++) { 136 140 int mcs; ··· 219 209 mt7996_mcu_parse_response(struct mt76_dev *mdev, int cmd, 220 210 struct sk_buff *skb, int seq) 221 211 { 212 + struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76); 222 213 struct mt7996_mcu_rxd *rxd; 223 214 struct mt7996_mcu_uni_event *event; 224 215 int mcu_cmd = FIELD_GET(__MCU_CMD_FIELD_ID, cmd); ··· 228 217 if (!skb) { 229 218 dev_err(mdev->dev, "Message %08x (seq %d) timeout\n", 230 219 cmd, seq); 220 + 221 + if (!test_and_set_bit(MT76_MCU_RESET, &dev->mphy.state)) { 222 + dev->recovery.restart = true; 223 + wake_up(&dev->mt76.mcu.wait); 224 + queue_work(dev->mt76.wq, &dev->reset_work); 225 + wake_up(&dev->reset_wait); 226 + } 227 + 231 228 return -ETIMEDOUT; 232 229 } 233 230 ··· 252 233 event = (struct mt7996_mcu_uni_event *)skb->data; 253 234 ret = le32_to_cpu(event->status); 254 235 /* skip invalid event */ 255 - if (mcu_cmd != event->cid) 236 + if (mcu_cmd != le16_to_cpu(event->cid)) 256 237 ret = -EAGAIN; 257 238 } else { 258 239 skb_pull(skb, sizeof(struct mt7996_mcu_rxd)); ··· 278 259 mdev->mcu.timeout = 2 * HZ; 279 260 return; 280 261 case MCU_UNI_CMD_EFUSE_CTRL: 281 - mdev->mcu.timeout = 20 * HZ; 
262 + case MCU_UNI_CMD_EXT_EEPROM_CTRL: 263 + mdev->mcu.timeout = 30 * HZ; 282 264 return; 283 265 default: 284 266 break; ··· 333 313 uni_txd->pkt_type = MCU_PKT_ID; 334 314 uni_txd->seq = seq; 335 315 336 - if (cmd & __MCU_CMD_FIELD_QUERY) 337 - uni_txd->option = MCU_CMD_UNI_QUERY_ACK; 338 - else 339 - uni_txd->option = MCU_CMD_UNI_EXT_ACK; 316 + uni_txd->option = MCU_CMD_UNI; 317 + if (!(cmd & __MCU_CMD_FIELD_QUERY)) 318 + uni_txd->option |= MCU_CMD_SET; 340 319 341 - if (mcu_cmd == MCU_UNI_CMD_SDO) 342 - uni_txd->option &= ~MCU_CMD_ACK; 320 + if (wait_seq) 321 + uni_txd->option |= MCU_CMD_ACK; 343 322 344 323 if ((cmd & __MCU_CMD_FIELD_WA) && (cmd & __MCU_CMD_FIELD_WM)) 345 324 uni_txd->s2d_index = MCU_S2D_H2CN; ··· 409 390 sizeof(req), false); 410 391 } 411 392 393 + struct mt7996_mcu_countdown_data { 394 + struct mt76_phy *mphy; 395 + u8 omac_idx; 396 + }; 397 + 412 398 static void 413 399 mt7996_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) 414 400 { 415 - if (!vif->bss_conf.csa_active || vif->type == NL80211_IFTYPE_STATION) 401 + struct mt7996_mcu_countdown_data *cdata = (void *)priv; 402 + struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; 403 + struct ieee80211_bss_conf *link_conf = NULL; 404 + unsigned long valid_links = vif->valid_links ?: BIT(0); 405 + unsigned int link_id; 406 + 407 + if (vif->type == NL80211_IFTYPE_STATION) 416 408 return; 417 409 418 - ieee80211_csa_finish(vif, 0); 410 + for_each_set_bit(link_id, &valid_links, IEEE80211_MLD_MAX_NUM_LINKS) { 411 + struct mt76_vif_link *mlink = 412 + rcu_dereference(mvif->mt76.link[link_id]); 413 + 414 + if (mlink && mlink->band_idx == cdata->mphy->band_idx && 415 + mlink->omac_idx == cdata->omac_idx) { 416 + link_conf = rcu_dereference(vif->link_conf[link_id]); 417 + break; 418 + } 419 + } 420 + 421 + if (!link_conf || !link_conf->csa_active) 422 + return; 423 + 424 + ieee80211_csa_finish(vif, link_conf->link_id); 425 + } 426 + 427 + static void 428 + 
mt7996_mcu_cca_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) 429 + { 430 + struct mt7996_mcu_countdown_data *cdata = (void *)priv; 431 + struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; 432 + struct ieee80211_bss_conf *link_conf = NULL; 433 + unsigned long valid_links = vif->valid_links ?: BIT(0); 434 + unsigned int link_id; 435 + 436 + if (vif->type == NL80211_IFTYPE_STATION) 437 + return; 438 + 439 + for_each_set_bit(link_id, &valid_links, IEEE80211_MLD_MAX_NUM_LINKS) { 440 + struct mt76_vif_link *mlink = 441 + rcu_dereference(mvif->mt76.link[link_id]); 442 + 443 + if (mlink && mlink->band_idx == cdata->mphy->band_idx && 444 + mlink->omac_idx == cdata->omac_idx) { 445 + link_conf = rcu_dereference(vif->link_conf[link_id]); 446 + break; 447 + } 448 + } 449 + 450 + if (!link_conf || !link_conf->color_change_active) 451 + return; 452 + 453 + ieee80211_color_change_finish(vif, link_conf->link_id); 454 + } 455 + 456 + static void 457 + mt7996_mcu_ie_countdown(struct mt7996_dev *dev, struct sk_buff *skb) 458 + { 459 + #define UNI_EVENT_IE_COUNTDOWN_CSA 0 460 + #define UNI_EVENT_IE_COUNTDOWN_BCC 1 461 + struct header { 462 + u8 band; 463 + u8 rsv[3]; 464 + }; 465 + struct mt7996_mcu_rxd *rxd = (struct mt7996_mcu_rxd *)skb->data; 466 + const char *data = (char *)&rxd[1], *tail; 467 + struct header *hdr = (struct header *)data; 468 + struct tlv *tlv = (struct tlv *)(data + 4); 469 + struct mt7996_mcu_countdown_notify *event; 470 + struct mt7996_mcu_countdown_data cdata; 471 + 472 + if (hdr->band >= ARRAY_SIZE(dev->mt76.phys)) 473 + return; 474 + 475 + cdata.mphy = dev->mt76.phys[hdr->band]; 476 + if (!cdata.mphy) 477 + return; 478 + 479 + tail = skb->data + skb->len; 480 + data += sizeof(*hdr); 481 + while (data + sizeof(*tlv) < tail && le16_to_cpu(tlv->len)) { 482 + event = (struct mt7996_mcu_countdown_notify *)tlv->data; 483 + 484 + cdata.omac_idx = event->omac_idx; 485 + 486 + switch (le16_to_cpu(tlv->tag)) { 487 + case UNI_EVENT_IE_COUNTDOWN_CSA: 
488 + ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), 489 + IEEE80211_IFACE_ITER_RESUME_ALL, 490 + mt7996_mcu_csa_finish, &cdata); 491 + break; 492 + case UNI_EVENT_IE_COUNTDOWN_BCC: 493 + ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), 494 + IEEE80211_IFACE_ITER_RESUME_ALL, 495 + mt7996_mcu_cca_finish, &cdata); 496 + break; 497 + default: 498 + break; 499 + } 500 + 501 + data += le16_to_cpu(tlv->len); 502 + tlv = (struct tlv *)data; 503 + } 419 504 } 420 505 421 506 static void ··· 539 416 break; 540 417 case MT_RDD_IDX_BACKGROUND: 541 418 if (!dev->rdd2_phy) 542 - return; 419 + goto err; 543 420 mphy = dev->rdd2_phy->mt76; 544 421 break; 545 422 default: 546 - dev_err(dev->mt76.dev, "Unknown RDD idx %d\n", r->rdd_idx); 547 - return; 423 + goto err; 548 424 } 549 425 550 426 if (!mphy) 551 - return; 427 + goto err; 552 428 553 - if (r->rdd_idx == MT_RDD_IDX_BACKGROUND) 429 + if (r->rdd_idx == MT_RDD_IDX_BACKGROUND) { 554 430 cfg80211_background_radar_event(mphy->hw->wiphy, 555 431 &dev->rdd2_chandef, 556 432 GFP_ATOMIC); 557 - else 433 + } else { 434 + struct mt7996_phy *phy = mphy->priv; 435 + 436 + phy->rdd_tx_paused = true; 558 437 ieee80211_radar_detected(mphy->hw, NULL); 438 + } 559 439 dev->hw_pattern++; 440 + 441 + return; 442 + 443 + err: 444 + dev_err(dev->mt76.dev, "Invalid RDD idx %d\n", r->rdd_idx); 560 445 } 561 446 562 447 static void ··· 605 474 } 606 475 607 476 wiphy_info(mt76_hw(dev)->wiphy, "%s: %.*s", type, len, data); 608 - } 609 - 610 - static void 611 - mt7996_mcu_cca_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) 612 - { 613 - if (!vif->bss_conf.color_change_active || vif->type == NL80211_IFTYPE_STATION) 614 - return; 615 - 616 - ieee80211_color_change_finish(vif, 0); 617 - } 618 - 619 - static void 620 - mt7996_mcu_ie_countdown(struct mt7996_dev *dev, struct sk_buff *skb) 621 - { 622 - #define UNI_EVENT_IE_COUNTDOWN_CSA 0 623 - #define UNI_EVENT_IE_COUNTDOWN_BCC 1 624 - struct header { 625 - u8 band; 626 - u8 
rsv[3]; 627 - }; 628 - struct mt76_phy *mphy = &dev->mt76.phy; 629 - struct mt7996_mcu_rxd *rxd = (struct mt7996_mcu_rxd *)skb->data; 630 - const char *data = (char *)&rxd[1], *tail; 631 - struct header *hdr = (struct header *)data; 632 - struct tlv *tlv = (struct tlv *)(data + 4); 633 - 634 - if (hdr->band >= ARRAY_SIZE(dev->mt76.phys)) 635 - return; 636 - 637 - if (hdr->band && dev->mt76.phys[hdr->band]) 638 - mphy = dev->mt76.phys[hdr->band]; 639 - 640 - tail = skb->data + skb->len; 641 - data += sizeof(struct header); 642 - while (data + sizeof(struct tlv) < tail && le16_to_cpu(tlv->len)) { 643 - switch (le16_to_cpu(tlv->tag)) { 644 - case UNI_EVENT_IE_COUNTDOWN_CSA: 645 - ieee80211_iterate_active_interfaces_atomic(mphy->hw, 646 - IEEE80211_IFACE_ITER_RESUME_ALL, 647 - mt7996_mcu_csa_finish, mphy->hw); 648 - break; 649 - case UNI_EVENT_IE_COUNTDOWN_BCC: 650 - ieee80211_iterate_active_interfaces_atomic(mphy->hw, 651 - IEEE80211_IFACE_ITER_RESUME_ALL, 652 - mt7996_mcu_cca_finish, mphy->hw); 653 - break; 654 - } 655 - 656 - data += le16_to_cpu(tlv->len); 657 - tlv = (struct tlv *)data; 658 - } 659 477 } 660 478 661 479 static int ··· 1250 1170 MCU_WMWA_UNI_CMD(BSS_INFO_UPDATE), true); 1251 1171 } 1252 1172 1173 + int mt7996_mcu_update_bss_rfch(struct mt7996_phy *phy, struct mt7996_vif_link *link) 1174 + { 1175 + struct mt7996_dev *dev = phy->dev; 1176 + struct sk_buff *skb; 1177 + 1178 + skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &link->mt76, 1179 + MT7996_BSS_UPDATE_MAX_SIZE); 1180 + if (IS_ERR(skb)) 1181 + return PTR_ERR(skb); 1182 + 1183 + mt7996_mcu_bss_rfch_tlv(skb, phy); 1184 + 1185 + return mt76_mcu_skb_send_msg(&dev->mt76, skb, 1186 + MCU_WMWA_UNI_CMD(BSS_INFO_UPDATE), true); 1187 + } 1188 + 1189 + int mt7996_mcu_set_protection(struct mt7996_phy *phy, struct mt7996_vif_link *link, 1190 + u8 ht_mode, bool use_cts_prot) 1191 + { 1192 + struct mt7996_dev *dev = phy->dev; 1193 + struct bss_prot_tlv *prot; 1194 + struct sk_buff *skb; 1195 + struct tlv *tlv; 
1196 + enum { 1197 + PROT_NONMEMBER = BIT(1), 1198 + PROT_20MHZ = BIT(2), 1199 + PROT_NONHT_MIXED = BIT(3), 1200 + PROT_LEGACY_ERP = BIT(5), 1201 + PROT_NONGF_STA = BIT(7), 1202 + }; 1203 + 1204 + skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &link->mt76, 1205 + MT7996_BSS_UPDATE_MAX_SIZE); 1206 + if (IS_ERR(skb)) 1207 + return PTR_ERR(skb); 1208 + 1209 + tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_PROTECT_INFO, 1210 + sizeof(*prot)); 1211 + prot = (struct bss_prot_tlv *)tlv; 1212 + 1213 + switch (ht_mode & IEEE80211_HT_OP_MODE_PROTECTION) { 1214 + case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER: 1215 + prot->prot_mode = cpu_to_le32(PROT_NONMEMBER); 1216 + break; 1217 + case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ: 1218 + prot->prot_mode = cpu_to_le32(PROT_20MHZ); 1219 + break; 1220 + case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED: 1221 + prot->prot_mode = cpu_to_le32(PROT_NONHT_MIXED); 1222 + break; 1223 + } 1224 + 1225 + if (ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) 1226 + prot->prot_mode |= cpu_to_le32(PROT_NONGF_STA); 1227 + 1228 + if (use_cts_prot) 1229 + prot->prot_mode |= cpu_to_le32(PROT_LEGACY_ERP); 1230 + 1231 + return mt76_mcu_skb_send_msg(&dev->mt76, skb, 1232 + MCU_WM_UNI_CMD(BSS_INFO_UPDATE), true); 1233 + } 1234 + 1235 + int mt7996_mcu_set_emlsr_mode(struct mt7996_dev *dev, 1236 + struct ieee80211_vif *vif, 1237 + struct ieee80211_sta *sta, 1238 + struct ieee80211_eml_params *eml_params) 1239 + { 1240 + struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; 1241 + struct mt7996_sta_link *msta_link; 1242 + struct sta_rec_eml_op *eml_op; 1243 + struct mt7996_vif_link *link; 1244 + struct sk_buff *skb; 1245 + struct tlv *tlv; 1246 + 1247 + msta_link = mt76_dereference(msta->link[eml_params->link_id], 1248 + &dev->mt76); 1249 + if (!msta_link) 1250 + return -EINVAL; 1251 + 1252 + link = mt7996_vif_link(dev, vif, eml_params->link_id); 1253 + if (!link) 1254 + return -EINVAL; 1255 + 1256 + skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, 
&link->mt76, 1257 + &msta_link->wcid, 1258 + MT7996_STA_UPDATE_MAX_SIZE); 1259 + if (IS_ERR(skb)) 1260 + return PTR_ERR(skb); 1261 + 1262 + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_EML_OP, sizeof(*eml_op)); 1263 + eml_op = (struct sta_rec_eml_op *)tlv; 1264 + eml_op->link_bitmap = 0; 1265 + 1266 + if (eml_params->control & IEEE80211_EML_CTRL_EMLSR_MODE) { 1267 + unsigned long link_bitmap = eml_params->link_bitmap; 1268 + unsigned int link_id; 1269 + 1270 + for_each_set_bit(link_id, &link_bitmap, 1271 + IEEE80211_MLD_MAX_NUM_LINKS) { 1272 + struct mt76_phy *mphy; 1273 + 1274 + link = mt7996_vif_link(dev, vif, link_id); 1275 + if (!link) 1276 + continue; 1277 + 1278 + mphy = mt76_vif_link_phy(&link->mt76); 1279 + if (!mphy) 1280 + continue; 1281 + 1282 + eml_op->link_bitmap |= BIT(mphy->band_idx); 1283 + } 1284 + } 1285 + 1286 + return mt76_mcu_skb_send_msg(&dev->mt76, skb, 1287 + MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true); 1288 + } 1289 + 1253 1290 int mt7996_mcu_set_timing(struct mt7996_phy *phy, struct ieee80211_vif *vif, 1254 1291 struct ieee80211_bss_conf *link_conf) 1255 1292 { ··· 1976 1779 #define EBF_MODE BIT(0) 1977 1780 #define IBF_MODE BIT(1) 1978 1781 #define BF_MAT_ORDER 4 1782 + struct mt7996_phy *phy = mt7996_vif_link_phy(link); 1979 1783 struct ieee80211_vif *vif = link_conf->vif; 1980 - struct mt7996_phy *phy = link->phy; 1981 - int tx_ant = hweight16(phy->mt76->chainmask) - 1; 1982 1784 struct sta_rec_bf *bf; 1983 1785 struct tlv *tlv; 1984 1786 static const u8 matrix[BF_MAT_ORDER][BF_MAT_ORDER] = { ··· 1986 1790 {2, 4, 4, 0}, /* 3x1, 3x2, 3x3, 3x4 */ 1987 1791 {3, 5, 6, 0} /* 4x1, 4x2, 4x3, 4x4 */ 1988 1792 }; 1793 + int tx_ant; 1989 1794 bool ebf; 1795 + 1796 + if (!phy) 1797 + return; 1990 1798 1991 1799 if (!(link_sta->ht_cap.ht_supported || link_sta->he_cap.has_he)) 1992 1800 return; ··· 2007 1807 * ht: iBF only, since mac80211 lacks of eBF support 2008 1808 */ 2009 1809 if (link_sta->eht_cap.has_eht) 2010 - mt7996_mcu_sta_bfer_eht(link_sta, 
vif, link->phy, bf, ebf); 1810 + mt7996_mcu_sta_bfer_eht(link_sta, vif, phy, bf, ebf); 2011 1811 else if (link_sta->he_cap.has_he) 2012 - mt7996_mcu_sta_bfer_he(link_sta, vif, link->phy, bf, ebf); 1812 + mt7996_mcu_sta_bfer_he(link_sta, vif, phy, bf, ebf); 2013 1813 else if (link_sta->vht_cap.vht_supported) 2014 - mt7996_mcu_sta_bfer_vht(link_sta, link->phy, bf, ebf); 1814 + mt7996_mcu_sta_bfer_vht(link_sta, phy, bf, ebf); 2015 1815 else if (link_sta->ht_cap.ht_supported) 2016 - mt7996_mcu_sta_bfer_ht(link_sta, link->phy, bf, ebf); 1816 + mt7996_mcu_sta_bfer_ht(link_sta, phy, bf, ebf); 2017 1817 else 2018 1818 return; 2019 1819 2020 1820 bf->bf_cap = ebf ? EBF_MODE : (dev->ibf ? IBF_MODE : 0); 1821 + tx_ant = hweight16(phy->mt76->chainmask) - 1; 2021 1822 if (is_mt7992(&dev->mt76) && tx_ant == 4) 2022 1823 bf->bf_cap |= IBF_MODE; 2023 1824 ··· 2050 1849 struct ieee80211_link_sta *link_sta, 2051 1850 struct mt7996_vif_link *link) 2052 1851 { 2053 - struct mt7996_phy *phy = link->phy; 2054 - int tx_ant = hweight8(phy->mt76->antenna_mask) - 1; 1852 + struct mt7996_phy *phy = mt7996_vif_link_phy(link); 2055 1853 struct sta_rec_bfee *bfee; 2056 1854 struct tlv *tlv; 2057 1855 u8 nrow = 0; 1856 + int tx_ant; 1857 + 1858 + if (!phy) 1859 + return; 2058 1860 2059 1861 if (!(link_sta->vht_cap.vht_supported || link_sta->he_cap.has_he)) 2060 1862 return; ··· 2081 1877 } 2082 1878 2083 1879 /* reply with identity matrix to avoid 2x2 BF negative gain */ 1880 + tx_ant = hweight8(phy->mt76->antenna_mask) - 1; 2084 1881 bfee->fb_identity_matrix = (nrow == 1 && tx_ant == 2); 2085 1882 } 2086 1883 ··· 2265 2060 struct ieee80211_sta *sta; 2266 2061 int ret, nrates = 0, idx; 2267 2062 enum nl80211_band band; 2063 + struct mt76_phy *mphy; 2268 2064 bool has_he; 2269 2065 2270 2066 #define __sta_phy_bitrate_mask_check(_mcs, _gi, _ht, _he) \ ··· 2299 2093 if (!link_sta) 2300 2094 goto error_unlock; 2301 2095 2302 - band = link->phy->mt76->chandef.chan->band; 2096 + mphy = 
mt76_vif_link_phy(&link->mt76); 2097 + if (!mphy) 2098 + goto error_unlock; 2099 + 2100 + band = mphy->chandef.chan->band; 2303 2101 has_he = link_sta->he_cap.has_he; 2304 2102 mask = link->bitrate_mask; 2305 2103 idx = msta_link->wcid.idx; ··· 2383 2173 struct mt7996_vif_link *link) 2384 2174 { 2385 2175 #define INIT_RCPI 180 2386 - struct mt76_phy *mphy = link->phy->mt76; 2387 - struct cfg80211_chan_def *chandef = &mphy->chandef; 2176 + struct mt76_phy *mphy = mt76_vif_link_phy(&link->mt76); 2388 2177 struct cfg80211_bitrate_mask *mask = &link->bitrate_mask; 2389 2178 u32 cap = link_sta->sta->wme ? STA_CAP_WMM : 0; 2390 - enum nl80211_band band = chandef->chan->band; 2179 + struct cfg80211_chan_def *chandef; 2391 2180 struct sta_rec_ra_uni *ra; 2181 + enum nl80211_band band; 2392 2182 struct tlv *tlv; 2393 - u32 supp_rate = link_sta->supp_rates[band]; 2183 + u32 supp_rate; 2184 + 2185 + if (!mphy) 2186 + return; 2394 2187 2395 2188 tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra)); 2396 2189 ra = (struct sta_rec_ra_uni *)tlv; 2190 + 2191 + chandef = &mphy->chandef; 2192 + band = chandef->chan->band; 2193 + supp_rate = link_sta->supp_rates[band]; 2397 2194 2398 2195 ra->valid = true; 2399 2196 ra->auto_rate = true; ··· 2583 2366 sizeof(req), true); 2584 2367 } 2585 2368 2369 + int mt7996_mcu_mld_reconf_stop_link(struct mt7996_dev *dev, 2370 + struct ieee80211_vif *vif, 2371 + u16 removed_links) 2372 + { 2373 + unsigned long rem_links = removed_links; 2374 + struct mld_reconf_stop_link *sl; 2375 + struct mld_req_hdr hdr = {}; 2376 + unsigned int link_id; 2377 + struct sk_buff *skb; 2378 + struct tlv *tlv; 2379 + 2380 + skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(hdr) + sizeof(*sl)); 2381 + if (!skb) 2382 + return -ENOMEM; 2383 + 2384 + memcpy(hdr.mld_addr, vif->addr, ETH_ALEN); 2385 + skb_put_data(skb, &hdr, sizeof(hdr)); 2386 + 2387 + tlv = mt7996_mcu_add_uni_tlv(skb, UNI_CMD_MLD_RECONF_STOP_LINK, 2388 + sizeof(*sl)); 2389 + sl = (struct 
mld_reconf_stop_link *)tlv; 2390 + sl->link_bitmap = cpu_to_le16(removed_links); 2391 + 2392 + for_each_set_bit(link_id, &rem_links, IEEE80211_MLD_MAX_NUM_LINKS) { 2393 + struct mt7996_vif_link *link; 2394 + 2395 + link = mt7996_vif_link(dev, vif, link_id); 2396 + if (!link) 2397 + continue; 2398 + 2399 + sl->bss_idx[link_id] = link->mt76.idx; 2400 + } 2401 + 2402 + return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_WM_UNI_CMD(MLD), 2403 + true); 2404 + } 2405 + 2406 + int mt7996_mcu_mld_link_oper(struct mt7996_dev *dev, 2407 + struct ieee80211_bss_conf *link_conf, 2408 + struct mt7996_vif_link *link, bool add) 2409 + { 2410 + struct ieee80211_vif *vif = link_conf->vif; 2411 + struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; 2412 + struct bss_mld_link_op_tlv *mld_op; 2413 + struct sk_buff *skb; 2414 + struct tlv *tlv; 2415 + 2416 + skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &link->mt76, 2417 + MT7996_BSS_UPDATE_MAX_SIZE); 2418 + if (IS_ERR(skb)) 2419 + return PTR_ERR(skb); 2420 + 2421 + tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_MLD_LINK_OP, 2422 + sizeof(*mld_op)); 2423 + mld_op = (struct bss_mld_link_op_tlv *)tlv; 2424 + mld_op->link_operation = add; 2425 + mld_op->own_mld_id = link->mld_idx; 2426 + mld_op->link_id = link_conf->link_id; 2427 + mld_op->group_mld_id = add ? mvif->mld_group_idx : 0xff; 2428 + mld_op->remap_idx = add ? 
mvif->mld_remap_idx : 0xff; 2429 + memcpy(mld_op->mac_addr, vif->addr, ETH_ALEN); 2430 + 2431 + return mt76_mcu_skb_send_msg(&dev->mt76, skb, 2432 + MCU_WMWA_UNI_CMD(BSS_INFO_UPDATE), true); 2433 + } 2434 + 2586 2435 static void 2587 2436 mt7996_mcu_sta_mld_setup_tlv(struct mt7996_dev *dev, struct sk_buff *skb, 2588 2437 struct ieee80211_vif *vif, ··· 2812 2529 MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true); 2813 2530 } 2814 2531 2532 + void mt7996_mcu_update_sta_rec_bw(void *data, struct ieee80211_sta *sta) 2533 + { 2534 + struct mt7996_vif_link *link = (struct mt7996_vif_link *)data; 2535 + struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; 2536 + struct mt7996_phy *phy = mt7996_vif_link_phy(link); 2537 + struct mt7996_sta_link *msta_link; 2538 + struct mt7996_dev *dev; 2539 + struct ieee80211_bss_conf *link_conf; 2540 + struct ieee80211_link_sta *link_sta; 2541 + struct ieee80211_vif *vif; 2542 + struct sk_buff *skb; 2543 + int link_id; 2544 + 2545 + if (!phy) 2546 + return; 2547 + 2548 + if (link->mt76.mvif != &msta->vif->mt76) 2549 + return; 2550 + 2551 + dev = phy->dev; 2552 + link_id = link->msta_link.wcid.link_id; 2553 + link_sta = link_sta_dereference_protected(sta, link_id); 2554 + if (!link_sta) 2555 + return; 2556 + 2557 + msta_link = mt76_dereference(msta->link[link_id], &dev->mt76); 2558 + if (!msta_link) 2559 + return; 2560 + 2561 + vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv); 2562 + link_conf = link_conf_dereference_protected(vif, link_id); 2563 + if (!link_conf) 2564 + return; 2565 + 2566 + skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &link->mt76, 2567 + &msta_link->wcid, 2568 + MT7996_STA_UPDATE_MAX_SIZE); 2569 + if (IS_ERR(skb)) 2570 + return; 2571 + 2572 + mt7996_mcu_sta_bfer_tlv(dev, skb, link_conf, link_sta, link); 2573 + mt7996_mcu_sta_rate_ctrl_tlv(skb, dev, vif, link_conf, link_sta, link); 2574 + 2575 + mt76_mcu_skb_send_msg(&dev->mt76, skb, 2576 + MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true); 2577 + } 2578 + 
2815 2579 static int 2816 2580 mt7996_mcu_sta_key_tlv(struct mt76_dev *dev, struct mt76_wcid *wcid, 2817 2581 struct sk_buff *skb, ··· 3006 2676 3007 2677 info = (struct bss_bcn_cntdwn_tlv *)tlv; 3008 2678 info->cnt = skb->data[offs->cntdwn_counter_offs[0]]; 2679 + 2680 + /* abort the CCA countdown when starting CSA countdown */ 2681 + if (csa) { 2682 + struct bss_bcn_cntdwn_tlv *cca_info; 2683 + 2684 + tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_BCN_BCC, 2685 + sizeof(*cca_info)); 2686 + cca_info = (struct bss_bcn_cntdwn_tlv *)tlv; 2687 + cca_info->cca.abort = true; 2688 + } 3009 2689 } 3010 2690 3011 2691 static void ··· 3108 2768 { 3109 2769 struct mt7996_dev *dev = mt7996_hw_dev(hw); 3110 2770 struct mt7996_vif_link *link = mt7996_vif_conf_link(dev, vif, link_conf); 2771 + struct mt76_phy *mphy = link ? mt76_vif_link_phy(&link->mt76) : NULL; 3111 2772 struct mt76_vif_link *mlink = link ? &link->mt76 : NULL; 3112 2773 struct ieee80211_mutable_offsets offs; 3113 2774 struct ieee80211_tx_info *info; ··· 3123 2782 if (!mlink) 3124 2783 return -EINVAL; 3125 2784 3126 - if (link->phy && link->phy->mt76->offchannel) 2785 + if (mphy && mphy->offchannel) 3127 2786 enabled = false; 3128 2787 3129 2788 rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, mlink, ··· 3174 2833 { 3175 2834 #define OFFLOAD_TX_MODE_SU BIT(0) 3176 2835 #define OFFLOAD_TX_MODE_MU BIT(1) 2836 + struct mt76_phy *mphy = mt76_vif_link_phy(&link->mt76); 3177 2837 struct ieee80211_vif *vif = link_conf->vif; 3178 2838 struct ieee80211_hw *hw = mt76_hw(dev); 3179 - struct mt7996_phy *phy = link->phy; 3180 2839 struct mt76_wcid *wcid = &dev->mt76.global_wcid; 3181 2840 struct bss_inband_discovery_tlv *discov; 3182 2841 struct ieee80211_tx_info *info; ··· 3187 2846 u8 *buf, interval; 3188 2847 int len; 3189 2848 3190 - if (!phy) 2849 + if (!mphy) 3191 2850 return -EINVAL; 3192 2851 3193 - chandef = &phy->mt76->chandef; 2852 + chandef = &mphy->chandef; 3194 2853 band = chandef->chan->band; 3195 2854 3196 2855 
if (link_conf->nontransmitted) ··· 3228 2887 info = IEEE80211_SKB_CB(skb); 3229 2888 info->control.vif = vif; 3230 2889 info->band = band; 3231 - info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->mt76->band_idx); 2890 + info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, mphy->band_idx); 3232 2891 3233 2892 len = ALIGN(sizeof(*discov) + MT_TXD_SIZE + skb->len, 4); 3234 2893 tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_OFFLOAD, len); ··· 4083 3742 4084 3743 if (phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR) 4085 3744 req.switch_reason = CH_SWITCH_NORMAL; 4086 - else if (phy->mt76->offchannel || 4087 - phy->mt76->hw->conf.flags & IEEE80211_CONF_IDLE) 3745 + else if (phy->mt76->offchannel || !phy->mt76->chanctx) 4088 3746 req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD; 4089 3747 else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef, 4090 3748 NL80211_IFTYPE_AP)) ··· 4104 3764 &req, sizeof(req), true); 4105 3765 } 4106 3766 4107 - static int mt7996_mcu_set_eeprom_flash(struct mt7996_dev *dev) 3767 + static int 3768 + mt7996_mcu_get_cal_free_data(struct mt7996_dev *dev) 3769 + { 3770 + #define MT_EE_7977BN_OFFSET (0x1200 - 0x500) 3771 + struct cal_free_data { 3772 + u16 adie_offs; 3773 + u16 eep_offs; 3774 + }; 3775 + static const struct cal_free_data cal_7975[] = { 3776 + { 0x5cd, 0x451 }, { 0x5cf, 0x453 }, { 0x5d1, 0x455 }, 3777 + { 0x5d3, 0x457 }, { 0x6c0, 0x44c }, { 0x6c1, 0x44d }, 3778 + { 0x6c2, 0x44e }, { 0x6c3, 0x44f }, { 0x7a1, 0xba1 }, 3779 + { 0x7a6, 0xba6 }, { 0x7a8, 0xba8 }, { 0x7aa, 0xbaa }, 3780 + }; 3781 + static const struct cal_free_data cal_7976[] = { 3782 + { 0x4c, 0x44c }, { 0x4d, 0x44d }, { 0x4e, 0x44e }, 3783 + { 0x4f, 0x44f }, { 0x50, 0x450 }, { 0x51, 0x451 }, 3784 + { 0x53, 0x453 }, { 0x55, 0x455 }, { 0x57, 0x457 }, 3785 + { 0x59, 0x459 }, { 0x70, 0x470 }, { 0x71, 0x471 }, 3786 + { 0x790, 0xb90 }, { 0x791, 0xb91 }, { 0x794, 0xb94 }, 3787 + { 0x795, 0xb95 }, { 0x7a6, 0xba6 }, { 0x7a8, 0xba8 }, 3788 + { 0x7aa, 0xbaa }, 3789 + }; 
3790 + static const struct cal_free_data cal_7977[] = { 3791 + { 0x4c, 0x124c }, { 0x4d, 0x124d }, { 0x4e, 0x124e }, 3792 + { 0x4f, 0x124f }, { 0x50, 0x1250 }, { 0x51, 0x1251 }, 3793 + { 0x53, 0x1253 }, { 0x55, 0x1255 }, { 0x57, 0x1257 }, 3794 + { 0x59, 0x1259 }, { 0x69, 0x1269 }, { 0x6a, 0x126a }, 3795 + { 0x7a, 0x127a }, { 0x7b, 0x127b }, { 0x7c, 0x127c }, 3796 + { 0x7d, 0x127d }, { 0x7e, 0x127e }, 3797 + }; 3798 + static const struct cal_free_data cal_7978[] = { 3799 + { 0x91, 0xb91 }, { 0x95, 0xb95 }, { 0x100, 0x480 }, 3800 + { 0x102, 0x482 }, { 0x104, 0x484 }, { 0x106, 0x486 }, 3801 + { 0x107, 0x487 }, { 0x108, 0x488 }, { 0x109, 0x489 }, 3802 + { 0x10a, 0x48a }, { 0x10b, 0x48b }, { 0x10c, 0x48c }, 3803 + { 0x10e, 0x48e }, { 0x110, 0x490 }, 3804 + }; 3805 + static const struct cal_free_data cal_7979[] = { 3806 + { 0x4c, 0x124c }, { 0x4d, 0x124d }, { 0x4e, 0x124e }, 3807 + { 0x4f, 0x124f }, { 0x50, 0x1250 }, { 0x51, 0x1251 }, 3808 + { 0x53, 0x1253 }, { 0x55, 0x1255 }, { 0x57, 0x1257 }, 3809 + { 0x59, 0x1259 }, { 0x69, 0x1269 }, { 0x6a, 0x126a }, 3810 + { 0x7a, 0x127a }, { 0x7b, 0x127b }, { 0x7c, 0x127c }, 3811 + { 0x7e, 0x127e }, { 0x80, 0x1280 }, 3812 + }; 3813 + const struct cal_free_data *cal_arr[__MT_MAX_BAND]; 3814 + u16 cal_arr_len[__MT_MAX_BAND] = {}; 3815 + u8 *eeprom = (u8 *)dev->mt76.eeprom.data; 3816 + int band, i, ret; 3817 + 3818 + #define CAL_ARR(_band, _adie) do { \ 3819 + cal_arr[_band] = cal_##_adie; \ 3820 + cal_arr_len[_band] = ARRAY_SIZE(cal_##_adie); \ 3821 + } while (0) 3822 + 3823 + switch (mt76_chip(&dev->mt76)) { 3824 + case MT7996_DEVICE_ID: 3825 + /* adie 0 */ 3826 + if (dev->var.fem == MT7996_FEM_INT && 3827 + dev->var.type != MT7996_VAR_TYPE_233) 3828 + CAL_ARR(0, 7975); 3829 + else 3830 + CAL_ARR(0, 7976); 3831 + 3832 + /* adie 1 */ 3833 + if (dev->var.type == MT7996_VAR_TYPE_444) 3834 + CAL_ARR(1, 7977); 3835 + 3836 + /* adie 2 */ 3837 + CAL_ARR(2, 7977); 3838 + break; 3839 + case MT7992_DEVICE_ID: 3840 + /* adie 0 */ 3841 + if 
(dev->var.type == MT7992_VAR_TYPE_44 && 3842 + dev->var.fem != MT7996_FEM_EXT) 3843 + CAL_ARR(0, 7975); 3844 + else if (dev->var.type == MT7992_VAR_TYPE_24) 3845 + CAL_ARR(0, 7978); 3846 + else 3847 + CAL_ARR(0, 7976); 3848 + 3849 + /* adie 1 */ 3850 + if (dev->var.type == MT7992_VAR_TYPE_44 && 3851 + dev->var.fem != MT7996_FEM_INT) 3852 + CAL_ARR(1, 7977); 3853 + else if (dev->var.type != MT7992_VAR_TYPE_23) 3854 + CAL_ARR(1, 7979); 3855 + break; 3856 + case MT7990_DEVICE_ID: 3857 + /* adie 0 */ 3858 + CAL_ARR(0, 7976); 3859 + break; 3860 + default: 3861 + return -EINVAL; 3862 + } 3863 + 3864 + for (band = 0; band < __MT_MAX_BAND; band++) { 3865 + u8 buf[MT7996_EEPROM_BLOCK_SIZE]; 3866 + const struct cal_free_data *cal; 3867 + u16 prev_block_idx = -1; 3868 + u16 adie_base; 3869 + 3870 + if (!cal_arr_len[band]) 3871 + continue; 3872 + 3873 + if (band == MT_BAND0) 3874 + adie_base = MT7996_EFUSE_BASE_OFFS_ADIE0; 3875 + else if (band == MT_BAND1 && is_mt7992(&dev->mt76)) 3876 + adie_base = MT7992_EFUSE_BASE_OFFS_ADIE1; 3877 + else if (band == MT_BAND1) 3878 + adie_base = MT7996_EFUSE_BASE_OFFS_ADIE1; 3879 + else 3880 + adie_base = MT7996_EFUSE_BASE_OFFS_ADIE2; 3881 + 3882 + cal = cal_arr[band]; 3883 + for (i = 0; i < cal_arr_len[band]; i++) { 3884 + u16 adie_offset = cal[i].adie_offs + adie_base; 3885 + u16 eep_offset = cal[i].eep_offs; 3886 + u16 block_idx = adie_offset / MT7996_EEPROM_BLOCK_SIZE; 3887 + u16 offset = adie_offset % MT7996_EEPROM_BLOCK_SIZE; 3888 + 3889 + if (is_mt7996(&dev->mt76) && band == MT_BAND1 && 3890 + dev->var.type == MT7996_VAR_TYPE_444) 3891 + eep_offset -= MT_EE_7977BN_OFFSET; 3892 + 3893 + if (prev_block_idx != block_idx) { 3894 + memset(buf, 0, sizeof(buf)); 3895 + ret = mt7996_mcu_get_eeprom(dev, adie_offset, buf, 3896 + MT7996_EEPROM_BLOCK_SIZE, 3897 + EEPROM_MODE_EFUSE); 3898 + if (ret) { 3899 + if (ret != -EINVAL) 3900 + return ret; 3901 + prev_block_idx = -1; 3902 + continue; 3903 + } 3904 + } 3905 + eeprom[eep_offset] = 
buf[offset]; 3906 + prev_block_idx = block_idx; 3907 + } 3908 + } 3909 + 3910 + return 0; 3911 + } 3912 + 3913 + int mt7996_mcu_set_eeprom(struct mt7996_dev *dev) 4108 3914 { 4109 3915 #define MAX_PAGE_IDX_MASK GENMASK(7, 5) 4110 3916 #define PAGE_IDX_MASK GENMASK(4, 2) 4111 3917 #define PER_PAGE_SIZE 0x400 4112 - struct mt7996_mcu_eeprom req = { 3918 + struct mt7996_mcu_eeprom_update req = { 4113 3919 .tag = cpu_to_le16(UNI_EFUSE_BUFFER_MODE), 4114 3920 .buffer_mode = EE_MODE_BUFFER 4115 3921 }; 4116 3922 u16 eeprom_size = MT7996_EEPROM_SIZE; 4117 3923 u8 total = DIV_ROUND_UP(eeprom_size, PER_PAGE_SIZE); 4118 3924 u8 *eep = (u8 *)dev->mt76.eeprom.data; 4119 - int eep_len, i; 3925 + int ret, eep_len, i; 3926 + 3927 + ret = mt7996_mcu_get_cal_free_data(dev); 3928 + if (ret) 3929 + return ret; 4120 3930 4121 3931 for (i = 0; i < total; i++, eep += eep_len) { 4122 3932 struct sk_buff *skb; 4123 - int ret, msg_len; 3933 + int msg_len; 4124 3934 4125 3935 if (i == total - 1 && !!(eeprom_size % PER_PAGE_SIZE)) 4126 3936 eep_len = eeprom_size % PER_PAGE_SIZE; ··· 4299 3809 return 0; 4300 3810 } 4301 3811 4302 - int mt7996_mcu_set_eeprom(struct mt7996_dev *dev) 3812 + int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset, u8 *buf, u32 buf_len, 3813 + enum mt7996_eeprom_mode mode) 4303 3814 { 4304 - struct mt7996_mcu_eeprom req = { 4305 - .tag = cpu_to_le16(UNI_EFUSE_BUFFER_MODE), 4306 - .len = cpu_to_le16(sizeof(req) - 4), 4307 - .buffer_mode = EE_MODE_EFUSE, 4308 - .format = EE_FORMAT_WHOLE 3815 + struct mt7996_mcu_eeprom_access req = { 3816 + .info.len = cpu_to_le16(sizeof(req) - 4), 4309 3817 }; 4310 - 4311 - if (dev->flash_mode) 4312 - return mt7996_mcu_set_eeprom_flash(dev); 4313 - 4314 - return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(EFUSE_CTRL), 4315 - &req, sizeof(req), true); 4316 - } 4317 - 4318 - int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset, u8 *buf, u32 buf_len) 4319 - { 4320 - struct { 4321 - u8 _rsv[4]; 4322 - 4323 - __le16 tag; 
4324 - __le16 len; 4325 - __le32 addr; 4326 - __le32 valid; 4327 - u8 data[16]; 4328 - } __packed req = { 4329 - .tag = cpu_to_le16(UNI_EFUSE_ACCESS), 4330 - .len = cpu_to_le16(sizeof(req) - 4), 4331 - .addr = cpu_to_le32(round_down(offset, 4332 - MT7996_EEPROM_BLOCK_SIZE)), 4333 - }; 3818 + struct mt7996_mcu_eeprom_access_event *event; 4334 3819 struct sk_buff *skb; 4335 - bool valid; 4336 - int ret; 3820 + int ret, cmd; 3821 + u32 addr; 4337 3822 4338 - ret = mt76_mcu_send_and_get_msg(&dev->mt76, 4339 - MCU_WM_UNI_CMD_QUERY(EFUSE_CTRL), 4340 - &req, sizeof(req), true, &skb); 3823 + switch (mode) { 3824 + case EEPROM_MODE_EFUSE: 3825 + addr = round_down(offset, MT7996_EEPROM_BLOCK_SIZE); 3826 + cmd = MCU_WM_UNI_CMD_QUERY(EFUSE_CTRL); 3827 + req.info.tag = cpu_to_le16(UNI_EFUSE_ACCESS); 3828 + break; 3829 + case EEPROM_MODE_EXT: 3830 + addr = round_down(offset, MT7996_EXT_EEPROM_BLOCK_SIZE); 3831 + cmd = MCU_WM_UNI_CMD_QUERY(EXT_EEPROM_CTRL); 3832 + req.info.tag = cpu_to_le16(UNI_EXT_EEPROM_ACCESS); 3833 + req.eeprom.ext_eeprom.data_len = cpu_to_le32(buf_len); 3834 + break; 3835 + default: 3836 + return -EINVAL; 3837 + } 3838 + 3839 + req.info.addr = cpu_to_le32(addr); 3840 + ret = mt76_mcu_send_and_get_msg(&dev->mt76, cmd, &req, sizeof(req), 3841 + true, &skb); 4341 3842 if (ret) 4342 3843 return ret; 4343 3844 4344 - valid = le32_to_cpu(*(__le32 *)(skb->data + 16)); 4345 - if (valid) { 4346 - u32 addr = le32_to_cpu(*(__le32 *)(skb->data + 12)); 3845 + event = (struct mt7996_mcu_eeprom_access_event *)skb->data; 3846 + if (event->valid) { 3847 + u32 ret_len = le32_to_cpu(event->eeprom.ext_eeprom.data_len); 3848 + 3849 + addr = le32_to_cpu(event->addr); 4347 3850 4348 3851 if (!buf) 4349 3852 buf = (u8 *)dev->mt76.eeprom.data + addr; 4350 - if (!buf_len || buf_len > MT7996_EEPROM_BLOCK_SIZE) 4351 - buf_len = MT7996_EEPROM_BLOCK_SIZE; 4352 3853 4353 - skb_pull(skb, 48); 4354 - memcpy(buf, skb->data, buf_len); 3854 + switch (mode) { 3855 + case EEPROM_MODE_EFUSE: 3856 
+ if (!buf_len || buf_len > MT7996_EEPROM_BLOCK_SIZE) 3857 + buf_len = MT7996_EEPROM_BLOCK_SIZE; 3858 + 3859 + memcpy(buf, event->eeprom.efuse, buf_len); 3860 + break; 3861 + case EEPROM_MODE_EXT: 3862 + if (!buf_len || buf_len > MT7996_EXT_EEPROM_BLOCK_SIZE) 3863 + buf_len = MT7996_EXT_EEPROM_BLOCK_SIZE; 3864 + 3865 + memcpy(buf, event->eeprom.ext_eeprom.data, 3866 + ret_len < buf_len ? ret_len : buf_len); 3867 + break; 3868 + default: 3869 + ret = -EINVAL; 3870 + break; 3871 + } 4355 3872 } else { 4356 3873 ret = -EINVAL; 4357 3874 } ··· 4368 3871 return ret; 4369 3872 } 4370 3873 4371 - int mt7996_mcu_get_eeprom_free_block(struct mt7996_dev *dev, u8 *block_num) 3874 + int mt7996_mcu_get_efuse_free_block(struct mt7996_dev *dev, u8 *block_num) 4372 3875 { 4373 3876 struct { 4374 3877 u8 _rsv[4]; ··· 4400 3903 4401 3904 int mt7996_mcu_get_chip_config(struct mt7996_dev *dev, u32 *cap) 4402 3905 { 4403 - #define NIC_CAP 3 4404 3906 #define UNI_EVENT_CHIP_CONFIG_EFUSE_VERSION 0x21 4405 3907 struct { 4406 3908 u8 _rsv[4]; ··· 4407 3911 __le16 tag; 4408 3912 __le16 len; 4409 3913 } __packed req = { 4410 - .tag = cpu_to_le16(NIC_CAP), 3914 + .tag = cpu_to_le16(UNI_CHIP_CONFIG_NIC_CAPA), 4411 3915 .len = cpu_to_le16(sizeof(req) - 4), 4412 3916 }; 4413 3917 struct sk_buff *skb; ··· 5055 4559 &req, sizeof(req), true); 5056 4560 } 5057 4561 4562 + int mt7996_mcu_rdd_resume_tx(struct mt7996_phy *phy) 4563 + { 4564 + struct { 4565 + u8 band_idx; 4566 + u8 _rsv[3]; 4567 + 4568 + __le16 tag; 4569 + __le16 len; 4570 + u8 mac_enable; 4571 + u8 _rsv2[3]; 4572 + } __packed req = { 4573 + .band_idx = phy->mt76->band_idx, 4574 + .tag = cpu_to_le16(UNI_BAND_CONFIG_MAC_ENABLE_CTRL), 4575 + .len = cpu_to_le16(sizeof(req) - 4), 4576 + .mac_enable = 2, 4577 + }; 4578 + int ret; 4579 + 4580 + if (!phy->rdd_tx_paused) 4581 + return 0; 4582 + 4583 + ret = mt76_mcu_send_msg(&phy->dev->mt76, MCU_WM_UNI_CMD(BAND_CONFIG), 4584 + &req, sizeof(req), true); 4585 + if (!ret) 4586 + phy->rdd_tx_paused 
= false; 4587 + 4588 + return ret; 4589 + } 4590 + 5058 4591 int mt7996_mcu_rdd_cmd(struct mt7996_dev *dev, int cmd, u8 rdd_idx, u8 val) 5059 4592 { 5060 4593 struct { ··· 5413 4888 5414 4889 return mt76_mcu_send_msg(&dev->mt76, MCU_WA_EXT_CMD(CP_SUPPORT), 5415 4890 &cp_mode, sizeof(cp_mode), true); 4891 + } 4892 + 4893 + int mt7996_mcu_set_dup_wtbl(struct mt7996_dev *dev) 4894 + { 4895 + #define DUP_WTBL_NUM 80 4896 + struct { 4897 + u8 _rsv[4]; 4898 + 4899 + __le16 tag; 4900 + __le16 len; 4901 + __le16 base; 4902 + __le16 num; 4903 + u8 _rsv2[4]; 4904 + } __packed req = { 4905 + .tag = cpu_to_le16(UNI_CHIP_CONFIG_DUP_WTBL), 4906 + .len = cpu_to_le16(sizeof(req) - 4), 4907 + .base = cpu_to_le16(MT7996_WTBL_STA - DUP_WTBL_NUM + 1), 4908 + .num = cpu_to_le16(DUP_WTBL_NUM), 4909 + }; 4910 + 4911 + return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(CHIP_CONFIG), &req, 4912 + sizeof(req), true); 5416 4913 }
+103 -9
drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
··· 25 25 }; 26 26 27 27 struct mt7996_mcu_uni_event { 28 - u8 cid; 29 - u8 __rsv[3]; 28 + __le16 cid; 29 + u8 __rsv[2]; 30 30 __le32 status; /* 0: success, others: fail */ 31 31 } __packed; 32 32 ··· 52 52 u8 rsv[2]; 53 53 } __packed; 54 54 55 - struct mt7996_mcu_csa_notify { 56 - struct mt7996_mcu_rxd rxd; 57 - 55 + struct mt7996_mcu_countdown_notify { 58 56 u8 omac_idx; 59 - u8 csa_count; 60 - u8 band_idx; 57 + u8 count; 58 + u8 csa_failure_reason; /* 0: success, 1: beacon disabled */ 61 59 u8 rsv; 62 60 } __packed; 63 61 ··· 145 147 u8 rsv[2]; 146 148 } __packed; 147 149 148 - struct mt7996_mcu_eeprom { 150 + struct mt7996_mcu_eeprom_update { 149 151 u8 _rsv[4]; 150 152 151 153 __le16 tag; ··· 153 155 u8 buffer_mode; 154 156 u8 format; 155 157 __le16 buf_len; 158 + } __packed; 159 + 160 + union eeprom_data { 161 + struct { 162 + __le32 data_len; 163 + DECLARE_FLEX_ARRAY(u8, data); 164 + } ext_eeprom; 165 + DECLARE_FLEX_ARRAY(u8, efuse); 166 + } __packed; 167 + 168 + struct mt7996_mcu_eeprom_info { 169 + u8 _rsv[4]; 170 + 171 + __le16 tag; 172 + __le16 len; 173 + __le32 addr; 174 + __le32 valid; 175 + } __packed; 176 + 177 + struct mt7996_mcu_eeprom_access { 178 + struct mt7996_mcu_eeprom_info info; 179 + union eeprom_data eeprom; 180 + } __packed; 181 + 182 + struct mt7996_mcu_eeprom_access_event { 183 + u8 _rsv[4]; 184 + 185 + __le16 tag; 186 + __le16 len; 187 + __le32 version; 188 + __le32 addr; 189 + __le32 valid; 190 + __le32 size; 191 + __le32 magic_no; 192 + __le32 type; 193 + __le32 rsv[4]; 194 + union eeprom_data eeprom; 156 195 } __packed; 157 196 158 197 struct mt7996_mcu_phy_rx_info { ··· 449 414 __le16 tag; 450 415 __le16 len; 451 416 u8 cnt; 452 - u8 rsv[3]; 417 + union { 418 + struct { 419 + bool static_pp; 420 + bool abort; 421 + } csa; 422 + struct { 423 + bool abort; 424 + } cca; 425 + }; 426 + u8 rsv; 453 427 } __packed; 454 428 455 429 struct bss_bcn_mbss_tlv { ··· 516 472 u8 remap_idx; 517 473 u8 link_id; 518 474 u8 __rsv[2]; 475 + } 
__packed; 476 + 477 + struct bss_prot_tlv { 478 + __le16 tag; 479 + __le16 len; 480 + __le32 prot_mode; 481 + } __packed; 482 + 483 + struct bss_mld_link_op_tlv { 484 + __le16 tag; 485 + __le16 len; 486 + u8 group_mld_id; 487 + u8 own_mld_id; 488 + u8 mac_addr[ETH_ALEN]; 489 + u8 remap_idx; 490 + u8 link_operation; 491 + u8 link_id; 492 + u8 rsv[2]; 519 493 } __packed; 520 494 521 495 struct sta_rec_ht_uni { ··· 709 647 u8 __rsv; 710 648 } __packed; 711 649 650 + struct mld_req_hdr { 651 + u8 ver; 652 + u8 mld_addr[ETH_ALEN]; 653 + u8 mld_idx; 654 + u8 flag; 655 + u8 rsv[3]; 656 + u8 buf[]; 657 + } __packed; 658 + 659 + struct mld_reconf_stop_link { 660 + __le16 tag; 661 + __le16 len; 662 + __le16 link_bitmap; 663 + u8 rsv[2]; 664 + u8 bss_idx[16]; 665 + } __packed; 666 + 667 + enum { 668 + UNI_CMD_MLD_RECONF_AP_REM_TIMER = 0x03, 669 + UNI_CMD_MLD_RECONF_STOP_LINK = 0x04, 670 + }; 671 + 712 672 struct hdr_trans_en { 713 673 __le16 tag; 714 674 __le16 len; ··· 875 791 UNI_CHANNEL_RX_PATH, 876 792 }; 877 793 794 + enum { 795 + UNI_CHIP_CONFIG_NIC_CAPA = 3, 796 + UNI_CHIP_CONFIG_DUP_WTBL = 4, 797 + }; 798 + 878 799 #define MT7996_BSS_UPDATE_MAX_SIZE (sizeof(struct bss_req_hdr) + \ 879 800 sizeof(struct mt76_connac_bss_basic_tlv) + \ 880 801 sizeof(struct bss_rlm_tlv) + \ ··· 926 837 enum { 927 838 UNI_BAND_CONFIG_RADIO_ENABLE, 928 839 UNI_BAND_CONFIG_RTS_THRESHOLD = 0x08, 840 + UNI_BAND_CONFIG_MAC_ENABLE_CTRL = 0x0c, 929 841 }; 930 842 931 843 enum { ··· 944 854 UNI_EFUSE_BUFFER_MODE, 945 855 UNI_EFUSE_FREE_BLOCK, 946 856 UNI_EFUSE_BUFFER_RD, 857 + }; 858 + 859 + enum { 860 + UNI_EXT_EEPROM_ACCESS = 1, 947 861 }; 948 862 949 863 enum {
+63 -7
drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
··· 29 29 #define MT7996_RX_RING_SIZE 1536 30 30 #define MT7996_RX_MCU_RING_SIZE 512 31 31 #define MT7996_RX_MCU_RING_SIZE_WA 1024 32 + #define MT7996_NPU_TX_RING_SIZE 1024 33 + #define MT7996_NPU_RX_RING_SIZE 1024 34 + #define MT7996_NPU_TXD_SIZE 3 35 + 32 36 /* scatter-gather of mcu event is not supported in connac3 */ 33 37 #define MT7996_RX_MCU_BUF_SIZE (2048 + \ 34 38 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) ··· 64 60 #define MT7992_FIRMWARE_DSP_23 "mediatek/mt7996/mt7992_dsp_23.bin" 65 61 #define MT7992_ROM_PATCH_23 "mediatek/mt7996/mt7992_rom_patch_23.bin" 66 62 63 + #define MT7992_FIRMWARE_WA_24 "mediatek/mt7996/mt7992_wa_24.bin" 64 + #define MT7992_FIRMWARE_WM_24 "mediatek/mt7996/mt7992_wm_24.bin" 65 + #define MT7992_FIRMWARE_DSP_24 "mediatek/mt7996/mt7992_dsp_24.bin" 66 + #define MT7992_ROM_PATCH_24 "mediatek/mt7996/mt7992_rom_patch_24.bin" 67 + 67 68 #define MT7990_FIRMWARE_WA "" 68 69 #define MT7990_FIRMWARE_WM "mediatek/mt7996/mt7990_wm.bin" 69 70 #define MT7990_FIRMWARE_DSP "" ··· 84 75 #define MT7992_EEPROM_DEFAULT_MIX "mediatek/mt7996/mt7992_eeprom_2i5e.bin" 85 76 #define MT7992_EEPROM_DEFAULT_23 "mediatek/mt7996/mt7992_eeprom_23.bin" 86 77 #define MT7992_EEPROM_DEFAULT_23_INT "mediatek/mt7996/mt7992_eeprom_23_2i5i.bin" 78 + #define MT7992_EEPROM_DEFAULT_24 "mediatek/mt7996/mt7992_eeprom_24_2i5i.bin" 87 79 88 80 #define MT7990_EEPROM_DEFAULT "mediatek/mt7996/mt7990_eeprom.bin" 89 81 #define MT7990_EEPROM_DEFAULT_INT "mediatek/mt7996/mt7990_eeprom_2i5i.bin" 90 82 91 83 #define MT7996_EEPROM_SIZE 7680 92 84 #define MT7996_EEPROM_BLOCK_SIZE 16 85 + #define MT7996_EXT_EEPROM_BLOCK_SIZE 1024 93 86 #define MT7996_TOKEN_SIZE 16384 94 87 #define MT7996_HW_TOKEN_SIZE 8192 95 88 ··· 164 153 enum mt7992_var_type { 165 154 MT7992_VAR_TYPE_44, 166 155 MT7992_VAR_TYPE_23, 156 + MT7992_VAR_TYPE_24, 167 157 }; 168 158 169 159 enum mt7990_var_type { ··· 176 164 MT7996_FEM_INT, 177 165 MT7996_FEM_MIX, 178 166 }; 167 + 168 + enum mt7996_eeprom_mode { 169 + 
EEPROM_MODE_DEFAULT_BIN, 170 + EEPROM_MODE_EFUSE, 171 + EEPROM_MODE_FLASH, 172 + EEPROM_MODE_EXT, 173 + }; 174 + 175 + #define MT7996_EFUSE_BASE_OFFS_ADIE0 0x400 176 + #define MT7996_EFUSE_BASE_OFFS_ADIE1 0x1e00 177 + #define MT7996_EFUSE_BASE_OFFS_ADIE2 0x1200 178 + #define MT7992_EFUSE_BASE_OFFS_ADIE1 0x1200 179 179 180 180 enum mt7996_txq_id { 181 181 MT7996_TXQ_FWDL = 16, ··· 276 252 struct mt76_vif_link mt76; /* must be first */ 277 253 278 254 struct mt7996_sta_link msta_link; 279 - struct mt7996_phy *phy; 280 - 281 255 struct cfg80211_bitrate_mask bitrate_mask; 282 256 283 257 u8 mld_idx; ··· 399 377 400 378 bool has_aux_rx; 401 379 bool counter_reset; 380 + bool rdd_tx_paused; 402 381 }; 403 382 404 383 struct mt7996_dev { ··· 459 436 460 437 u32 hw_pattern; 461 438 462 - bool flash_mode:1; 439 + u8 eeprom_mode; 463 440 bool has_eht:1; 464 441 465 442 struct { ··· 495 472 struct list_head page_cache; 496 473 struct list_head page_map[MT7996_RRO_MSDU_PG_HASH_SIZE]; 497 474 } wed_rro; 475 + 476 + dma_addr_t npu_txd_addr[2 * MT7996_NPU_TXD_SIZE]; 498 477 499 478 bool ibf; 500 479 u8 fw_debug_wm; ··· 694 669 struct ieee80211_bss_conf *link_conf, 695 670 struct mt76_vif_link *mlink, 696 671 struct mt7996_sta_link *msta_link, int enable); 672 + int mt7996_mcu_update_bss_rfch(struct mt7996_phy *phy, 673 + struct mt7996_vif_link *link); 697 674 int mt7996_mcu_add_sta(struct mt7996_dev *dev, 698 675 struct ieee80211_bss_conf *link_conf, 699 676 struct ieee80211_link_sta *link_sta, ··· 705 678 int mt7996_mcu_teardown_mld_sta(struct mt7996_dev *dev, 706 679 struct mt7996_vif_link *link, 707 680 struct mt7996_sta_link *msta_link); 681 + void mt7996_mcu_update_sta_rec_bw(void *data, struct ieee80211_sta *sta); 708 682 int mt7996_mcu_add_tx_ba(struct mt7996_dev *dev, 709 683 struct ieee80211_ampdu_params *params, 710 684 struct ieee80211_vif *vif, bool enable); ··· 735 707 int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, struct mt7996_sta *msta, 736 708 void *data, 
u8 link_id, u32 field); 737 709 int mt7996_mcu_set_eeprom(struct mt7996_dev *dev); 738 - int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset, u8 *buf, u32 buf_len); 739 - int mt7996_mcu_get_eeprom_free_block(struct mt7996_dev *dev, u8 *block_num); 710 + int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset, u8 *buf, u32 buf_len, 711 + enum mt7996_eeprom_mode mode); 712 + int mt7996_mcu_get_efuse_free_block(struct mt7996_dev *dev, u8 *block_num); 740 713 int mt7996_mcu_get_chip_config(struct mt7996_dev *dev, u32 *cap); 741 714 int mt7996_mcu_set_ser(struct mt7996_dev *dev, u8 action, u8 set, u8 band); 742 715 int mt7996_mcu_set_txbf(struct mt7996_dev *dev, u8 action); ··· 748 719 const struct mt7996_dfs_pattern *pattern); 749 720 int mt7996_mcu_set_radio_en(struct mt7996_phy *phy, bool enable); 750 721 int mt7996_mcu_set_rts_thresh(struct mt7996_phy *phy, u32 val); 722 + int mt7996_mcu_set_protection(struct mt7996_phy *phy, struct mt7996_vif_link *link, 723 + u8 ht_mode, bool use_cts_prot); 751 724 int mt7996_mcu_set_timing(struct mt7996_phy *phy, struct ieee80211_vif *vif, 752 725 struct ieee80211_bss_conf *link_conf); 753 726 int mt7996_mcu_get_chan_mib_info(struct mt7996_phy *phy, bool chan_switch); ··· 757 726 int mt7996_mcu_set_thermal_throttling(struct mt7996_phy *phy, u8 state); 758 727 int mt7996_mcu_set_thermal_protect(struct mt7996_phy *phy, bool enable); 759 728 int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy); 729 + int mt7996_mcu_rdd_resume_tx(struct mt7996_phy *phy); 760 730 int mt7996_mcu_rdd_cmd(struct mt7996_dev *dev, int cmd, u8 rdd_idx, u8 val); 761 731 int mt7996_mcu_rdd_background_enable(struct mt7996_phy *phy, 762 732 struct cfg80211_chan_def *chandef); ··· 775 743 int mt7996_mcu_get_all_sta_info(struct mt7996_phy *phy, u16 tag); 776 744 int mt7996_mcu_wed_rro_reset_sessions(struct mt7996_dev *dev, u16 id); 777 745 int mt7996_mcu_set_sniffer_mode(struct mt7996_phy *phy, bool enabled); 746 + int 
mt7996_mcu_set_dup_wtbl(struct mt7996_dev *dev); 747 + int mt7996_mcu_mld_reconf_stop_link(struct mt7996_dev *dev, 748 + struct ieee80211_vif *vif, 749 + u16 removed_links); 750 + int mt7996_mcu_mld_link_oper(struct mt7996_dev *dev, 751 + struct ieee80211_bss_conf *link_conf, 752 + struct mt7996_vif_link *link, bool add); 778 753 779 754 static inline bool mt7996_has_hwrro(struct mt7996_dev *dev) 780 755 { ··· 841 802 return !is_mt7990(&dev->mt76); 842 803 } 843 804 805 + static inline bool mt7996_has_ext_eeprom(struct mt7996_dev *dev) 806 + { 807 + return !is_mt7996(&dev->mt76); 808 + } 809 + 844 810 void mt7996_mac_init(struct mt7996_dev *dev); 845 811 u32 mt7996_mac_wtbl_lmac_addr(struct mt7996_dev *dev, u16 wcid, u8 dw); 846 812 bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask); ··· 867 823 struct mt7996_vif_link *link, 868 824 struct mt7996_sta_link *msta_link, 869 825 u8 flowid); 870 - void mt7996_mac_sta_deinit_link(struct mt7996_dev *dev, 871 - struct mt7996_sta_link *msta_link); 826 + void mt7996_mac_sta_remove_link(struct mt7996_dev *dev, 827 + struct ieee80211_sta *sta, 828 + unsigned int link_id, bool flush); 872 829 void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw, 873 830 struct ieee80211_sta *sta, 874 831 struct ieee80211_twt_setup *twt); ··· 906 861 struct mt7996_vif_link *link, 907 862 struct mt7996_sta_link *msta_link); 908 863 int mt7996_mcu_cp_support(struct mt7996_dev *dev, u8 mode); 864 + int mt7996_mcu_set_emlsr_mode(struct mt7996_dev *dev, 865 + struct ieee80211_vif *vif, 866 + struct ieee80211_sta *sta, 867 + struct ieee80211_eml_params *eml_params); 909 868 #ifdef CONFIG_MAC80211_DEBUGFS 910 869 void mt7996_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 911 870 struct ieee80211_sta *sta, struct dentry *dir); ··· 926 877 #endif 927 878 928 879 int mt7996_dma_rro_init(struct mt7996_dev *dev); 880 + void mt7996_dma_rro_start(struct mt7996_dev *dev); 929 881 930 882 #ifdef CONFIG_MT7996_NPU 883 + 
int __mt7996_npu_hw_init(struct mt7996_dev *dev); 931 884 int mt7996_npu_hw_init(struct mt7996_dev *dev); 932 885 int mt7996_npu_hw_stop(struct mt7996_dev *dev); 933 886 int mt7996_npu_rx_queues_init(struct mt7996_dev *dev); 934 887 #else 888 + static inline int __mt7996_npu_hw_init(struct mt7996_dev *dev) 889 + { 890 + return 0; 891 + } 892 + 935 893 static inline int mt7996_npu_hw_init(struct mt7996_dev *dev) 936 894 { 937 895 return 0;
+374 -95
drivers/net/wireless/mediatek/mt76/mt7996/npu.c
··· 8 8 9 9 #include "mt7996.h" 10 10 11 - static int mt7996_npu_offload_init(struct mt7996_dev *dev, 12 - struct airoha_npu *npu) 11 + static int mt7992_npu_txrx_offload_init(struct mt7996_dev *dev, 12 + struct airoha_npu *npu) 13 13 { 14 + u32 hif1_ofs = dev->hif2 ? MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0) : 0; 14 15 phys_addr_t phy_addr = dev->mt76.mmio.phy_addr; 15 - u32 val, hif1_ofs = 0, dma_addr; 16 + u32 dma_addr; 16 17 int i, err; 17 18 18 - err = mt76_npu_get_msg(npu, 0, WLAN_FUNC_GET_WAIT_NPU_VERSION, 19 - &val, GFP_KERNEL); 20 - if (err) { 21 - dev_warn(dev->mt76.dev, "failed getting NPU fw version\n"); 22 - return err; 23 - } 24 - 25 - dev_info(dev->mt76.dev, "NPU version: %0d.%d\n", 26 - (val >> 16) & 0xffff, val & 0xffff); 27 - 28 - err = mt76_npu_send_msg(npu, 0, WLAN_FUNC_SET_WAIT_PCIE_PORT_TYPE, 29 - dev->mt76.mmio.npu_type, GFP_KERNEL); 30 - if (err) { 31 - dev_warn(dev->mt76.dev, 32 - "failed setting NPU wlan PCIe port type\n"); 33 - return err; 34 - } 35 - 36 - if (dev->hif2) 37 - hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0); 38 - 39 19 for (i = MT_BAND0; i < MT_BAND2; i++) { 40 - dma_addr = phy_addr; 41 - if (i) 42 - dma_addr += MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND1) + 0x90 + 43 - hif1_ofs; 44 - else 45 - dma_addr += MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND0) + 0x80; 46 - 47 - err = mt76_npu_send_msg(npu, i, WLAN_FUNC_SET_WAIT_PCIE_ADDR, 48 - dma_addr, GFP_KERNEL); 49 - if (err) { 50 - dev_warn(dev->mt76.dev, 51 - "failed setting NPU wlan PCIe desc addr\n"); 52 - return err; 53 - } 54 - 55 20 err = mt76_npu_send_msg(npu, i, WLAN_FUNC_SET_WAIT_DESC, 56 21 MT7996_RX_RING_SIZE, GFP_KERNEL); 57 22 if (err) { 58 23 dev_warn(dev->mt76.dev, 59 - "failed setting NPU wlan PCIe desc size\n"); 24 + "failed setting NPU wlan rx desc size\n"); 60 25 return err; 61 26 } 62 27 ··· 62 97 phy_addr + MT_RRO_ACK_SN_CTRL, GFP_KERNEL); 63 98 if (err) { 64 99 dev_warn(dev->mt76.dev, 65 - "failed setting NPU wlan rro_ack_sn desc addr\n"); 100 + "failed setting NPU wlan tx desc 
addr\n"); 66 101 return err; 67 102 } 103 + 104 + return 0; 105 + } 106 + 107 + static int mt7996_npu_txrx_offload_init(struct mt7996_dev *dev, 108 + struct airoha_npu *npu) 109 + { 110 + u32 hif1_ofs = dev->hif2 ? MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0) : 0; 111 + phys_addr_t phy_addr = dev->mt76.mmio.phy_addr; 112 + u32 dma_addr; 113 + int err; 114 + 115 + /* npu rx rro ring0 */ 116 + err = mt76_npu_send_msg(npu, 0, WLAN_FUNC_SET_WAIT_DESC, 117 + MT7996_RX_RING_SIZE, GFP_KERNEL); 118 + if (err) { 119 + dev_warn(dev->mt76.dev, 120 + "failed setting NPU wlan rx desc size\n"); 121 + return err; 122 + } 123 + 124 + /* npu rx rro ring1 */ 125 + err = mt76_npu_send_msg(npu, 2, WLAN_FUNC_SET_WAIT_DESC, 126 + MT7996_NPU_RX_RING_SIZE, GFP_KERNEL); 127 + if (err) { 128 + dev_warn(dev->mt76.dev, 129 + "failed setting NPU wlan rx desc size\n"); 130 + return err; 131 + } 132 + 133 + /* msdu pg 2GHz */ 134 + dma_addr = phy_addr + MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND0) + 0xa0; 135 + err = mt76_npu_send_msg(npu, 5, WLAN_FUNC_SET_WAIT_PCIE_ADDR, 136 + dma_addr, GFP_KERNEL); 137 + if (err) { 138 + dev_warn(dev->mt76.dev, 139 + "failed setting NPU wlan PCIe desc addr\n"); 140 + return err; 141 + } 142 + 143 + err = mt76_npu_send_msg(npu, 5, WLAN_FUNC_SET_WAIT_DESC, 144 + MT7996_NPU_RX_RING_SIZE / 4, GFP_KERNEL); 145 + if (err) { 146 + dev_warn(dev->mt76.dev, 147 + "failed setting NPU wlan rx desc size\n"); 148 + return err; 149 + } 150 + 151 + /* msdu pg 5GHz */ 152 + dma_addr = phy_addr + MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND1) + 0xb0; 153 + err = mt76_npu_send_msg(npu, 6, WLAN_FUNC_SET_WAIT_PCIE_ADDR, 154 + dma_addr, GFP_KERNEL); 155 + if (err) { 156 + dev_warn(dev->mt76.dev, 157 + "failed setting NPU wlan PCIe desc addr\n"); 158 + return err; 159 + } 160 + 161 + err = mt76_npu_send_msg(npu, 6, WLAN_FUNC_SET_WAIT_DESC, 162 + MT7996_NPU_RX_RING_SIZE / 2, GFP_KERNEL); 163 + if (err) { 164 + dev_warn(dev->mt76.dev, 165 + "failed setting NPU wlan rx desc size\n"); 166 + return err; 167 
+ } 168 + 169 + /* msdu pg 6GHz */ 170 + dma_addr = phy_addr + MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND2) + 0xc0; 171 + err = mt76_npu_send_msg(npu, 7, WLAN_FUNC_SET_WAIT_PCIE_ADDR, 172 + dma_addr, GFP_KERNEL); 173 + if (err) { 174 + dev_warn(dev->mt76.dev, 175 + "failed setting NPU wlan PCIe desc addr\n"); 176 + return err; 177 + } 178 + 179 + err = mt76_npu_send_msg(npu, 7, WLAN_FUNC_SET_WAIT_DESC, 180 + MT7996_NPU_RX_RING_SIZE, GFP_KERNEL); 181 + if (err) { 182 + dev_warn(dev->mt76.dev, 183 + "failed setting NPU wlan rx desc size\n"); 184 + return err; 185 + } 186 + 187 + /* ind cmd ring */ 188 + err = mt76_npu_send_msg(npu, 8, WLAN_FUNC_SET_WAIT_PCIE_ADDR, 189 + phy_addr + MT_RXQ_RRO_IND_RING_BASE, 190 + GFP_KERNEL); 191 + if (err) { 192 + dev_warn(dev->mt76.dev, 193 + "failed setting NPU wlan PCIe desc addr\n"); 194 + return err; 195 + } 196 + 197 + err = mt76_npu_send_msg(npu, 8, WLAN_FUNC_SET_WAIT_DESC, 198 + MT7996_RX_RING_SIZE, GFP_KERNEL); 199 + if (err) { 200 + dev_warn(dev->mt76.dev, 201 + "failed setting NPU wlan rx desc size\n"); 202 + return err; 203 + } 204 + 205 + err = mt76_npu_send_msg(npu, 3, WLAN_FUNC_SET_WAIT_TX_RING_PCIE_ADDR, 206 + phy_addr + MT_RRO_ACK_SN_CTRL, GFP_KERNEL); 207 + if (err) { 208 + dev_warn(dev->mt76.dev, 209 + "failed setting NPU wlan tx desc addr\n"); 210 + return err; 211 + } 212 + 213 + /* npu tx */ 214 + dma_addr = phy_addr + MT_TXQ_RING_BASE(1) + 0x120; 215 + err = mt76_npu_send_msg(npu, 0, WLAN_FUNC_SET_WAIT_TX_RING_PCIE_ADDR, 216 + dma_addr, GFP_KERNEL); 217 + if (err) { 218 + dev_warn(dev->mt76.dev, 219 + "failed setting NPU wlan tx desc addr\n"); 220 + return err; 221 + } 222 + 223 + dma_addr = phy_addr + MT_TXQ_RING_BASE(0) + 0x150 + hif1_ofs; 224 + err = mt76_npu_send_msg(npu, 2, WLAN_FUNC_SET_WAIT_TX_RING_PCIE_ADDR, 225 + dma_addr, GFP_KERNEL); 226 + if (err) { 227 + dev_warn(dev->mt76.dev, 228 + "failed setting NPU wlan tx desc addr\n"); 229 + return err; 230 + } 231 + 232 + return 0; 233 + } 234 + 235 + static 
int mt7996_npu_offload_init(struct mt7996_dev *dev, 236 + struct airoha_npu *npu) 237 + { 238 + u32 val; 239 + int err; 240 + 241 + err = mt76_npu_get_msg(npu, 0, WLAN_FUNC_GET_WAIT_NPU_VERSION, 242 + &val, GFP_KERNEL); 243 + if (err) { 244 + dev_warn(dev->mt76.dev, "failed getting NPU fw version\n"); 245 + return err; 246 + } 247 + 248 + dev_info(dev->mt76.dev, "NPU version: %0d.%d\n", 249 + (val >> 16) & 0xffff, val & 0xffff); 250 + 251 + err = mt76_npu_send_msg(npu, 0, WLAN_FUNC_SET_WAIT_PCIE_PORT_TYPE, 252 + dev->mt76.mmio.npu_type, GFP_KERNEL); 253 + if (err) { 254 + dev_warn(dev->mt76.dev, 255 + "failed setting NPU wlan PCIe port type\n"); 256 + return err; 257 + } 258 + 259 + if (is_mt7996(&dev->mt76)) 260 + err = mt7996_npu_txrx_offload_init(dev, npu); 261 + else 262 + err = mt7992_npu_txrx_offload_init(dev, npu); 263 + 264 + if (err) 265 + return err; 68 266 69 267 err = mt76_npu_send_msg(npu, 0, WLAN_FUNC_SET_WAIT_TOKEN_ID_SIZE, 70 268 MT7996_HW_TOKEN_SIZE, GFP_KERNEL); ··· 235 107 return err; 236 108 237 109 dev->mt76.token_start = MT7996_HW_TOKEN_SIZE; 110 + 111 + return 0; 112 + } 113 + 114 + static int mt7992_npu_rxd_init(struct mt7996_dev *dev, struct airoha_npu *npu) 115 + { 116 + u32 val; 117 + int err; 118 + 119 + err = mt76_npu_get_msg(npu, 0, WLAN_FUNC_GET_WAIT_RXDESC_BASE, 120 + &val, GFP_KERNEL); 121 + if (err) { 122 + dev_warn(dev->mt76.dev, 123 + "failed retrieving NPU wlan rx ring0 addr\n"); 124 + return err; 125 + } 126 + writel(val, &dev->mt76.q_rx[MT_RXQ_RRO_BAND0].regs->desc_base); 127 + 128 + err = mt76_npu_get_msg(npu, 1, WLAN_FUNC_GET_WAIT_RXDESC_BASE, 129 + &val, GFP_KERNEL); 130 + if (err) { 131 + dev_warn(dev->mt76.dev, 132 + "failed retrieving NPU wlan rx ring1 addr\n"); 133 + return err; 134 + } 135 + writel(val, &dev->mt76.q_rx[MT_RXQ_RRO_BAND1].regs->desc_base); 136 + 137 + err = mt76_npu_get_msg(npu, 9, WLAN_FUNC_GET_WAIT_RXDESC_BASE, 138 + &val, GFP_KERNEL); 139 + if (err) { 140 + dev_warn(dev->mt76.dev, 141 + "failed 
retrieving NPU wlan rxdmad_c ring addr\n"); 142 + return err; 143 + } 144 + writel(val, &dev->mt76.q_rx[MT_RXQ_RRO_RXDMAD_C].regs->desc_base); 238 145 239 146 return 0; 240 147 } ··· 288 125 } 289 126 writel(val, &dev->mt76.q_rx[MT_RXQ_RRO_BAND0].regs->desc_base); 290 127 291 - err = mt76_npu_get_msg(npu, 1, WLAN_FUNC_GET_WAIT_RXDESC_BASE, 128 + err = mt76_npu_get_msg(npu, 2, WLAN_FUNC_GET_WAIT_RXDESC_BASE, 292 129 &val, GFP_KERNEL); 293 130 if (err) { 294 131 dev_warn(dev->mt76.dev, 295 - "failed retriving NPU wlan rx ring1 addr\n"); 132 + "failed retriving NPU wlan rx ring2 addr\n"); 296 133 return err; 297 134 } 298 - writel(val, &dev->mt76.q_rx[MT_RXQ_RRO_BAND1].regs->desc_base); 135 + writel(val, &dev->mt76.q_rx[MT_RXQ_RRO_BAND2].regs->desc_base); 299 136 300 - err = mt76_npu_get_msg(npu, 9, WLAN_FUNC_GET_WAIT_RXDESC_BASE, 137 + /* msdu pg ring */ 138 + err = mt76_npu_get_msg(npu, 10, WLAN_FUNC_GET_WAIT_RXDESC_BASE, 301 139 &val, GFP_KERNEL); 302 140 if (err) { 303 141 dev_warn(dev->mt76.dev, 304 - "failed retriving NPU wlan rxdmad_c ring addr\n"); 142 + "failed retriving NPU wlan msdu pg ring addr\n"); 305 143 return err; 306 144 } 307 - writel(val, &dev->mt76.q_rx[MT_RXQ_RRO_RXDMAD_C].regs->desc_base); 145 + writel(val, &dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND0].regs->desc_base); 146 + 147 + err = mt76_npu_get_msg(npu, 11, WLAN_FUNC_GET_WAIT_RXDESC_BASE, 148 + &val, GFP_KERNEL); 149 + if (err) { 150 + dev_warn(dev->mt76.dev, 151 + "failed retriving NPU wlan msdu pg ring addr\n"); 152 + return err; 153 + } 154 + writel(val, &dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND1].regs->desc_base); 155 + 156 + err = mt76_npu_get_msg(npu, 12, WLAN_FUNC_GET_WAIT_RXDESC_BASE, 157 + &val, GFP_KERNEL); 158 + if (err) { 159 + dev_warn(dev->mt76.dev, 160 + "failed retriving NPU wlan msdu pg ring addr\n"); 161 + return err; 162 + } 163 + writel(val, &dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND2].regs->desc_base); 164 + 165 + /* ind_cmd ring */ 166 + err = mt76_npu_get_msg(npu, 8, 
WLAN_FUNC_GET_WAIT_RXDESC_BASE, 167 + &val, GFP_KERNEL); 168 + if (err) { 169 + dev_warn(dev->mt76.dev, 170 + "failed retriving NPU wlan ind_cmd ring addr\n"); 171 + return err; 172 + } 173 + writel(val, &dev->mt76.q_rx[MT_RXQ_RRO_IND].regs->desc_base); 308 174 309 175 return 0; 310 176 } 311 177 312 178 static int mt7996_npu_txd_init(struct mt7996_dev *dev, struct airoha_npu *npu) 313 179 { 314 - int i, err; 180 + const enum mt76_band_id band_list[] = { 181 + MT_BAND0, 182 + is_mt7996(&dev->mt76) ? MT_BAND2 : MT_BAND1, 183 + }; 184 + int i, index = 0; 315 185 316 - for (i = MT_BAND0; i < MT_BAND2; i++) { 317 - dma_addr_t dma_addr; 186 + BUILD_BUG_ON(ARRAY_SIZE(band_list) * 3 != 187 + ARRAY_SIZE(dev->npu_txd_addr)); 188 + 189 + for (i = 0; i < ARRAY_SIZE(band_list); i++) { 190 + int err, band = band_list[i], phy_id; 318 191 u32 val; 319 192 320 - err = mt76_npu_get_msg(npu, i + 5, 193 + err = mt76_npu_get_msg(npu, band + 5, 321 194 WLAN_FUNC_GET_WAIT_RXDESC_BASE, 322 195 &val, GFP_KERNEL); 323 196 if (err) { 324 197 dev_warn(dev->mt76.dev, 325 - "failed retriving NPU wlan tx ring addr\n"); 198 + "failed retrieving NPU wlan tx ring addr\n"); 326 199 return err; 327 200 } 328 - writel(val, &dev->mt76.phys[i]->q_tx[0]->regs->desc_base); 329 201 330 - if (!dmam_alloc_coherent(dev->mt76.dma_dev, 331 - 256 * MT7996_TX_RING_SIZE, 332 - &dma_addr, GFP_KERNEL)) 333 - return -ENOMEM; 202 + phy_id = is_mt7996(&dev->mt76) ? band == MT_BAND0 ? 
1 : 0 203 + : band; 204 + writel(val, &dev->mt76.phys[phy_id]->q_tx[0]->regs->desc_base); 334 205 335 - err = mt76_npu_send_msg(npu, i, 206 + err = mt76_npu_send_msg(npu, band, 336 207 WLAN_FUNC_SET_WAIT_TX_BUF_SPACE_HW_BASE, 337 - dma_addr, GFP_KERNEL); 208 + dev->npu_txd_addr[index++], GFP_KERNEL); 338 209 if (err) { 339 210 dev_warn(dev->mt76.dev, 340 211 "failed setting NPU wlan queue buf addr\n"); 341 212 return err; 342 213 } 343 214 344 - if (!dmam_alloc_coherent(dev->mt76.dma_dev, 345 - 256 * MT7996_TX_RING_SIZE, 346 - &dma_addr, GFP_KERNEL)) 347 - return -ENOMEM; 348 - 349 - err = mt76_npu_send_msg(npu, i + 5, 215 + err = mt76_npu_send_msg(npu, band + 5, 350 216 WLAN_FUNC_SET_WAIT_TX_BUF_SPACE_HW_BASE, 351 - dma_addr, GFP_KERNEL); 217 + dev->npu_txd_addr[index++], 218 + GFP_KERNEL); 352 219 if (err) { 353 220 dev_warn(dev->mt76.dev, 354 221 "failed setting NPU wlan tx buf addr\n"); 355 222 return err; 356 223 } 357 224 358 - if (!dmam_alloc_coherent(dev->mt76.dma_dev, 256 * 1024, 359 - &dma_addr, GFP_KERNEL)) 360 - return -ENOMEM; 361 - 362 - err = mt76_npu_send_msg(npu, i + 10, 225 + err = mt76_npu_send_msg(npu, band + 10, 363 226 WLAN_FUNC_SET_WAIT_TX_BUF_SPACE_HW_BASE, 364 - dma_addr, GFP_KERNEL); 227 + dev->npu_txd_addr[index++], 228 + GFP_KERNEL); 365 229 if (err) { 366 230 dev_warn(dev->mt76.dev, 367 231 "failed setting NPU wlan tx buf base\n"); ··· 402 212 static int mt7996_npu_rx_event_init(struct mt7996_dev *dev, 403 213 struct airoha_npu *npu) 404 214 { 405 - struct mt76_queue *q = &dev->mt76.q_rx[MT_RXQ_MAIN_WA]; 215 + int qid = is_mt7996(&dev->mt76) ? MT_RXQ_TXFREE_BAND0 : MT_RXQ_MAIN_WA; 406 216 phys_addr_t phy_addr = dev->mt76.mmio.phy_addr; 217 + struct mt76_queue *q = &dev->mt76.q_rx[qid]; 407 218 int err; 408 219 409 220 err = mt76_npu_send_msg(npu, 0, ··· 424 233 return err; 425 234 } 426 235 427 - phy_addr += MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA) + 0x20; 236 + phy_addr += MT_RXQ_RING_BASE(qid); 237 + phy_addr += is_mt7996(&dev->mt76) ? 
0x90 : 0x20; 428 238 err = mt76_npu_send_msg(npu, 10, WLAN_FUNC_SET_WAIT_PCIE_ADDR, 429 239 phy_addr, GFP_KERNEL); 430 240 if (err) ··· 434 242 return err; 435 243 } 436 244 245 + static int mt7996_npu_set_pcie_addr(struct mt7996_dev *dev, 246 + struct airoha_npu *npu) 247 + { 248 + u32 hif1_ofs = dev->hif2 ? MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0) : 0; 249 + dma_addr_t dma_addr = dev->mt76.mmio.phy_addr; 250 + int err; 251 + 252 + dma_addr += MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND0) + 0x80; 253 + err = mt76_npu_send_msg(npu, 0, WLAN_FUNC_SET_WAIT_PCIE_ADDR, 254 + dma_addr, GFP_KERNEL); 255 + if (err) { 256 + dev_warn(dev->mt76.dev, 257 + "failed setting NPU wlan PCIe desc addr\n"); 258 + return err; 259 + } 260 + 261 + dma_addr = dev->mt76.mmio.phy_addr + hif1_ofs; 262 + if (is_mt7996(&dev->mt76)) { 263 + dma_addr += MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND2) + 0x60; 264 + err = mt76_npu_send_msg(npu, 2, WLAN_FUNC_SET_WAIT_PCIE_ADDR, 265 + dma_addr, GFP_KERNEL); 266 + } else { 267 + dma_addr += MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND1) + 0x90; 268 + err = mt76_npu_send_msg(npu, 1, WLAN_FUNC_SET_WAIT_PCIE_ADDR, 269 + dma_addr, GFP_KERNEL); 270 + } 271 + 272 + if (err) 273 + dev_warn(dev->mt76.dev, 274 + "failed setting NPU wlan PCIe desc addr\n"); 275 + 276 + return err; 277 + } 278 + 437 279 static int mt7996_npu_tx_done_init(struct mt7996_dev *dev, 438 280 struct airoha_npu *npu) 439 281 { 440 282 int err; 283 + 284 + /* rro ring cpu idx */ 285 + err = mt76_npu_send_msg(npu, 15, WLAN_FUNC_SET_WAIT_PCIE_ADDR, 286 + 0, GFP_KERNEL); 287 + if (err) { 288 + dev_warn(dev->mt76.dev, 289 + "failed setting NPU wlan PCIe desc addr\n"); 290 + return err; 291 + } 441 292 442 293 err = mt76_npu_send_msg(npu, 2, WLAN_FUNC_SET_WAIT_INODE_TXRX_REG_ADDR, 443 294 0, GFP_KERNEL); ··· 513 278 &dev->mt76.q_rx[MT_RXQ_NPU1]); 514 279 } 515 280 516 - int mt7996_npu_hw_init(struct mt7996_dev *dev) 281 + int __mt7996_npu_hw_init(struct mt7996_dev *dev) 517 282 { 518 283 struct airoha_npu *npu; 519 - int i, err = 0; 
520 - 521 - mutex_lock(&dev->mt76.mutex); 284 + int i, err; 522 285 523 286 npu = rcu_dereference_protected(dev->mt76.mmio.npu, &dev->mt76.mutex); 524 287 if (!npu) 525 - goto unlock; 288 + return 0; 526 289 527 290 err = mt7996_npu_offload_init(dev, npu); 528 291 if (err) 529 - goto unlock; 292 + return err; 530 293 531 - err = mt7996_npu_rxd_init(dev, npu); 294 + if (is_mt7996(&dev->mt76)) 295 + err = mt7996_npu_rxd_init(dev, npu); 296 + else 297 + err = mt7992_npu_rxd_init(dev, npu); 298 + 532 299 if (err) 533 - goto unlock; 300 + return err; 534 301 535 302 err = mt7996_npu_txd_init(dev, npu); 536 303 if (err) 537 - goto unlock; 304 + return err; 538 305 539 306 err = mt7996_npu_rx_event_init(dev, npu); 540 307 if (err) 541 - goto unlock; 308 + return err; 309 + 310 + err = mt7996_npu_set_pcie_addr(dev, npu); 311 + if (err) 312 + return err; 542 313 543 314 err = mt7996_npu_tx_done_init(dev, npu); 544 315 if (err) 545 - goto unlock; 316 + return err; 546 317 547 318 for (i = MT_RXQ_NPU0; i <= MT_RXQ_NPU1; i++) 548 319 airoha_npu_wlan_enable_irq(npu, i - MT_RXQ_NPU0); 549 - unlock: 320 + 321 + return 0; 322 + } 323 + 324 + int mt7996_npu_hw_init(struct mt7996_dev *dev) 325 + { 326 + int i, err; 327 + 328 + BUILD_BUG_ON(ARRAY_SIZE(dev->npu_txd_addr) % 3); 329 + 330 + for (i = 0; i < ARRAY_SIZE(dev->npu_txd_addr); i += 3) { 331 + int band = i && is_mt7996(&dev->mt76) ? MT_BAND2 : MT_BAND0; 332 + u32 size = is_mt7996(&dev->mt76) ? band == MT_BAND2 333 + ? 
MT7996_NPU_TX_RING_SIZE 334 + : MT7996_NPU_RX_RING_SIZE / 2 335 + : MT7996_TX_RING_SIZE; 336 + 337 + if (!dmam_alloc_coherent(dev->mt76.dma_dev, 256 * size, 338 + &dev->npu_txd_addr[i], GFP_KERNEL)) 339 + return -ENOMEM; 340 + 341 + if (!dmam_alloc_coherent(dev->mt76.dma_dev, 256 * size, 342 + &dev->npu_txd_addr[i + 1], 343 + GFP_KERNEL)) 344 + return -ENOMEM; 345 + 346 + if (!dmam_alloc_coherent(dev->mt76.dma_dev, 256 * 1024, 347 + &dev->npu_txd_addr[i + 2], 348 + GFP_KERNEL)) 349 + return -ENOMEM; 350 + } 351 + 352 + mutex_lock(&dev->mt76.mutex); 353 + err = __mt7996_npu_hw_init(dev); 550 354 mutex_unlock(&dev->mt76.mutex); 551 355 552 356 return err; ··· 594 320 int mt7996_npu_hw_stop(struct mt7996_dev *dev) 595 321 { 596 322 struct airoha_npu *npu; 597 - int i, err; 323 + int i, err = 0; 598 324 u32 info; 325 + 326 + mutex_lock(&dev->mt76.mutex); 599 327 600 328 npu = rcu_dereference_protected(dev->mt76.mmio.npu, &dev->mt76.mutex); 601 329 if (!npu) 602 - return 0; 330 + goto unlock; 603 331 604 332 err = mt76_npu_send_msg(npu, 4, WLAN_FUNC_SET_WAIT_INODE_TXRX_REG_ADDR, 605 333 0, GFP_KERNEL); 606 334 if (err) 607 - return err; 335 + goto unlock; 608 336 609 337 for (i = 0; i < 10; i++) { 610 338 err = mt76_npu_get_msg(npu, 3, WLAN_FUNC_GET_WAIT_NPU_INFO, 611 339 &info, GFP_KERNEL); 612 - if (err) 613 - continue; 340 + if (!err && !info) 341 + break; 614 342 615 - if (info) { 616 - err = -ETIMEDOUT; 617 - continue; 618 - } 343 + err = -ETIMEDOUT; 344 + usleep_range(10000, 15000); 619 345 } 620 346 621 347 if (!err) 622 348 err = mt76_npu_send_msg(npu, 6, 623 349 WLAN_FUNC_SET_WAIT_INODE_TXRX_REG_ADDR, 624 350 0, GFP_KERNEL); 351 + else 352 + dev_err(dev->mt76.dev, "npu stop failed\n"); 353 + unlock: 354 + mutex_unlock(&dev->mt76.mutex); 355 + 625 356 return err; 626 357 }
+11
drivers/net/wireless/mediatek/mt76/mt7996/regs.h
··· 159 159 #define MT_MDP_BASE 0x820cc000 160 160 #define MT_MDP(ofs) (MT_MDP_BASE + (ofs)) 161 161 162 + #define MT_MDP_DCR0 MT_MDP(0x800) 163 + #define MT_MDP_DCR0_RX_HDR_TRANS_EN BIT(19) 164 + 162 165 #define MT_MDP_DCR2 MT_MDP(0x8e8) 163 166 #define MT_MDP_DCR2_RX_TRANS_SHORT BIT(2) 164 167 ··· 736 733 #define MT_HW_REV 0x70010204 737 734 #define MT_HW_REV1 0x8a00 738 735 736 + #define MT_WF_L05_RST 0x70028550 737 + #define MT_WF_L05_RST_WF_RST_MASK GENMASK(4, 0) 738 + 739 739 #define MT_WF_SUBSYS_RST 0x70028600 740 + #define MT_WF_SUBSYS_RST_WHOLE_PATH_RST BIT(0) 741 + #define MT_WF_SUBSYS_RST_WHOLE_PATH_RST_REVERT BIT(5) 742 + #define MT_WF_SUBSYS_RST_BYPASS_WFDMA_SLP_PROT BIT(6) 743 + #define MT_WF_SUBSYS_RST_BYPASS_WFDMA2_SLP_PROT BIT(16) 744 + #define MT_WF_SUBSYS_RST_WHOLE_PATH_RST_REVERT_CYCLE GENMASK(15, 8) 740 745 741 746 /* PCIE MAC */ 742 747 #define MT_PCIE_MAC_BASE 0x74030000
+32 -5
drivers/net/wireless/mediatek/mt76/npu.c
··· 390 390 } 391 391 EXPORT_SYMBOL_GPL(mt76_npu_net_setup_tc); 392 392 393 + int mt76_npu_send_txrx_addr(struct mt76_dev *dev, int ifindex, 394 + u32 direction, u32 i_count_addr, 395 + u32 o_status_addr, u32 o_count_addr) 396 + { 397 + struct { 398 + __le32 dir; 399 + __le32 in_count_addr; 400 + __le32 out_status_addr; 401 + __le32 out_count_addr; 402 + } info = { 403 + .dir = cpu_to_le32(direction), 404 + .in_count_addr = cpu_to_le32(i_count_addr), 405 + .out_status_addr = cpu_to_le32(o_status_addr), 406 + .out_count_addr = cpu_to_le32(o_count_addr), 407 + }; 408 + struct airoha_npu *npu; 409 + int err = -ENODEV; 410 + 411 + rcu_read_lock(); 412 + npu = rcu_dereference(dev->mmio.npu); 413 + if (npu) 414 + err = airoha_npu_wlan_send_msg(npu, ifindex, 415 + WLAN_FUNC_SET_WAIT_INODE_TXRX_REG_ADDR, 416 + &info, sizeof(info), GFP_ATOMIC); 417 + rcu_read_unlock(); 418 + 419 + return err; 420 + } 421 + EXPORT_SYMBOL_GPL(mt76_npu_send_txrx_addr); 422 + 393 423 void mt76_npu_disable_irqs(struct mt76_dev *dev) 394 424 { 395 425 struct airoha_npu *npu; ··· 449 419 struct airoha_ppe_dev *ppe_dev; 450 420 struct airoha_npu *npu; 451 421 int err = 0; 452 - 453 - /* NPU offloading is only supported by MT7992 */ 454 - if (!is_mt7992(dev)) 455 - return 0; 456 422 457 423 mutex_lock(&dev->mutex); 458 424 ··· 482 456 dev->mmio.phy_addr = phy_addr; 483 457 dev->mmio.npu_type = type; 484 458 /* NPU offloading requires HW-RRO for RX packet reordering. */ 485 - dev->hwrro_mode = MT76_HWRRO_V3_1; 459 + dev->hwrro_mode = is_mt7996(dev) ? MT76_HWRRO_V3 : MT76_HWRRO_V3_1; 460 + dev->rx_token_size = 32768; 486 461 487 462 rcu_assign_pointer(dev->mmio.npu, npu); 488 463 rcu_assign_pointer(dev->mmio.ppe_dev, ppe_dev);
+61 -9
drivers/net/wireless/mediatek/mt76/scan.c
··· 16 16 17 17 clear_bit(MT76_SCANNING, &phy->state); 18 18 19 - if (dev->scan.chan && phy->main_chandef.chan && 20 - !test_bit(MT76_MCU_RESET, &dev->phy.state)) 19 + if (dev->scan.chan && phy->main_chandef.chan && phy->offchannel && 20 + !test_bit(MT76_MCU_RESET, &dev->phy.state)) { 21 21 mt76_set_channel(phy, &phy->main_chandef, false); 22 + mt76_offchannel_notify(phy, false); 23 + } 22 24 mt76_put_vif_phy_link(phy, dev->scan.vif, dev->scan.mlink); 23 25 memset(&dev->scan, 0, sizeof(dev->scan)); 24 26 if (!test_bit(MT76_MCU_RESET, &dev->phy.state)) ··· 29 27 30 28 void mt76_abort_scan(struct mt76_dev *dev) 31 29 { 30 + spin_lock_bh(&dev->scan_lock); 31 + dev->scan.beacon_wait = false; 32 + spin_unlock_bh(&dev->scan_lock); 33 + 32 34 cancel_delayed_work_sync(&dev->scan_work); 33 35 mt76_scan_complete(dev, true); 34 36 } ··· 83 77 rcu_read_unlock(); 84 78 } 85 79 80 + void mt76_scan_rx_beacon(struct mt76_dev *dev, struct ieee80211_channel *chan) 81 + { 82 + struct mt76_phy *phy; 83 + 84 + spin_lock(&dev->scan_lock); 85 + 86 + if (!dev->scan.beacon_wait || dev->scan.beacon_received || 87 + dev->scan.chan != chan) 88 + goto out; 89 + 90 + phy = dev->scan.phy; 91 + if (!phy) 92 + goto out; 93 + 94 + dev->scan.beacon_received = true; 95 + ieee80211_queue_delayed_work(phy->hw, &dev->scan_work, 0); 96 + 97 + out: 98 + spin_unlock(&dev->scan_lock); 99 + } 100 + 86 101 void mt76_scan_work(struct work_struct *work) 87 102 { 88 103 struct mt76_dev *dev = container_of(work, struct mt76_dev, ··· 112 85 struct cfg80211_chan_def chandef = {}; 113 86 struct mt76_phy *phy = dev->scan.phy; 114 87 int duration = HZ / 9; /* ~110 ms */ 88 + bool beacon_rx, offchannel = true; 115 89 int i; 90 + 91 + if (!phy || !req) 92 + return; 93 + 94 + spin_lock_bh(&dev->scan_lock); 95 + beacon_rx = dev->scan.beacon_wait && dev->scan.beacon_received; 96 + dev->scan.beacon_wait = false; 97 + spin_unlock_bh(&dev->scan_lock); 98 + 99 + if (beacon_rx) 100 + goto probe; 116 101 117 102 if 
(dev->scan.chan_idx >= req->n_channels) { 118 103 mt76_scan_complete(dev, false); 119 104 return; 120 105 } 121 106 122 - if (dev->scan.chan && phy->num_sta) { 107 + if (dev->scan.chan && phy->num_sta && phy->offchannel) { 123 108 dev->scan.chan = NULL; 124 109 mt76_set_channel(phy, &phy->main_chandef, false); 110 + mt76_offchannel_notify(phy, false); 125 111 goto out; 126 112 } 127 113 128 114 dev->scan.chan = req->channels[dev->scan.chan_idx++]; 129 - cfg80211_chandef_create(&chandef, dev->scan.chan, NL80211_CHAN_HT20); 130 - mt76_set_channel(phy, &chandef, true); 115 + offchannel = mt76_offchannel_chandef(phy, dev->scan.chan, &chandef); 131 116 132 - if (!req->n_ssids || 133 - chandef.chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR)) 117 + if (offchannel) 118 + mt76_offchannel_notify(phy, true); 119 + mt76_set_channel(phy, &chandef, offchannel); 120 + 121 + if (!req->n_ssids) 134 122 goto out; 135 123 136 - duration = HZ / 16; /* ~60 ms */ 124 + if (chandef.chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR)) { 125 + spin_lock_bh(&dev->scan_lock); 126 + dev->scan.beacon_received = false; 127 + dev->scan.beacon_wait = true; 128 + spin_unlock_bh(&dev->scan_lock); 129 + goto out; 130 + } 131 + 132 + probe: 133 + if (phy->offchannel) 134 + duration = HZ / 16; /* ~60 ms */ 137 135 local_bh_disable(); 138 136 for (i = 0; i < req->n_ssids; i++) 139 137 mt76_scan_send_probe(dev, &req->ssids[i]); 140 138 local_bh_enable(); 141 139 142 140 out: 143 - if (dev->scan.chan) 141 + if (dev->scan.chan && phy->offchannel) 144 142 duration = max_t(int, duration, 145 143 msecs_to_jiffies(req->duration + 146 144 (req->duration >> 5)));
+31 -3
drivers/net/wireless/mediatek/mt76/tx.c
··· 227 227 struct sk_buff *skb) 228 228 { 229 229 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 230 + struct ieee80211_sta *sta; 230 231 int pending; 232 + int i; 231 233 232 234 if (!wcid || info->tx_time_est) 233 235 return; ··· 237 235 pending = atomic_dec_return(&wcid->non_aql_packets); 238 236 if (pending < 0) 239 237 atomic_cmpxchg(&wcid->non_aql_packets, pending, 0); 238 + 239 + sta = wcid_to_sta(wcid); 240 + if (!sta || pending != MT_MAX_NON_AQL_PKT - 1) 241 + return; 242 + 243 + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) { 244 + if (!sta->txq[i]) 245 + continue; 246 + 247 + ieee80211_schedule_txq(dev->hw, sta->txq[i]); 248 + } 240 249 } 241 250 242 251 void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb, ··· 555 542 if (!wcid || test_bit(MT_WCID_FLAG_PS, &wcid->flags)) 556 543 continue; 557 544 545 + if (atomic_read(&wcid->non_aql_packets) >= MT_MAX_NON_AQL_PKT) 546 + continue; 547 + 558 548 phy = mt76_dev_phy(dev, wcid->phy_idx); 559 549 if (test_bit(MT76_RESET, &phy->state) || phy->offchannel) 560 550 continue; ··· 632 616 633 617 if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) && 634 618 !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) && 635 - !ieee80211_is_data(hdr->frame_control) && 619 + !ieee80211_is_data_present(hdr->frame_control) && 636 620 (!ieee80211_is_bufferable_mmpdu(skb) || 637 621 ieee80211_is_deauth(hdr->frame_control) || 638 622 head == &wcid->tx_offchannel)) ··· 660 644 return ret; 661 645 } 662 646 663 - static void mt76_txq_schedule_pending(struct mt76_phy *phy) 647 + void mt76_txq_schedule_pending(struct mt76_phy *phy) 664 648 { 665 649 LIST_HEAD(tx_list); 666 650 int ret = 0; ··· 866 850 token = idr_alloc(&dev->token, *ptxwi, dev->token_start, 867 851 dev->token_start + dev->token_size, 868 852 GFP_ATOMIC); 869 - if (token >= dev->token_start) 853 + if (token >= dev->token_start) { 870 854 dev->token_count++; 855 + 856 + if ((*ptxwi)->qid == MT_TXQ_PSD) { 857 + struct mt76_phy *mphy = 
mt76_dev_phy(dev, (*ptxwi)->phy_idx); 858 + atomic_inc(&mphy->mgmt_tx_pending); 859 + } 860 + } 871 861 872 862 #ifdef CONFIG_NET_MEDIATEK_SOC_WED 873 863 if (mtk_wed_device_active(&dev->mmio.wed) && ··· 918 896 txwi = idr_remove(&dev->token, token); 919 897 if (txwi) { 920 898 dev->token_count--; 899 + 900 + if (txwi->qid == MT_TXQ_PSD) { 901 + struct mt76_phy *mphy = mt76_dev_phy(dev, txwi->phy_idx); 902 + if (atomic_dec_and_test(&mphy->mgmt_tx_pending)) 903 + wake_up(&dev->tx_wait); 904 + } 921 905 922 906 #ifdef CONFIG_NET_MEDIATEK_SOC_WED 923 907 if (mtk_wed_device_active(&dev->mmio.wed) &&
+14 -1
drivers/net/wireless/mediatek/mt7601u/mcu.c
··· 403 403 return ret; 404 404 } 405 405 406 + static const char * const mt7601u_fw_paths[] = { 407 + "mediatek/" MT7601U_FIRMWARE, 408 + MT7601U_FIRMWARE, 409 + }; 410 + 406 411 static int mt7601u_load_firmware(struct mt7601u_dev *dev) 407 412 { 408 413 const struct firmware *fw; 409 414 const struct mt76_fw_header *hdr; 410 415 int len, ret; 411 416 u32 val; 417 + int i; 412 418 413 419 mt7601u_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN | 414 420 MT_USB_DMA_CFG_TX_BULK_EN)); ··· 422 416 if (firmware_running(dev)) 423 417 return firmware_request_cache(dev->dev, MT7601U_FIRMWARE); 424 418 425 - ret = request_firmware(&fw, MT7601U_FIRMWARE, dev->dev); 419 + /* Try loading firmware from multiple locations */ 420 + fw = NULL; 421 + for (i = 0; i < MT7601U_FIRMWARE_PATHS; i++) { 422 + ret = request_firmware(&fw, mt7601u_fw_paths[i], dev->dev); 423 + if (ret == 0) 424 + break; 425 + } 426 + 426 427 if (ret) 427 428 return ret; 428 429
+1
drivers/net/wireless/mediatek/mt7601u/usb.h
··· 9 9 #include "mt7601u.h" 10 10 11 11 #define MT7601U_FIRMWARE "mt7601u.bin" 12 + #define MT7601U_FIRMWARE_PATHS ARRAY_SIZE(mt7601u_fw_paths) 12 13 13 14 #define MT_VEND_REQ_MAX_RETRY 10 14 15 #define MT_VEND_REQ_TOUT_MS 300
+35
drivers/net/wireless/virtual/mac80211_hwsim.c
··· 4640 4640 }, 4641 4641 /* PPE threshold information is not supported */ 4642 4642 }, 4643 + .uhr_cap = { 4644 + .has_uhr = true, 4645 + .phy.cap = IEEE80211_UHR_PHY_CAP_ELR_RX | 4646 + IEEE80211_UHR_PHY_CAP_ELR_TX, 4647 + }, 4643 4648 }, 4644 4649 { 4645 4650 .types_mask = BIT(NL80211_IFTYPE_AP) | ··· 4752 4747 }, 4753 4748 }, 4754 4749 /* PPE threshold information is not supported */ 4750 + }, 4751 + .uhr_cap = { 4752 + .has_uhr = true, 4753 + .phy.cap = IEEE80211_UHR_PHY_CAP_ELR_RX | 4754 + IEEE80211_UHR_PHY_CAP_ELR_TX, 4755 4755 }, 4756 4756 }, 4757 4757 #ifdef CONFIG_MAC80211_MESH ··· 4927 4917 }, 4928 4918 /* PPE threshold information is not supported */ 4929 4919 }, 4920 + .uhr_cap = { 4921 + .has_uhr = true, 4922 + .phy.cap = IEEE80211_UHR_PHY_CAP_ELR_RX | 4923 + IEEE80211_UHR_PHY_CAP_ELR_TX, 4924 + }, 4930 4925 }, 4931 4926 { 4932 4927 .types_mask = BIT(NL80211_IFTYPE_AP) | ··· 5056 5041 }, 5057 5042 }, 5058 5043 /* PPE threshold information is not supported */ 5044 + }, 5045 + .uhr_cap = { 5046 + .has_uhr = true, 5047 + .phy.cap = IEEE80211_UHR_PHY_CAP_ELR_RX | 5048 + IEEE80211_UHR_PHY_CAP_ELR_TX, 5059 5049 }, 5060 5050 }, 5061 5051 #ifdef CONFIG_MAC80211_MESH ··· 5255 5235 }, 5256 5236 /* PPE threshold information is not supported */ 5257 5237 }, 5238 + .uhr_cap = { 5239 + .has_uhr = true, 5240 + .phy.cap = IEEE80211_UHR_PHY_CAP_ELR_RX | 5241 + IEEE80211_UHR_PHY_CAP_ELR_TX, 5242 + }, 5258 5243 }, 5259 5244 { 5260 5245 .types_mask = BIT(NL80211_IFTYPE_AP) | ··· 5406 5381 }, 5407 5382 /* PPE threshold information is not supported */ 5408 5383 }, 5384 + .uhr_cap = { 5385 + .has_uhr = true, 5386 + .phy.cap = IEEE80211_UHR_PHY_CAP_ELR_RX | 5387 + IEEE80211_UHR_PHY_CAP_ELR_TX, 5388 + }, 5409 5389 }, 5410 5390 #ifdef CONFIG_MAC80211_MESH 5411 5391 { ··· 5501 5471 }, 5502 5472 }, 5503 5473 /* PPE threshold information is not supported */ 5474 + }, 5475 + .uhr_cap = { 5476 + .has_uhr = true, 5477 + .phy.cap = IEEE80211_UHR_PHY_CAP_ELR_RX | 5478 + 
IEEE80211_UHR_PHY_CAP_ELR_TX, 5504 5479 }, 5505 5480 }, 5506 5481 #endif
+6 -1
include/linux/ieee80211-nan.h
··· 9 9 * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> 10 10 * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH 11 11 * Copyright (c) 2016 - 2017 Intel Deutschland GmbH 12 - * Copyright (c) 2018 - 2025 Intel Corporation 12 + * Copyright (c) 2018 - 2026 Intel Corporation 13 13 */ 14 14 15 15 #ifndef LINUX_IEEE80211_NAN_H ··· 22 22 #define NAN_OP_MODE_80P80MHZ 0x02 23 23 #define NAN_OP_MODE_160MHZ 0x04 24 24 #define NAN_OP_MODE_PNDL_SUPPRTED 0x08 25 + 26 + #define NAN_DEV_CAPA_NUM_TX_ANT_POS 0 27 + #define NAN_DEV_CAPA_NUM_TX_ANT_MASK 0x0f 28 + #define NAN_DEV_CAPA_NUM_RX_ANT_POS 4 29 + #define NAN_DEV_CAPA_NUM_RX_ANT_MASK 0xf0 25 30 26 31 /* NAN Device capabilities, as defined in Wi-Fi Aware (TM) specification 27 32 * Table 79
+7
include/linux/ieee80211.h
··· 1194 1194 1195 1195 #define IEEE80211_MIN_ACTION_SIZE(type) offsetofend(struct ieee80211_mgmt, u.action.type) 1196 1196 1197 + /* Link Reconfiguration Status Duple field */ 1198 + struct ieee80211_ml_reconf_status { 1199 + u8 info; 1200 + __le16 status; 1201 + } __packed; 1202 + 1203 + #define IEEE80211_ML_RECONF_LINK_ID_MASK 0xf 1197 1204 1198 1205 /* Management MIC information element (IEEE 802.11w) for CMAC */ 1199 1206 struct ieee80211_mmie {
+258 -7
include/net/cfg80211.h
··· 1831 1831 * @eml_cap: EML capabilities of this station 1832 1832 * @link_sta_params: link related params. 1833 1833 * @epp_peer: EPP peer indication 1834 + * @nmi_mac: MAC address of the NMI station of the NAN peer 1834 1835 */ 1835 1836 struct station_parameters { 1836 1837 struct net_device *vlan; ··· 1859 1858 u16 eml_cap; 1860 1859 struct link_station_parameters link_sta_params; 1861 1860 bool epp_peer; 1861 + const u8 *nmi_mac; 1862 1862 }; 1863 1863 1864 1864 /** ··· 1899 1897 * entry that is operating, has been marked authorized by userspace) 1900 1898 * @CFG80211_STA_MESH_PEER_KERNEL: peer on mesh interface (kernel managed) 1901 1899 * @CFG80211_STA_MESH_PEER_USER: peer on mesh interface (user managed) 1900 + * @CFG80211_STA_NAN_MGMT: NAN management interface station 1901 + * @CFG80211_STA_NAN_DATA: NAN data path station 1902 1902 */ 1903 1903 enum cfg80211_station_type { 1904 1904 CFG80211_STA_AP_CLIENT, ··· 1912 1908 CFG80211_STA_TDLS_PEER_ACTIVE, 1913 1909 CFG80211_STA_MESH_PEER_KERNEL, 1914 1910 CFG80211_STA_MESH_PEER_USER, 1911 + CFG80211_STA_NAN_MGMT, 1912 + CFG80211_STA_NAN_DATA, 1915 1913 }; 1916 1914 1917 1915 /** ··· 3987 3981 }; 3988 3982 3989 3983 /** 3984 + * DOC: Neighbor Awareness Networking (NAN) 3985 + * 3986 + * NAN uses two interface types: 3987 + * 3988 + * - %NL80211_IFTYPE_NAN: a non-netdev interface. This has two roles: (1) holds 3989 + * the configuration of all NAN activities (DE parameters, synchronisation 3990 + * parameters, local schedule, etc.), and (2) uses as the NAN Management 3991 + * Interface (NMI), which is used for NAN management communication. 3992 + * 3993 + * - %NL80211_IFTYPE_NAN_DATA: The NAN Data Interface (NDI), used for data 3994 + * communication with NAN peers. 3995 + * 3996 + * An NDI interface can only be started (IFF_UP) if the NMI one is running and 3997 + * NAN is started. Before NAN is stopped, all associated NDI interfaces 3998 + * must be stopped first. 
3999 + * 4000 + * The local schedule specifies which channels the device is available on and 4001 + * when. Must be cancelled before NAN is stopped. 4002 + * 4003 + * NAN Stations 4004 + * ~~~~~~~~~~~~ 4005 + * 4006 + * There are two types of stations corresponding to the two interface types: 4007 + * 4008 + * - NMI station: Represents the NAN peer. Peer-specific data such as the peer's 4009 + * schedule and the HT, VHT and HE capabilities belongs to the NMI station. 4010 + * Also used for Tx/Rx of NAN management frames to/from the peer. 4011 + * Added on the %NL80211_IFTYPE_NAN interface. 4012 + * 4013 + * - NDI station: Used for Tx/Rx of data frames (and non-NAN management frames) 4014 + * for a specific NDP established with the NAN peer. Added on the 4015 + * %NL80211_IFTYPE_NAN_DATA interface. 4016 + * 4017 + * A peer may reuse its NMI address as the NDI address. In that case, two 4018 + * separate stations should be added even though they share the same MAC 4019 + * address. 4020 + * 4021 + * HT, VHT and HE capabilities should not changes after it was set. It is the 4022 + * driver's responsibility to check that. 4023 + * 4024 + * An NDI station can only be added if the corresponding NMI station has already 4025 + * been configured with HT (and possibly VHT and HE) capabilities. It is the 4026 + * driver's responsibility to check that. 4027 + * 4028 + * All NDI stations must be removed before corresponding NMI station is removed. 4029 + * Therefore, removing a NMI station implies that the associated NDI station(s) 4030 + * (if any) will be removed first. 4031 + * 4032 + * NAN Dependencies 4033 + * ~~~~~~~~~~~~~~~~ 4034 + * 4035 + * The following diagram shows the dependencies between NAN components. 
4036 + * An arrow from A to B means A must be started/added before B, and B must be 4037 + * stopped/removed before A: 4038 + * 4039 + * +-------------+ 4040 + * | NMI iface |---(local schedule) 4041 + * +------+------+ 4042 + * / \ 4043 + * v v 4044 + * +-----------+ +-------------+ 4045 + * | NDI iface | | NMI sta |---(peer schedule) 4046 + * +-----+-----+ +------+------+ 4047 + * \ / 4048 + * v v 4049 + * +----------+ 4050 + * | NDI sta | 4051 + * +----------+ 4052 + */ 4053 + 4054 + /** 3990 4055 * struct cfg80211_nan_band_config - NAN band specific configuration 3991 4056 * 3992 4057 * @chan: Pointer to the IEEE 802.11 channel structure. The channel to be used ··· 4125 4048 u16 extra_nan_attrs_len; 4126 4049 const u8 *vendor_elems; 4127 4050 u16 vendor_elems_len; 4051 + }; 4052 + 4053 + #define CFG80211_NAN_SCHED_NUM_TIME_SLOTS 32 4054 + 4055 + /** 4056 + * struct cfg80211_nan_channel - NAN channel configuration 4057 + * 4058 + * This struct defines a NAN channel configuration 4059 + * 4060 + * @chandef: the channel definition 4061 + * @channel_entry: pointer to the Channel Entry blob as defined in Wi-Fi Aware 4062 + * (TM) 4.0 specification Table 100 (Channel Entry format for the NAN 4063 + * Availability attribute). 4064 + * @rx_nss: number of spatial streams supported on this channel 4065 + */ 4066 + struct cfg80211_nan_channel { 4067 + struct cfg80211_chan_def chandef; 4068 + const u8 *channel_entry; 4069 + u8 rx_nss; 4070 + }; 4071 + 4072 + /** 4073 + * struct cfg80211_nan_local_sched - NAN local schedule 4074 + * 4075 + * This struct defines NAN local schedule parameters 4076 + * 4077 + * @schedule: a mapping of time slots to chandef indexes in %nan_channels. 4078 + * An unscheduled slot will be set to %NL80211_NAN_SCHED_NOT_AVAIL_SLOT. 4079 + * @n_channels: number of channel definitions in %nan_channels. 4080 + * @nan_avail_blob: pointer to NAN Availability attribute blob. 4081 + * See %NL80211_ATTR_NAN_AVAIL_BLOB for more details. 
4082 + * @nan_avail_blob_len: length of the @nan_avail_blob in bytes. 4083 + * @deferred: if true, the command containing this schedule configuration is a 4084 + * request from the device to perform an announced schedule update. This 4085 + * means that it needs to send the updated NAN availability to the peers, 4086 + * and do the actual switch on the right time (i.e. at the end of the slot 4087 + * after the slot in which the updated NAN Availability was sent). 4088 + * See %NL80211_ATTR_NAN_SCHED_DEFERRED for more details. 4089 + * If false, the schedule is applied immediately. 4090 + * @nan_channels: array of NAN channel definitions that can be scheduled. 4091 + */ 4092 + struct cfg80211_nan_local_sched { 4093 + u8 schedule[CFG80211_NAN_SCHED_NUM_TIME_SLOTS]; 4094 + u8 n_channels; 4095 + const u8 *nan_avail_blob; 4096 + u16 nan_avail_blob_len; 4097 + bool deferred; 4098 + struct cfg80211_nan_channel nan_channels[] __counted_by(n_channels); 4099 + }; 4100 + 4101 + /** 4102 + * struct cfg80211_nan_peer_map - NAN peer schedule map 4103 + * 4104 + * This struct defines a single NAN peer schedule map 4105 + * 4106 + * @map_id: map ID of this schedule map 4107 + * @schedule: a mapping of time slots to chandef indexes in the schedule's 4108 + * @nan_channels. Each slot lasts 16TUs. An unscheduled slot will be 4109 + * set to %NL80211_NAN_SCHED_NOT_AVAIL_SLOT. 4110 + */ 4111 + struct cfg80211_nan_peer_map { 4112 + u8 map_id; 4113 + u8 schedule[CFG80211_NAN_SCHED_NUM_TIME_SLOTS]; 4114 + }; 4115 + 4116 + #define CFG80211_NAN_MAX_PEER_MAPS 2 4117 + #define CFG80211_NAN_INVALID_MAP_ID 0xff 4118 + 4119 + /** 4120 + * struct cfg80211_nan_peer_sched - NAN peer schedule 4121 + * 4122 + * This struct defines NAN peer schedule parameters for a peer. 4123 + * 4124 + * @peer_addr: MAC address of the peer (NMI address) 4125 + * @seq_id: sequence ID of the peer schedule. 4126 + * @committed_dw: committed DW as published by the peer. 
4127 + * See %NL80211_ATTR_NAN_COMMITTED_DW 4128 + * @max_chan_switch: maximum channel switch time in microseconds as published 4129 + * by the peer. See %NL80211_ATTR_NAN_MAX_CHAN_SWITCH_TIME. 4130 + * @init_ulw: initial ULWs as published by the peer. 4131 + * @ulw_size: number of bytes in @init_ulw. 4132 + * @n_channels: number of channel definitions in @nan_channels. 4133 + * @nan_channels: array of NAN channel definitions for this schedule. 4134 + * @maps: array of peer schedule maps. Unused entries have 4135 + * map_id = %CFG80211_NAN_INVALID_MAP_ID. 4136 + */ 4137 + struct cfg80211_nan_peer_sched { 4138 + const u8 *peer_addr; 4139 + u8 seq_id; 4140 + u16 committed_dw; 4141 + u16 max_chan_switch; 4142 + const u8 *init_ulw; 4143 + u16 ulw_size; 4144 + u8 n_channels; 4145 + struct cfg80211_nan_channel *nan_channels; 4146 + struct cfg80211_nan_peer_map maps[CFG80211_NAN_MAX_PEER_MAPS]; 4128 4147 }; 4129 4148 4130 4149 /** ··· 5003 4830 * @nan_change_conf: changes NAN configuration. The changed parameters must 5004 4831 * be specified in @changes (using &enum cfg80211_nan_conf_changes); 5005 4832 * All other parameters must be ignored. 4833 + * @nan_set_local_sched: configure the local schedule for NAN. The schedule 4834 + * consists of an array of %cfg80211_nan_channel and the schedule itself, 4835 + * in which each entry maps each time slot to the channel on which the 4836 + * radio should operate on. If the chandef of a NAN channel is not 4837 + * changed, the channel entry must also remain unchanged. It is the 4838 + * driver's responsibility to verify this. 4839 + * @nan_set_peer_sched: configure the peer schedule for NAN. The schedule 4840 + * consists of an array of %cfg80211_nan_channel and the schedule itself, 4841 + * in which each entry maps each time slot to a channel on which the 4842 + * radio should operate on. In addition, it contains more peer's schedule 4843 + * information such as committed DW, etc. 
When updating an existing peer 4844 + * schedule, the full new schedule is provided - partial updates are not 4845 + * supported, and the new schedule completely replaces the previous one. 5006 4846 * 5007 4847 * @set_multicast_to_unicast: configure multicast to unicast conversion for BSS 5008 4848 * ··· 5393 5207 struct wireless_dev *wdev, 5394 5208 struct cfg80211_nan_conf *conf, 5395 5209 u32 changes); 5396 - 5210 + int (*nan_set_local_sched)(struct wiphy *wiphy, 5211 + struct wireless_dev *wdev, 5212 + struct cfg80211_nan_local_sched *sched); 5213 + int (*nan_set_peer_sched)(struct wiphy *wiphy, 5214 + struct wireless_dev *wdev, 5215 + struct cfg80211_nan_peer_sched *sched); 5397 5216 int (*set_multicast_to_unicast)(struct wiphy *wiphy, 5398 5217 struct net_device *dev, 5399 5218 const bool enabled); ··· 6027 5836 * @max_channel_switch_time: maximum channel switch time in milliseconds. 6028 5837 * @dev_capabilities: NAN device capabilities as defined in Wi-Fi Aware (TM) 6029 5838 * specification Table 79 (Capabilities field). 5839 + * @phy: Band-agnostic capabilities for NAN data interfaces. Since NAN 5840 + * operates on multiple channels simultaneously, these capabilities apply 5841 + * across all bands. Valid only if NL80211_IFTYPE_NAN_DATA is supported. 5842 + * @phy.ht: HT capabilities (mandatory for NAN data) 5843 + * @phy.vht: VHT capabilities (optional) 5844 + * @phy.he: HE capabilities (optional) 6030 5845 */ 6031 5846 struct wiphy_nan_capa { 6032 5847 u32 flags; ··· 6040 5843 u8 n_antennas; 6041 5844 u16 max_channel_switch_time; 6042 5845 u8 dev_capabilities; 5846 + struct { 5847 + struct ieee80211_sta_ht_cap ht; 5848 + struct ieee80211_sta_vht_cap vht; 5849 + struct ieee80211_sta_he_cap he; 5850 + } phy; 6043 5851 }; 6044 5852 6045 5853 #define CFG80211_HW_TIMESTAMP_ALL_PEERS 0xffff ··· 6938 6736 * the P2P Device. 
6939 6737 * @ps: powersave mode is enabled 6940 6738 * @ps_timeout: dynamic powersave timeout 6941 - * @ap_unexpected_nlportid: (private) netlink port ID of application 6942 - * registered for unexpected class 3 frames (AP mode) 6739 + * @unexpected_nlportid: (private) netlink port ID of application 6740 + * registered for unexpected frames (AP mode or NAN_DATA mode) 6943 6741 * @conn: (private) cfg80211 software SME connection state machine data 6944 6742 * @connect_keys: (private) keys to set after connection is established 6945 6743 * @conn_bss_type: connecting/connected BSS type ··· 7001 6799 bool ps; 7002 6800 int ps_timeout; 7003 6801 7004 - u32 ap_unexpected_nlportid; 6802 + u32 unexpected_nlportid; 7005 6803 7006 6804 u32 owner_nlportid; 7007 6805 bool nl_owner_dead; ··· 7061 6859 } ocb; 7062 6860 struct { 7063 6861 u8 cluster_id[ETH_ALEN] __aligned(2); 6862 + u8 n_channels; 6863 + struct cfg80211_chan_def *chandefs; 6864 + bool sched_update_pending; 7064 6865 } nan; 7065 6866 } u; 7066 6867 ··· 9572 9367 * @addr: the transmitter address 9573 9368 * @gfp: context flags 9574 9369 * 9575 - * This function is used in AP mode (only!) to inform userspace that 9576 - * a spurious class 3 frame was received, to be able to deauth the 9577 - * sender. 9370 + * This function is used in AP mode to inform userspace that a spurious 9371 + * class 3 frame was received, to be able to deauth the sender. 9372 + * It is also used in NAN_DATA mode to report frames from unknown peers 9373 + * (A2 not assigned to any active NDP), per Wi-Fi Aware (TM) 4.0 specification 6.2.5. 9578 9374 * Return: %true if the frame was passed to userspace (or this failed 9579 9375 * for a reason other than not having a subscription.) 
9580 9376 */ ··· 10222 10016 enum nl80211_nan_func_term_reason reason, 10223 10017 u64 cookie, gfp_t gfp); 10224 10018 10019 + /** 10020 + * cfg80211_nan_sched_update_done - notify deferred schedule update completion 10021 + * @wdev: the wireless device reporting the event 10022 + * @success: whether or not the schedule update was successful 10023 + * @gfp: allocation flags 10024 + * 10025 + * This function notifies user space that a deferred local NAN schedule update 10026 + * (requested with %NL80211_ATTR_NAN_SCHED_DEFERRED) has been completed. 10027 + */ 10028 + void cfg80211_nan_sched_update_done(struct wireless_dev *wdev, bool success, 10029 + gfp_t gfp); 10030 + 10225 10031 /* ethtool helper */ 10226 10032 void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info); 10227 10033 ··· 10573 10355 void cfg80211_nan_cluster_joined(struct wireless_dev *wdev, 10574 10356 const u8 *cluster_id, bool new_cluster, 10575 10357 gfp_t gfp); 10358 + 10359 + /** 10360 + * cfg80211_nan_ulw_update - Notify user space about ULW update 10361 + * @wdev: Pointer to the wireless device structure 10362 + * @ulw: Pointer to the ULW blob data 10363 + * @ulw_len: Length of the ULW blob in bytes 10364 + * @gfp: Memory allocation flags 10365 + * 10366 + * This function is used by drivers to notify user space when the device's 10367 + * ULW (Unaligned Schedule) blob has been updated. User space can use this 10368 + * blob to attach to frames sent to peers. 
10369 + */ 10370 + void cfg80211_nan_ulw_update(struct wireless_dev *wdev, 10371 + const u8 *ulw, size_t ulw_len, gfp_t gfp); 10372 + 10373 + /** 10374 + * cfg80211_nan_channel_evac - Notify user space about NAN channel evacuation 10375 + * @wdev: Pointer to the wireless device structure 10376 + * @chandef: Pointer to the channel definition of the NAN channel that was 10377 + * evacuated 10378 + * @gfp: Memory allocation flags 10379 + * 10380 + * This function is used by drivers to notify user space when a NAN 10381 + * channel has been evacuated (i.e. ULWed) due to channel resource conflicts 10382 + * with other interfaces. 10383 + * This can happen when another interface sharing the channel resource with NAN 10384 + * needs to move to a different channel (e.g. due to channel switch or link 10385 + * switch). User space may reconfigure the local schedule to exclude the 10386 + * evacuated channel. 10387 + */ 10388 + void cfg80211_nan_channel_evac(struct wireless_dev *wdev, 10389 + const struct cfg80211_chan_def *chandef, 10390 + gfp_t gfp); 10576 10391 10577 10392 #ifdef CONFIG_CFG80211_DEBUGFS 10578 10393 /**
+229 -3
include/uapi/linux/nl80211.h
··· 906 906 * @NL80211_CMD_UNEXPECTED_FRAME: Used by an application controlling an AP 907 907 * (or GO) interface (i.e. hostapd) to ask for unexpected frames to 908 908 * implement sending deauth to stations that send unexpected class 3 909 - * frames. Also used as the event sent by the kernel when such a frame 910 - * is received. 909 + * frames. For NAN_DATA interfaces, this is used to report frames from 910 + * unknown peers (A2 not assigned to any active NDP). 911 + * Also used as the event sent by the kernel when such a frame is received. 911 912 * For the event, the %NL80211_ATTR_MAC attribute carries the TA and 912 913 * other attributes like the interface index are present. 913 914 * If used as the command it must have an interface index and you can ··· 1368 1367 * %NL80211_ATTR_INCUMBENT_SIGNAL_INTERFERENCE_BITMAP. The current channel 1369 1368 * definition is also sent. 1370 1369 * 1370 + * @NL80211_CMD_NAN_SET_LOCAL_SCHED: Set the local NAN schedule. NAN must be 1371 + * operational (%NL80211_CMD_START_NAN was executed). Must contain 1372 + * %NL80211_ATTR_NAN_TIME_SLOTS and %NL80211_ATTR_NAN_AVAIL_BLOB, but 1373 + * %NL80211_ATTR_NAN_CHANNEL is optional (for example in case of a channel 1374 + * removal, that channel won't be provided). 1375 + * If %NL80211_ATTR_NAN_SCHED_DEFERRED is set, the command is a request 1376 + * from the device to perform an announced schedule update. See 1377 + * %NL80211_ATTR_NAN_SCHED_DEFERRED for more details. 1378 + * If not set, the schedule should be applied immediately. 1379 + * @NL80211_CMD_NAN_SCHED_UPDATE_DONE: Event sent to user space to notify that 1380 + * a deferred local NAN schedule update (requested with 1381 + * %NL80211_CMD_NAN_SET_LOCAL_SCHED and %NL80211_ATTR_NAN_SCHED_DEFERRED) 1382 + * has been completed. The presence of %NL80211_ATTR_NAN_SCHED_UPDATE_SUCCESS 1383 + * indicates that the update was successful. 1384 + * @NL80211_CMD_NAN_SET_PEER_SCHED: Set the peer NAN schedule. 
NAN 1385 + * must be operational (%NL80211_CMD_START_NAN was executed). 1386 + * Required attributes: %NL80211_ATTR_MAC (peer NMI address) and 1387 + * %NL80211_ATTR_NAN_COMMITTED_DW. 1388 + * Optionally, the full schedule can be provided by including all of: 1389 + * %NL80211_ATTR_NAN_SEQ_ID, %NL80211_ATTR_NAN_CHANNEL (one or more), and 1390 + * %NL80211_ATTR_NAN_PEER_MAPS (see &enum nl80211_nan_peer_map_attrs). 1391 + * If any of these three optional attributes is provided, all three must 1392 + * be provided. 1393 + * Each peer channel must be compatible with at least one local channel 1394 + * set by %NL80211_CMD_SET_LOCAL_NAN_SCHED. Different maps must not 1395 + * contain compatible channels. 1396 + * For single-radio devices (n_radio <= 1), different maps must not 1397 + * schedule the same time slot, as the device cannot operate on multiple 1398 + * channels simultaneously. 1399 + * When updating an existing peer schedule, the full new schedule must be 1400 + * provided - partial updates are not supported. The new schedule will 1401 + * completely replace the previous one. 1402 + * The peer schedule is automatically removed when the NMI station is 1403 + * removed. 1404 + * @NL80211_CMD_NAN_ULW_UPDATE: Notification from the driver to user space 1405 + * with the updated ULW blob of the device. User space can use this blob 1406 + * to attach to frames sent to peers. This notification contains 1407 + * %NL80211_ATTR_NAN_ULW with the ULW blob. 1408 + * @NL80211_CMD_NAN_CHANNEL_EVAC: Notification to indicate that a NAN 1409 + * channel has been evacuated due to resource conflicts with other 1410 + * interfaces. This can happen when another interface sharing the channel 1411 + * resource with NAN needs to move to a different channel (e.g., channel 1412 + * switch or link switch on a BSS interface). 1413 + * The notification contains %NL80211_ATTR_NAN_CHANNEL attribute 1414 + * identifying the evacuated channel. 
1415 + * User space may reconfigure the local schedule in response to this 1416 + * notification. 1371 1417 * @NL80211_CMD_MAX: highest used command number 1372 1418 * @__NL80211_CMD_AFTER_LAST: internal use 1373 1419 */ ··· 1679 1631 NL80211_CMD_NAN_CLUSTER_JOINED, 1680 1632 1681 1633 NL80211_CMD_INCUMBENT_SIGNAL_DETECT, 1634 + 1635 + NL80211_CMD_NAN_SET_LOCAL_SCHED, 1636 + 1637 + NL80211_CMD_NAN_SCHED_UPDATE_DONE, 1638 + 1639 + NL80211_CMD_NAN_SET_PEER_SCHED, 1640 + 1641 + NL80211_CMD_NAN_ULW_UPDATE, 1642 + 1643 + NL80211_CMD_NAN_CHANNEL_EVAC, 1682 1644 1683 1645 /* add new commands above here */ 1684 1646 ··· 2717 2659 * a flow is assigned on each round of the DRR scheduler. 2718 2660 * @NL80211_ATTR_HE_CAPABILITY: HE Capability information element (from 2719 2661 * association request when used with NL80211_CMD_NEW_STATION). Can be set 2720 - * only if %NL80211_STA_FLAG_WME is set. 2662 + * only if %NL80211_STA_FLAG_WME is set (except for NAN, which uses WME 2663 + * anyway). 2721 2664 * 2722 2665 * @NL80211_ATTR_FTM_RESPONDER: nested attribute which user-space can include 2723 2666 * in %NL80211_CMD_START_AP or %NL80211_CMD_SET_BEACON for fine timing ··· 3050 2991 * @NL80211_ATTR_DISABLE_UHR: Force UHR capable interfaces to disable 3051 2992 * this feature during association. This is a flag attribute. 3052 2993 * Currently only supported in mac80211 drivers. 2994 + * @NL80211_ATTR_NAN_CHANNEL: This is a nested attribute. There can be multiple 2995 + * attributes of this type, each one represents a channel definition and 2996 + * consists of top-level attributes like %NL80211_ATTR_WIPHY_FREQ. 2997 + * When used with %NL80211_CMD_NAN_SET_LOCAL_SCHED, it specifies 2998 + * the channel definitions on which the radio needs to operate during 2999 + * specific time slots. All of the channel definitions should be mutually 3000 + * incompatible. With this command, %NL80211_ATTR_NAN_CHANNEL_ENTRY and 3001 + * %NL80211_ATTR_NAN_RX_NSS are mandatory. 
3002 + * When used with %NL80211_CMD_NAN_SET_PEER_SCHED, it configures the 3003 + * peer NAN channels. In that case, the channel definitions can be 3004 + * compatible to each other, or even identical just with different RX NSS. 3005 + * With this command, %NL80211_ATTR_NAN_CHANNEL_ENTRY and 3006 + * %NL80211_ATTR_NAN_RX_NSS are mandatory. 3007 + * The number of channels should fit the current configuration of channels 3008 + * and the possible interface combinations. 3009 + * If an existing NAN channel is changed but the chandef isn't, the 3010 + * channel entry must also remain unchanged. 3011 + * When used with %NL80211_CMD_NAN_CHANNEL_EVAC, this identifies the 3012 + * channels that were evacuated. 3013 + * @NL80211_ATTR_NAN_CHANNEL_ENTRY: a byte array of 6 bytes. contains the 3014 + * Channel Entry as defined in Wi-Fi Aware (TM) 4.0 specification Table 3015 + * 100 (Channel Entry format for the NAN Availability attribute). 3016 + * @NL80211_ATTR_NAN_RX_NSS: (u8) RX NSS used for a NAN channel. This is 3017 + * used with %NL80211_ATTR_NAN_CHANNEL when configuring NAN channels with 3018 + * %NL80211_CMD_NAN_SET_LOCAL_SCHED or %NL80211_CMD_NAN_SET_PEER_SCHED. 3019 + * @NL80211_ATTR_NAN_TIME_SLOTS: an array of u8 values and 32 cells. each value 3020 + * maps a time slot to the chandef on which the radio should operate on in 3021 + * that time. %NL80211_NAN_SCHED_NOT_AVAIL_SLOT indicates unscheduled. 3022 + * The chandef is represented using its index, where the index is the 3023 + * sequential number of the %NL80211_ATTR_NAN_CHANNEL attribute within all 3024 + * the attributes of this type. 3025 + * Each slots spans over 16TUs, hence the entire schedule spans over 3026 + * 512TUs. Other slot durations and periods are currently not supported. 
3027 + * @NL80211_ATTR_NAN_AVAIL_BLOB: (Binary) The NAN Availability attribute blob, 3028 + * including the attribute header, as defined in Wi-Fi Aware (TM) 4.0 3029 + * specification Table 93 (NAN Availability attribute format). Required with 3030 + * %NL80211_CMD_NAN_SET_LOCAL_SCHED to provide the raw NAN Availability 3031 + * attribute. Used by the device to publish Schedule Update NAFs. 3032 + * @NL80211_ATTR_NAN_SCHED_DEFERRED: Flag attribute used with 3033 + * %NL80211_CMD_NAN_SET_LOCAL_SCHED. When present, the command is a 3034 + * request from the device to perform an announced schedule update. This 3035 + * means that it needs to send the updated NAN availability to the peers, 3036 + * and do the actual switch on the right time (i.e. at the end of the slot 3037 + * after the slot in which the updated NAN Availability was sent). Since 3038 + * the slots management is done in the device, the update to the peers 3039 + * needs to be sent by the device, so it knows the actual switch time. 3040 + * If the flag is not set, the schedule should be applied immediately. 3041 + * When this flag is set, the total number of NAN channels from both the 3042 + * old and new schedules must not exceed the allowed number of local NAN 3043 + * channels, because with deferred scheduling the old channels cannot be 3044 + * removed before adding the new ones to free up space. 3045 + * @NL80211_ATTR_NAN_SCHED_UPDATE_SUCCESS: flag attribute used with 3046 + * %NL80211_CMD_NAN_SCHED_UPDATE_DONE to indicate that the deferred 3047 + * schedule update completed successfully. If this flag is not present, 3048 + * the update failed. 3049 + * @NL80211_ATTR_NAN_NMI_MAC: The address of the NMI station to which this NDI 3050 + * station belongs. Used with %NL80211_CMD_NEW_STATION when adding an NDI 3051 + * station. 
3052 + * @NL80211_ATTR_NAN_ULW: (Binary) The initial ULW(s) as published by the 3053 + * peer, as defined in the Wi-Fi Aware (TM) 4.0 specification Table 109 3054 + * (Unaligned Schedule attribute format). Used to configure the device 3055 + * with the initial ULW(s) of a peer, before the device starts tracking it. 3056 + * @NL80211_ATTR_NAN_COMMITTED_DW: (u16) The committed DW as published by the 3057 + * peer, as defined in the Wi-Fi Aware (TM) 4.0 specification Table 80 3058 + * (Committed DW Information field format). 3059 + * @NL80211_ATTR_NAN_SEQ_ID: (u8) The sequence ID of the peer schedule that 3060 + * %NL80211_CMD_NAN_SET_PEER_SCHED defines. The device follows the 3061 + * sequence ID in the frames to identify newer schedules. Once a schedule 3062 + * with a higher sequence ID is received, the device may stop communicating 3063 + * with that peer until a new peer schedule with a matching sequence ID is 3064 + * received. 3065 + * @NL80211_ATTR_NAN_MAX_CHAN_SWITCH_TIME: (u16) The maximum channel switch 3066 + * time, in microseconds. 3067 + * @NL80211_ATTR_NAN_PEER_MAPS: Nested array of peer schedule maps. 3068 + * Used with %NL80211_CMD_NAN_SET_PEER_SCHED. Contains up to 2 entries, 3069 + * each containing nested attributes from &enum nl80211_nan_peer_map_attrs. 
3053 3070 * 3054 3071 * @NL80211_ATTR_INCUMBENT_SIGNAL_INTERFERENCE_BITMAP: u32 attribute specifying 3055 3072 * the signal interference bitmap detected on the operating bandwidth for ··· 3717 3582 3718 3583 NL80211_ATTR_UHR_OPERATION, 3719 3584 3585 + NL80211_ATTR_NAN_CHANNEL, 3586 + NL80211_ATTR_NAN_CHANNEL_ENTRY, 3587 + NL80211_ATTR_NAN_TIME_SLOTS, 3588 + NL80211_ATTR_NAN_RX_NSS, 3589 + NL80211_ATTR_NAN_AVAIL_BLOB, 3590 + NL80211_ATTR_NAN_SCHED_DEFERRED, 3591 + NL80211_ATTR_NAN_SCHED_UPDATE_SUCCESS, 3592 + 3593 + NL80211_ATTR_NAN_NMI_MAC, 3594 + 3595 + NL80211_ATTR_NAN_ULW, 3596 + NL80211_ATTR_NAN_COMMITTED_DW, 3597 + NL80211_ATTR_NAN_SEQ_ID, 3598 + NL80211_ATTR_NAN_MAX_CHAN_SWITCH_TIME, 3599 + NL80211_ATTR_NAN_PEER_MAPS, 3600 + 3720 3601 /* add attributes here, update the policy in nl80211.c */ 3721 3602 3722 3603 __NL80211_ATTR_AFTER_LAST, ··· 3826 3675 * @NL80211_IFTYPE_OCB: Outside Context of a BSS 3827 3676 * This mode corresponds to the MIB variable dot11OCBActivated=true 3828 3677 * @NL80211_IFTYPE_NAN: NAN device interface type (not a netdev) 3678 + * @NL80211_IFTYPE_NAN_DATA: NAN data interface type (netdev); NAN data 3679 + * interfaces can only be brought up (IFF_UP) when a NAN interface 3680 + * already exists and NAN has been started (using %NL80211_CMD_START_NAN). 
3829 3681 * @NL80211_IFTYPE_MAX: highest interface type number currently defined 3830 3682 * @NUM_NL80211_IFTYPES: number of defined interface types 3831 3683 * ··· 3850 3696 NL80211_IFTYPE_P2P_DEVICE, 3851 3697 NL80211_IFTYPE_OCB, 3852 3698 NL80211_IFTYPE_NAN, 3699 + NL80211_IFTYPE_NAN_DATA, 3853 3700 3854 3701 /* keep last */ 3855 3702 NUM_NL80211_IFTYPES, ··· 4538 4383 }; 4539 4384 4540 4385 #define NL80211_BAND_ATTR_HT_CAPA NL80211_BAND_ATTR_HT_CAPA 4386 + 4387 + /** 4388 + * enum nl80211_nan_phy_cap_attr - NAN PHY capabilities attributes 4389 + * @__NL80211_NAN_PHY_CAP_ATTR_INVALID: attribute number 0 is reserved 4390 + * @NL80211_NAN_PHY_CAP_ATTR_HT_MCS_SET: 16-byte attribute containing HT MCS set 4391 + * @NL80211_NAN_PHY_CAP_ATTR_HT_CAPA: HT capabilities (u16) 4392 + * @NL80211_NAN_PHY_CAP_ATTR_HT_AMPDU_FACTOR: HT A-MPDU factor (u8) 4393 + * @NL80211_NAN_PHY_CAP_ATTR_HT_AMPDU_DENSITY: HT A-MPDU density (u8) 4394 + * @NL80211_NAN_PHY_CAP_ATTR_VHT_MCS_SET: 8-byte attribute containing VHT MCS set 4395 + * @NL80211_NAN_PHY_CAP_ATTR_VHT_CAPA: VHT capabilities (u32) 4396 + * @NL80211_NAN_PHY_CAP_ATTR_HE_MAC: HE MAC capabilities 4397 + * @NL80211_NAN_PHY_CAP_ATTR_HE_PHY: HE PHY capabilities 4398 + * @NL80211_NAN_PHY_CAP_ATTR_HE_MCS_SET: HE supported NSS/MCS combinations 4399 + * @NL80211_NAN_PHY_CAP_ATTR_HE_PPE: HE PPE thresholds 4400 + * @NL80211_NAN_PHY_CAP_ATTR_MAX: highest NAN PHY cap attribute number 4401 + * @__NL80211_NAN_PHY_CAP_ATTR_AFTER_LAST: internal use 4402 + */ 4403 + enum nl80211_nan_phy_cap_attr { 4404 + __NL80211_NAN_PHY_CAP_ATTR_INVALID, 4405 + 4406 + /* HT capabilities */ 4407 + NL80211_NAN_PHY_CAP_ATTR_HT_MCS_SET, 4408 + NL80211_NAN_PHY_CAP_ATTR_HT_CAPA, 4409 + NL80211_NAN_PHY_CAP_ATTR_HT_AMPDU_FACTOR, 4410 + NL80211_NAN_PHY_CAP_ATTR_HT_AMPDU_DENSITY, 4411 + 4412 + /* VHT capabilities */ 4413 + NL80211_NAN_PHY_CAP_ATTR_VHT_MCS_SET, 4414 + NL80211_NAN_PHY_CAP_ATTR_VHT_CAPA, 4415 + 4416 + /* HE capabilities */ 4417 + 
NL80211_NAN_PHY_CAP_ATTR_HE_MAC, 4418 + NL80211_NAN_PHY_CAP_ATTR_HE_PHY, 4419 + NL80211_NAN_PHY_CAP_ATTR_HE_MCS_SET, 4420 + NL80211_NAN_PHY_CAP_ATTR_HE_PPE, 4421 + 4422 + /* keep last */ 4423 + __NL80211_NAN_PHY_CAP_ATTR_AFTER_LAST, 4424 + NL80211_NAN_PHY_CAP_ATTR_MAX = __NL80211_NAN_PHY_CAP_ATTR_AFTER_LAST - 1 4425 + }; 4541 4426 4542 4427 /** 4543 4428 * enum nl80211_wmm_rule - regulatory wmm rule ··· 8752 8557 * @NL80211_NAN_CAPA_CAPABILITIES: u8 attribute containing the 8753 8558 * capabilities of the device as defined in Wi-Fi Aware (TM) 8754 8559 * specification Table 79 (Capabilities field). 8560 + * @NL80211_NAN_CAPA_PHY: nested attribute containing band-agnostic 8561 + * capabilities for NAN data path. See &enum nl80211_nan_phy_cap_attr. 8755 8562 * @__NL80211_NAN_CAPABILITIES_LAST: Internal 8756 8563 * @NL80211_NAN_CAPABILITIES_MAX: Highest NAN capability attribute. 8757 8564 */ ··· 8766 8569 NL80211_NAN_CAPA_NUM_ANTENNAS, 8767 8570 NL80211_NAN_CAPA_MAX_CHANNEL_SWITCH_TIME, 8768 8571 NL80211_NAN_CAPA_CAPABILITIES, 8572 + NL80211_NAN_CAPA_PHY, 8769 8573 /* keep last */ 8770 8574 __NL80211_NAN_CAPABILITIES_LAST, 8771 8575 NL80211_NAN_CAPABILITIES_MAX = __NL80211_NAN_CAPABILITIES_LAST - 1, 8772 8576 }; 8577 + 8578 + /** 8579 + * enum nl80211_nan_peer_map_attrs - NAN peer schedule map attributes 8580 + * 8581 + * Nested attributes used within %NL80211_ATTR_NAN_PEER_MAPS to define 8582 + * individual peer schedule maps. 8583 + * 8584 + * @__NL80211_NAN_PEER_MAP_ATTR_INVALID: Invalid 8585 + * @NL80211_NAN_PEER_MAP_ATTR_MAP_ID: (u8) The map ID for this schedule map. 8586 + * @NL80211_NAN_PEER_MAP_ATTR_TIME_SLOTS: An array of u8 values with 32 cells. 8587 + * Each value maps a time slot to a channel index within the schedule's 8588 + * channel list (%NL80211_ATTR_NAN_CHANNEL attributes). 8589 + * %NL80211_NAN_SCHED_NOT_AVAIL_SLOT indicates unscheduled. 
8590 + * @__NL80211_NAN_PEER_MAP_ATTR_LAST: Internal 8591 + * @NL80211_NAN_PEER_MAP_ATTR_MAX: Highest peer map attribute 8592 + */ 8593 + enum nl80211_nan_peer_map_attrs { 8594 + __NL80211_NAN_PEER_MAP_ATTR_INVALID, 8595 + 8596 + NL80211_NAN_PEER_MAP_ATTR_MAP_ID, 8597 + NL80211_NAN_PEER_MAP_ATTR_TIME_SLOTS, 8598 + 8599 + /* keep last */ 8600 + __NL80211_NAN_PEER_MAP_ATTR_LAST, 8601 + NL80211_NAN_PEER_MAP_ATTR_MAX = __NL80211_NAN_PEER_MAP_ATTR_LAST - 1, 8602 + }; 8603 + 8604 + #define NL80211_NAN_SCHED_NOT_AVAIL_SLOT 0xff 8773 8605 8774 8606 #endif /* __LINUX_NL80211_H */
+3 -1
net/mac80211/cfg.c
··· 718 718 case NL80211_IFTYPE_P2P_CLIENT: 719 719 case NL80211_IFTYPE_P2P_GO: 720 720 case NL80211_IFTYPE_OCB: 721 + case NL80211_IFTYPE_NAN_DATA: 721 722 /* shouldn't happen */ 722 723 WARN_ON_ONCE(1); 723 724 break; ··· 2141 2140 return -EINVAL; 2142 2141 2143 2142 if (params->ht_capa) 2144 - ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, 2143 + ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, &sband->ht_cap, 2145 2144 params->ht_capa, link_sta); 2146 2145 2147 2146 /* VHT can override some HT caps such as the A-MSDU max length */ 2148 2147 if (params->vht_capa) 2149 2148 ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, 2149 + &sband->vht_cap, 2150 2150 params->vht_capa, NULL, 2151 2151 link_sta); 2152 2152
+123 -76
net/mac80211/chan.c
··· 166 166 for_each_chanctx_user_all(local, ctx, &iter) 167 167 num++; 168 168 169 + /* 170 + * This ctx is in the process of getting used, 171 + * take it into consideration 172 + */ 173 + if (ctx->will_be_used) 174 + num++; 175 + 169 176 return num; 170 177 } 171 178 ··· 455 448 } 456 449 457 450 static enum nl80211_chan_width 451 + ieee80211_get_width_of_link(struct ieee80211_link_data *link) 452 + { 453 + struct ieee80211_local *local = link->sdata->local; 454 + 455 + switch (link->sdata->vif.type) { 456 + case NL80211_IFTYPE_STATION: 457 + if (!link->sdata->vif.cfg.assoc) { 458 + /* 459 + * The AP's sta->bandwidth may not yet be set 460 + * at this point (pre-association), so simply 461 + * take the width from the chandef. We cannot 462 + * have TDLS peers yet (only after association). 463 + */ 464 + return link->conf->chanreq.oper.width; 465 + } 466 + /* 467 + * otherwise just use min_def like in AP, depending on what 468 + * we currently think the AP STA (and possibly TDLS peers) 469 + * require(s) 470 + */ 471 + fallthrough; 472 + case NL80211_IFTYPE_AP: 473 + case NL80211_IFTYPE_AP_VLAN: 474 + return ieee80211_get_max_required_bw(link); 475 + case NL80211_IFTYPE_P2P_DEVICE: 476 + case NL80211_IFTYPE_NAN: 477 + break; 478 + case NL80211_IFTYPE_MONITOR: 479 + WARN_ON_ONCE(!ieee80211_hw_check(&local->hw, 480 + NO_VIRTUAL_MONITOR)); 481 + fallthrough; 482 + case NL80211_IFTYPE_ADHOC: 483 + case NL80211_IFTYPE_MESH_POINT: 484 + case NL80211_IFTYPE_OCB: 485 + return link->conf->chanreq.oper.width; 486 + case NL80211_IFTYPE_WDS: 487 + case NL80211_IFTYPE_UNSPECIFIED: 488 + case NUM_NL80211_IFTYPES: 489 + case NL80211_IFTYPE_P2P_CLIENT: 490 + case NL80211_IFTYPE_P2P_GO: 491 + case NL80211_IFTYPE_NAN_DATA: 492 + WARN_ON_ONCE(1); 493 + break; 494 + } 495 + 496 + /* Take the lowest possible, so it won't change the max width */ 497 + return NL80211_CHAN_WIDTH_20_NOHT; 498 + } 499 + 500 + static enum nl80211_chan_width 458 501 
ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local, 459 502 struct ieee80211_chanctx *ctx, 460 503 struct ieee80211_link_data *rsvd_for, 461 504 bool check_reserved) 462 505 { 463 - struct ieee80211_sub_if_data *sdata; 464 - struct ieee80211_link_data *link; 465 506 enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT; 507 + struct ieee80211_chanctx_user_iter iter; 508 + struct ieee80211_sub_if_data *sdata; 509 + enum nl80211_chan_width width; 466 510 467 511 if (WARN_ON(check_reserved && rsvd_for)) 468 512 return ctx->conf.def.width; 469 513 470 - for_each_sdata_link(local, link) { 471 - enum nl80211_chan_width width = NL80211_CHAN_WIDTH_20_NOHT; 472 - 473 - if (check_reserved) { 474 - if (link->reserved_chanctx != ctx) 475 - continue; 476 - } else if (link != rsvd_for && 477 - rcu_access_pointer(link->conf->chanctx_conf) != &ctx->conf) 478 - continue; 479 - 480 - switch (link->sdata->vif.type) { 481 - case NL80211_IFTYPE_STATION: 482 - if (!link->sdata->vif.cfg.assoc) { 483 - /* 484 - * The AP's sta->bandwidth may not yet be set 485 - * at this point (pre-association), so simply 486 - * take the width from the chandef. We cannot 487 - * have TDLS peers yet (only after association). 
488 - */ 489 - width = link->conf->chanreq.oper.width; 490 - break; 491 - } 492 - /* 493 - * otherwise just use min_def like in AP, depending on what 494 - * we currently think the AP STA (and possibly TDLS peers) 495 - * require(s) 496 - */ 497 - fallthrough; 498 - case NL80211_IFTYPE_AP: 499 - case NL80211_IFTYPE_AP_VLAN: 500 - width = ieee80211_get_max_required_bw(link); 501 - break; 502 - case NL80211_IFTYPE_P2P_DEVICE: 503 - case NL80211_IFTYPE_NAN: 504 - continue; 505 - case NL80211_IFTYPE_MONITOR: 506 - WARN_ON_ONCE(!ieee80211_hw_check(&local->hw, 507 - NO_VIRTUAL_MONITOR)); 508 - fallthrough; 509 - case NL80211_IFTYPE_ADHOC: 510 - case NL80211_IFTYPE_MESH_POINT: 511 - case NL80211_IFTYPE_OCB: 512 - width = link->conf->chanreq.oper.width; 513 - break; 514 - case NL80211_IFTYPE_WDS: 515 - case NL80211_IFTYPE_UNSPECIFIED: 516 - case NUM_NL80211_IFTYPES: 517 - case NL80211_IFTYPE_P2P_CLIENT: 518 - case NL80211_IFTYPE_P2P_GO: 519 - WARN_ON_ONCE(1); 514 + /* When this is true we only care about the reserving links */ 515 + if (check_reserved) { 516 + for_each_chanctx_user_reserved(local, ctx, &iter) { 517 + width = ieee80211_get_width_of_link(iter.link); 518 + max_bw = max(max_bw, width); 520 519 } 520 + goto check_monitor; 521 + } 521 522 523 + /* Consider all assigned links */ 524 + for_each_chanctx_user_assigned(local, ctx, &iter) { 525 + width = ieee80211_get_width_of_link(iter.link); 522 526 max_bw = max(max_bw, width); 523 527 } 524 528 529 + if (!rsvd_for || 530 + rsvd_for->sdata == rcu_access_pointer(local->monitor_sdata)) 531 + goto check_monitor; 532 + 533 + /* Consider the link for which this chanctx is reserved/going to be assigned */ 534 + width = ieee80211_get_width_of_link(rsvd_for); 535 + max_bw = max(max_bw, width); 536 + 537 + check_monitor: 525 538 /* use the configured bandwidth in case of monitor interface */ 526 539 sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata); 527 540 if (sdata && ··· 779 752 
_ieee80211_change_chanctx(local, ctx, old_ctx, chanreq, NULL); 780 753 } 781 754 782 - /* Note: if successful, the returned chanctx is reserved for the link */ 755 + /* Note: if successful, the returned chanctx will_be_used flag is set */ 783 756 static struct ieee80211_chanctx * 784 757 ieee80211_find_chanctx(struct ieee80211_local *local, 785 - struct ieee80211_link_data *link, 786 758 const struct ieee80211_chan_req *chanreq, 787 759 enum ieee80211_chanctx_mode mode) 788 760 { ··· 791 765 lockdep_assert_wiphy(local->hw.wiphy); 792 766 793 767 if (mode == IEEE80211_CHANCTX_EXCLUSIVE) 794 - return NULL; 795 - 796 - if (WARN_ON(link->reserved_chanctx)) 797 768 return NULL; 798 769 799 770 list_for_each_entry(ctx, &local->chanctx_list, list) { ··· 813 790 continue; 814 791 815 792 /* 816 - * Reserve the chanctx temporarily, as the driver might change 793 + * Mark the chanctx as will be used, as the driver might change 817 794 * active links during callbacks we make into it below and/or 818 795 * later during assignment, which could (otherwise) cause the 819 796 * context to actually be removed. 
820 797 */ 821 - link->reserved_chanctx = ctx; 798 + ctx->will_be_used = true; 822 799 823 800 ieee80211_change_chanctx(local, ctx, ctx, compat); 824 801 ··· 1461 1438 case NL80211_IFTYPE_P2P_GO: 1462 1439 case NL80211_IFTYPE_P2P_DEVICE: 1463 1440 case NL80211_IFTYPE_NAN: 1441 + case NL80211_IFTYPE_NAN_DATA: 1464 1442 case NUM_NL80211_IFTYPES: 1465 1443 WARN_ON(1); 1466 1444 break; ··· 2035 2011 ieee80211_vif_use_reserved_switch(local); 2036 2012 } 2037 2013 2014 + static struct ieee80211_chanctx * 2015 + ieee80211_find_or_create_chanctx(struct ieee80211_sub_if_data *sdata, 2016 + const struct ieee80211_chan_req *chanreq, 2017 + enum ieee80211_chanctx_mode mode, 2018 + bool assign_on_failure, 2019 + bool *reused_ctx) 2020 + { 2021 + struct ieee80211_local *local = sdata->local; 2022 + struct ieee80211_chanctx *ctx; 2023 + int radio_idx; 2024 + 2025 + lockdep_assert_wiphy(local->hw.wiphy); 2026 + 2027 + ctx = ieee80211_find_chanctx(local, chanreq, mode); 2028 + if (ctx) { 2029 + *reused_ctx = true; 2030 + return ctx; 2031 + } 2032 + 2033 + *reused_ctx = false; 2034 + 2035 + if (!ieee80211_find_available_radio(local, chanreq, 2036 + sdata->wdev.radio_mask, 2037 + &radio_idx)) 2038 + return ERR_PTR(-EBUSY); 2039 + 2040 + return ieee80211_new_chanctx(local, chanreq, mode, 2041 + assign_on_failure, radio_idx); 2042 + } 2043 + 2038 2044 int _ieee80211_link_use_channel(struct ieee80211_link_data *link, 2039 2045 const struct ieee80211_chan_req *chanreq, 2040 2046 enum ieee80211_chanctx_mode mode, ··· 2074 2020 struct ieee80211_local *local = sdata->local; 2075 2021 struct ieee80211_chanctx *ctx; 2076 2022 u8 radar_detect_width = 0; 2077 - bool reserved = false; 2078 - int radio_idx; 2023 + bool reused_ctx = false; 2079 2024 int ret; 2080 2025 2081 2026 lockdep_assert_wiphy(local->hw.wiphy); ··· 2102 2049 if (!local->in_reconfig) 2103 2050 __ieee80211_link_release_channel(link, false); 2104 2051 2105 - ctx = ieee80211_find_chanctx(local, link, chanreq, mode); 2106 - /* 
Note: context is now reserved */ 2107 - if (ctx) 2108 - reserved = true; 2109 - else if (!ieee80211_find_available_radio(local, chanreq, 2110 - sdata->wdev.radio_mask, 2111 - &radio_idx)) 2112 - ctx = ERR_PTR(-EBUSY); 2113 - else 2114 - ctx = ieee80211_new_chanctx(local, chanreq, mode, 2115 - assign_on_failure, radio_idx); 2052 + ctx = ieee80211_find_or_create_chanctx(sdata, chanreq, mode, 2053 + assign_on_failure, &reused_ctx); 2116 2054 if (IS_ERR(ctx)) { 2117 2055 ret = PTR_ERR(ctx); 2118 2056 goto out; ··· 2113 2069 2114 2070 ret = ieee80211_assign_link_chanctx(link, ctx, assign_on_failure); 2115 2071 2116 - if (reserved) { 2117 - /* remove reservation */ 2118 - WARN_ON(link->reserved_chanctx != ctx); 2119 - link->reserved_chanctx = NULL; 2072 + /* 2073 + * In case an existing channel context is being used, we marked it as 2074 + * will_be_used, now that it is assigned - clear this indication 2075 + */ 2076 + if (reused_ctx) { 2077 + WARN_ON(!ctx->will_be_used); 2078 + ctx->will_be_used = false; 2120 2079 } 2121 2080 2122 2081 if (ret) {
+24 -13
net/mac80211/he.c
··· 108 108 } 109 109 110 110 void 111 - ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata, 112 - struct ieee80211_supported_band *sband, 113 - const u8 *he_cap_ie, u8 he_cap_len, 114 - const struct ieee80211_he_6ghz_capa *he_6ghz_capa, 115 - struct link_sta_info *link_sta) 111 + _ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata, 112 + const struct ieee80211_sta_he_cap *own_he_cap_ptr, 113 + const u8 *he_cap_ie, u8 he_cap_len, 114 + const struct ieee80211_he_6ghz_capa *he_6ghz_capa, 115 + struct link_sta_info *link_sta) 116 116 { 117 117 struct ieee80211_sta_he_cap *he_cap = &link_sta->pub->he_cap; 118 - const struct ieee80211_sta_he_cap *own_he_cap_ptr; 119 118 struct ieee80211_sta_he_cap own_he_cap; 120 119 struct ieee80211_he_cap_elem *he_cap_ie_elem = (void *)he_cap_ie; 121 120 u8 he_ppe_size; ··· 124 125 125 126 memset(he_cap, 0, sizeof(*he_cap)); 126 127 127 - if (!he_cap_ie) 128 - return; 129 - 130 - own_he_cap_ptr = 131 - ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif); 132 - if (!own_he_cap_ptr) 128 + if (!he_cap_ie || !own_he_cap_ptr || !own_he_cap_ptr->has_he) 133 129 return; 134 130 135 131 own_he_cap = *own_he_cap_ptr; ··· 158 164 link_sta->cur_max_bandwidth = ieee80211_sta_cap_rx_bw(link_sta); 159 165 link_sta->pub->bandwidth = ieee80211_sta_cur_vht_bw(link_sta); 160 166 161 - if (sband->band == NL80211_BAND_6GHZ && he_6ghz_capa) 167 + if (he_6ghz_capa) 162 168 ieee80211_update_from_he_6ghz_capa(he_6ghz_capa, link_sta); 163 169 164 170 ieee80211_he_mcs_intersection(&own_he_cap.he_mcs_nss_supp.rx_mcs_80, ··· 199 205 he_cap->he_cap_elem.phy_cap_info[0] &= 200 206 ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G; 201 207 } 208 + } 209 + 210 + void 211 + ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata, 212 + struct ieee80211_supported_band *sband, 213 + const u8 *he_cap_ie, u8 he_cap_len, 214 + const struct ieee80211_he_6ghz_capa *he_6ghz_capa, 215 + struct link_sta_info 
*link_sta) 216 + { 217 + const struct ieee80211_sta_he_cap *own_he_cap = 218 + ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif); 219 + 220 + _ieee80211_he_cap_ie_to_sta_he_cap(sdata, own_he_cap, he_cap_ie, 221 + he_cap_len, 222 + (sband->band == NL80211_BAND_6GHZ) ? 223 + he_6ghz_capa : NULL, 224 + link_sta); 202 225 } 203 226 204 227 void
+3 -3
net/mac80211/ht.c
··· 136 136 137 137 138 138 bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata, 139 - struct ieee80211_supported_band *sband, 139 + const struct ieee80211_sta_ht_cap *own_cap_ptr, 140 140 const struct ieee80211_ht_cap *ht_cap_ie, 141 141 struct link_sta_info *link_sta) 142 142 { ··· 151 151 152 152 memset(&ht_cap, 0, sizeof(ht_cap)); 153 153 154 - if (!ht_cap_ie || !sband->ht_cap.ht_supported) 154 + if (!ht_cap_ie || !own_cap_ptr->ht_supported) 155 155 goto apply; 156 156 157 157 ht_cap.ht_supported = true; 158 158 159 - own_cap = sband->ht_cap; 159 + own_cap = *own_cap_ptr; 160 160 161 161 /* 162 162 * If user has specified capability over-rides, take care
+3 -1
net/mac80211/ibss.c
··· 1014 1014 ieee80211_chandef_ht_oper(elems->ht_operation, &chandef); 1015 1015 1016 1016 memcpy(&htcap_ie, elems->ht_cap_elem, sizeof(htcap_ie)); 1017 - rates_updated |= ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, 1017 + rates_updated |= ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, 1018 + &sband->ht_cap, 1018 1019 &htcap_ie, 1019 1020 &sta->deflink); 1020 1021 ··· 1034 1033 &chandef); 1035 1034 memcpy(&cap_ie, elems->vht_cap_elem, sizeof(cap_ie)); 1036 1035 ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, 1036 + &sband->vht_cap, 1037 1037 &cap_ie, NULL, 1038 1038 &sta->deflink); 1039 1039 if (memcmp(&cap, &sta->sta.deflink.vht_cap, sizeof(cap)))
+11 -1
net/mac80211/ieee80211_i.h
··· 928 928 929 929 bool radar_detected; 930 930 931 + /* This chanctx is in process of getting used */ 932 + bool will_be_used; 933 + 931 934 /* MUST be last - ends in a flexible-array member. */ 932 935 struct ieee80211_chanctx_conf conf; 933 936 }; ··· 2188 2185 void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata, 2189 2186 struct ieee80211_sta_ht_cap *ht_cap); 2190 2187 bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata, 2191 - struct ieee80211_supported_band *sband, 2188 + const struct ieee80211_sta_ht_cap *own_cap, 2192 2189 const struct ieee80211_ht_cap *ht_cap_ie, 2193 2190 struct link_sta_info *link_sta); 2194 2191 void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, ··· 2273 2270 void 2274 2271 ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, 2275 2272 struct ieee80211_supported_band *sband, 2273 + const struct ieee80211_sta_vht_cap *own_vht_cap, 2276 2274 const struct ieee80211_vht_cap *vht_cap_ie, 2277 2275 const struct ieee80211_vht_cap *vht_cap_ie2, 2278 2276 struct link_sta_info *link_sta); ··· 2313 2309 ieee80211_sta_rx_bw_to_chan_width(struct link_sta_info *sta); 2314 2310 2315 2311 /* HE */ 2312 + void 2313 + _ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata, 2314 + const struct ieee80211_sta_he_cap *own_he_cap, 2315 + const u8 *he_cap_ie, u8 he_cap_len, 2316 + const struct ieee80211_he_6ghz_capa *he_6ghz_capa, 2317 + struct link_sta_info *link_sta); 2316 2318 void 2317 2319 ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata, 2318 2320 struct ieee80211_supported_band *sband,
+18 -10
net/mac80211/iface.c
··· 1222 1222 } 1223 1223 } 1224 1224 1225 - set_bit(SDATA_STATE_RUNNING, &sdata->state); 1226 - 1227 1225 ret = ieee80211_check_queues(sdata, NL80211_IFTYPE_MONITOR); 1228 1226 if (ret) { 1229 1227 kfree(sdata); 1230 1228 return ret; 1231 1229 } 1230 + 1231 + set_bit(SDATA_STATE_RUNNING, &sdata->state); 1232 1232 1233 1233 mutex_lock(&local->iflist_mtx); 1234 1234 rcu_assign_pointer(local->monitor_sdata, sdata); ··· 1242 1242 mutex_unlock(&local->iflist_mtx); 1243 1243 synchronize_net(); 1244 1244 drv_remove_interface(local, sdata); 1245 + clear_bit(SDATA_STATE_RUNNING, &sdata->state); 1245 1246 kfree(sdata); 1246 1247 return ret; 1247 1248 } ··· 1361 1360 break; 1362 1361 } 1363 1362 case NL80211_IFTYPE_AP: 1364 - sdata->bss = &sdata->u.ap; 1365 - break; 1366 1363 case NL80211_IFTYPE_MESH_POINT: 1367 1364 case NL80211_IFTYPE_STATION: 1368 1365 case NL80211_IFTYPE_MONITOR: ··· 1368 1369 case NL80211_IFTYPE_P2P_DEVICE: 1369 1370 case NL80211_IFTYPE_OCB: 1370 1371 case NL80211_IFTYPE_NAN: 1372 + case NL80211_IFTYPE_NAN_DATA: 1371 1373 /* no special treatment */ 1372 1374 break; 1373 1375 case NL80211_IFTYPE_UNSPECIFIED: ··· 1386 1386 local->reconfig_failure = false; 1387 1387 1388 1388 res = drv_start(local); 1389 - if (res) 1390 - goto err_del_bss; 1389 + if (res) { 1390 + /* 1391 + * no need to worry about AP_VLAN cleanup since in that 1392 + * case we can't have open_count == 0 1393 + */ 1394 + return res; 1395 + } 1391 1396 ieee80211_led_radio(local, true); 1392 1397 ieee80211_mod_tpt_led_trig(local, 1393 1398 IEEE80211_TPT_LEDTRIG_FL_RADIO, 0); ··· 1463 1458 netif_carrier_on(dev); 1464 1459 list_add_tail_rcu(&sdata->u.mntr.list, &local->mon_list); 1465 1460 break; 1461 + case NL80211_IFTYPE_AP: 1462 + sdata->bss = &sdata->u.ap; 1463 + fallthrough; 1466 1464 default: 1467 1465 if (coming_up) { 1468 1466 ieee80211_del_virtual_monitor(local); ··· 1554 1546 err_stop: 1555 1547 if (!local->open_count) 1556 1548 drv_stop(local, false); 1557 - err_del_bss: 1558 - 
sdata->bss = NULL; 1559 1549 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 1560 1550 list_del(&sdata->u.vlan.list); 1561 - /* might already be clear but that doesn't matter */ 1562 - clear_bit(SDATA_STATE_RUNNING, &sdata->state); 1551 + /* Might not be initialized yet, but it is harmless */ 1552 + sdata->bss = NULL; 1563 1553 return res; 1564 1554 } 1565 1555 ··· 1945 1939 case NL80211_IFTYPE_AP_VLAN: 1946 1940 case NL80211_IFTYPE_P2P_DEVICE: 1947 1941 sdata->vif.bss_conf.bssid = sdata->vif.addr; 1942 + break; 1943 + case NL80211_IFTYPE_NAN_DATA: 1948 1944 break; 1949 1945 case NL80211_IFTYPE_UNSPECIFIED: 1950 1946 case NL80211_IFTYPE_WDS:
+2 -1
net/mac80211/mesh_plink.c
··· 450 450 changed |= IEEE80211_RC_SUPP_RATES_CHANGED; 451 451 sta->sta.deflink.supp_rates[sband->band] = rates; 452 452 453 - if (ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, 453 + if (ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, &sband->ht_cap, 454 454 elems->ht_cap_elem, 455 455 &sta->deflink)) 456 456 changed |= IEEE80211_RC_BW_CHANGED; 457 457 458 458 ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, 459 + &sband->vht_cap, 459 460 elems->vht_cap_elem, NULL, 460 461 &sta->deflink); 461 462
+10 -7
net/mac80211/mlme.c
··· 5586 5586 5587 5587 /* Set up internal HT/VHT capabilities */ 5588 5588 if (elems->ht_cap_elem && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT) 5589 - ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, 5589 + ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, &sband->ht_cap, 5590 5590 elems->ht_cap_elem, 5591 5591 link_sta); 5592 5592 ··· 5622 5622 } 5623 5623 5624 5624 ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, 5625 + &sband->vht_cap, 5625 5626 elems->vht_cap_elem, 5626 5627 bss_vht_cap, link_sta); 5627 5628 rcu_read_unlock(); ··· 10459 10458 pos = mgmt->u.action.ml_reconf_resp.variable; 10460 10459 len -= offsetofend(typeof(*mgmt), u.action.ml_reconf_resp); 10461 10460 10462 - /* each status duple is 3 octets */ 10463 - if (len < mgmt->u.action.ml_reconf_resp.count * 3) { 10461 + if (len < mgmt->u.action.ml_reconf_resp.count * 10462 + sizeof(struct ieee80211_ml_reconf_status)) { 10464 10463 sdata_info(sdata, 10465 10464 "mlo: reconf: unexpected len=%zu, count=%u\n", 10466 10465 len, mgmt->u.action.ml_reconf_resp.count); ··· 10469 10468 10470 10469 link_mask = sta_changed_links; 10471 10470 for (i = 0; i < mgmt->u.action.ml_reconf_resp.count; i++) { 10472 - u16 status = get_unaligned_le16(pos + 1); 10471 + struct ieee80211_ml_reconf_status *reconf_status = (void *)pos; 10472 + u16 status = le16_to_cpu(reconf_status->status); 10473 10473 10474 - link_id = *pos; 10474 + link_id = u8_get_bits(reconf_status->info, 10475 + IEEE80211_ML_RECONF_LINK_ID_MASK); 10475 10476 10476 10477 if (!(link_mask & BIT(link_id))) { 10477 10478 sdata_info(sdata, ··· 10508 10505 sdata->u.mgd.reconf.added_links &= ~BIT(link_id); 10509 10506 } 10510 10507 10511 - pos += 3; 10512 - len -= 3; 10508 + pos += sizeof(*reconf_status); 10509 + len -= sizeof(*reconf_status); 10513 10510 } 10514 10511 10515 10512 if (link_mask) {
+2
net/mac80211/rx.c
··· 4607 4607 (ieee80211_is_public_action(hdr, skb->len) || 4608 4608 (ieee80211_is_auth(hdr->frame_control) && 4609 4609 ether_addr_equal(sdata->vif.addr, hdr->addr1))); 4610 + case NL80211_IFTYPE_NAN_DATA: 4611 + return false; 4610 4612 default: 4611 4613 break; 4612 4614 }
+2 -3
net/mac80211/trace.h
··· 1778 1778 SWITCH_ENTRY_ASSIGN(vif.vif_type, vif->type); 1779 1779 SWITCH_ENTRY_ASSIGN(vif.p2p, vif->p2p); 1780 1780 SWITCH_ENTRY_ASSIGN(link_id, link_conf->link_id); 1781 - strncpy(local_vifs[i].vif.vif_name, 1782 - sdata->name, 1783 - sizeof(local_vifs[i].vif.vif_name)); 1781 + strscpy_pad(local_vifs[i].vif.vif_name, 1782 + sdata->name); 1784 1783 SWITCH_ENTRY_ASSIGN(old_chandef.control_freq, 1785 1784 old_ctx->def.chan->center_freq); 1786 1785 SWITCH_ENTRY_ASSIGN(old_chandef.freq_offset,
+1
net/mac80211/util.c
··· 2118 2118 return res; 2119 2119 } 2120 2120 break; 2121 + case NL80211_IFTYPE_NAN_DATA: 2121 2122 case NL80211_IFTYPE_AP_VLAN: 2122 2123 case NL80211_IFTYPE_MONITOR: 2123 2124 case NL80211_IFTYPE_P2P_DEVICE:
+18 -15
net/mac80211/vht.c
··· 4 4 * 5 5 * Portions of this file 6 6 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH 7 - * Copyright (C) 2018-2026 Intel Corporation 7 + * Copyright (C) 2018 - 2026 Intel Corporation 8 8 */ 9 9 10 10 #include <linux/ieee80211.h> ··· 115 115 void 116 116 ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, 117 117 struct ieee80211_supported_band *sband, 118 + const struct ieee80211_sta_vht_cap *own_vht_cap, 118 119 const struct ieee80211_vht_cap *vht_cap_ie, 119 120 const struct ieee80211_vht_cap *vht_cap_ie2, 120 121 struct link_sta_info *link_sta) ··· 123 122 struct ieee80211_sta_vht_cap *vht_cap = &link_sta->pub->vht_cap; 124 123 struct ieee80211_sta_vht_cap own_cap; 125 124 u32 cap_info, i; 126 - bool have_80mhz; 127 125 u32 mpdu_len; 128 126 129 127 memset(vht_cap, 0, sizeof(*vht_cap)); ··· 130 130 if (!link_sta->pub->ht_cap.ht_supported) 131 131 return; 132 132 133 - if (!vht_cap_ie || !sband->vht_cap.vht_supported) 133 + if (!vht_cap_ie || !own_vht_cap->vht_supported) 134 134 return; 135 135 136 - /* Allow VHT if at least one channel on the sband supports 80 MHz */ 137 - have_80mhz = false; 138 - for (i = 0; i < sband->n_channels; i++) { 139 - if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED | 140 - IEEE80211_CHAN_NO_80MHZ)) 141 - continue; 136 + if (sband) { 137 + /* Allow VHT if at least one channel on the sband supports 80 MHz */ 138 + bool have_80mhz = false; 142 139 143 - have_80mhz = true; 144 - break; 140 + for (i = 0; i < sband->n_channels; i++) { 141 + if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED | 142 + IEEE80211_CHAN_NO_80MHZ)) 143 + continue; 144 + 145 + have_80mhz = true; 146 + break; 147 + } 148 + 149 + if (!have_80mhz) 150 + return; 145 151 } 146 - 147 - if (!have_80mhz) 148 - return; 149 152 150 153 /* 151 154 * A VHT STA must support 40 MHz, but if we verify that here ··· 159 156 160 157 vht_cap->vht_supported = true; 161 158 162 - own_cap = sband->vht_cap; 159 + own_cap = *own_vht_cap; 163 160 /* 
164 161 * If user has specified capability overrides, take care 165 162 * of that if the station we're setting up is the AP that
+4 -2
net/wireless/chan.c
··· 317 317 int step; 318 318 319 319 /* We only do strict verification on 6 GHz */ 320 - if (center < 5955 || center > 7115) 320 + if (center < 5955 || center > 7215) 321 321 return true; 322 322 323 323 bw = nl80211_chan_width_to_mhz(width); ··· 325 325 return false; 326 326 327 327 /* Validate that the channels bw is entirely within the 6 GHz band */ 328 - if (center - bw / 2 < 5945 || center + bw / 2 > 7125) 328 + if (center - bw / 2 < 5945 || center + bw / 2 > 7225) 329 329 return false; 330 330 331 331 /* With 320 MHz the permitted channels overlap */ ··· 816 816 case NL80211_IFTYPE_MONITOR: 817 817 case NL80211_IFTYPE_AP_VLAN: 818 818 case NL80211_IFTYPE_P2P_DEVICE: 819 + case NL80211_IFTYPE_NAN_DATA: 819 820 break; 820 821 case NL80211_IFTYPE_WDS: 821 822 case NL80211_IFTYPE_UNSPECIFIED: ··· 940 939 case NL80211_IFTYPE_P2P_DEVICE: 941 940 /* Can NAN type be considered as beaconing interface? */ 942 941 case NL80211_IFTYPE_NAN: 942 + case NL80211_IFTYPE_NAN_DATA: 943 943 break; 944 944 case NL80211_IFTYPE_UNSPECIFIED: 945 945 case NL80211_IFTYPE_WDS:
+120 -10
net/wireless/core.c
··· 5 5 * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> 6 6 * Copyright 2013-2014 Intel Mobile Communications GmbH 7 7 * Copyright 2015-2017 Intel Deutschland GmbH 8 - * Copyright (C) 2018-2025 Intel Corporation 8 + * Copyright (C) 2018-2026 Intel Corporation 9 9 */ 10 10 11 11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt ··· 254 254 void cfg80211_stop_nan(struct cfg80211_registered_device *rdev, 255 255 struct wireless_dev *wdev) 256 256 { 257 + struct cfg80211_nan_local_sched empty_sched = {}; 258 + 257 259 lockdep_assert_held(&rdev->wiphy.mtx); 258 260 259 261 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_NAN)) ··· 263 261 264 262 if (!wdev_running(wdev)) 265 263 return; 264 + 265 + /* 266 + * If there is a scheduled update pending, mark it as canceled, so the 267 + * empty schedule will be accepted 268 + */ 269 + wdev->u.nan.sched_update_pending = false; 270 + 271 + /* Unschedule all */ 272 + cfg80211_nan_set_local_schedule(rdev, wdev, &empty_sched); 266 273 267 274 rdev_stop_nan(rdev, wdev); 268 275 wdev->is_running = false; ··· 281 270 rdev->opencount--; 282 271 } 283 272 273 + int cfg80211_nan_set_local_schedule(struct cfg80211_registered_device *rdev, 274 + struct wireless_dev *wdev, 275 + struct cfg80211_nan_local_sched *sched) 276 + { 277 + int ret; 278 + 279 + lockdep_assert_held(&rdev->wiphy.mtx); 280 + 281 + if (wdev->iftype != NL80211_IFTYPE_NAN || !wdev_running(wdev)) 282 + return -EINVAL; 283 + 284 + if (wdev->u.nan.sched_update_pending) 285 + return -EBUSY; 286 + 287 + ret = rdev_nan_set_local_sched(rdev, wdev, sched); 288 + if (ret) 289 + return ret; 290 + 291 + wdev->u.nan.sched_update_pending = sched->deferred; 292 + 293 + kfree(wdev->u.nan.chandefs); 294 + wdev->u.nan.chandefs = NULL; 295 + wdev->u.nan.n_channels = 0; 296 + 297 + if (!sched->n_channels) 298 + return 0; 299 + 300 + wdev->u.nan.chandefs = kcalloc(sched->n_channels, 301 + sizeof(*wdev->u.nan.chandefs), 302 + GFP_KERNEL); 303 + if (!wdev->u.nan.chandefs) 304 + return 
-ENOMEM; 305 + 306 + for (int i = 0; i < sched->n_channels; i++) 307 + wdev->u.nan.chandefs[i] = sched->nan_channels[i].chandef; 308 + 309 + wdev->u.nan.n_channels = sched->n_channels; 310 + 311 + return 0; 312 + } 313 + 284 314 void cfg80211_shutdown_all_interfaces(struct wiphy *wiphy) 285 315 { 286 316 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); ··· 329 277 330 278 ASSERT_RTNL(); 331 279 280 + /* 281 + * Some netdev interfaces need to be closed before some non-netdev 282 + * ones, i.e. NAN_DATA interfaces need to be closed before the NAN 283 + * interface 284 + */ 332 285 list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { 333 286 if (wdev->netdev) { 334 287 dev_close(wdev->netdev); 335 288 continue; 336 289 } 290 + } 337 291 338 - /* otherwise, check iftype */ 292 + guard(wiphy)(wiphy); 339 293 340 - guard(wiphy)(wiphy); 341 - 294 + list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { 342 295 switch (wdev->iftype) { 343 296 case NL80211_IFTYPE_P2P_DEVICE: 344 297 cfg80211_stop_p2p_device(rdev, wdev); ··· 401 344 402 345 list_for_each_entry_safe(wdev, tmp, &rdev->wiphy.wdev_list, list) { 403 346 if (wdev->nl_owner_dead) { 347 + cfg80211_close_dependents(rdev, wdev); 348 + 404 349 if (wdev->netdev) 405 350 dev_close(wdev->netdev); 406 351 ··· 410 351 411 352 cfg80211_remove_virtual_intf(rdev, wdev); 412 353 } 354 + } 355 + } 356 + 357 + void cfg80211_close_dependents(struct cfg80211_registered_device *rdev, 358 + struct wireless_dev *wdev) 359 + { 360 + ASSERT_RTNL(); 361 + 362 + if (wdev->iftype != NL80211_IFTYPE_NAN) 363 + return; 364 + 365 + /* Close all NAN DATA interfaces */ 366 + list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { 367 + if (wdev->iftype == NL80211_IFTYPE_NAN_DATA) 368 + dev_close(wdev->netdev); 413 369 } 414 370 } 415 371 ··· 833 759 (!rdev->ops->start_nan || !rdev->ops->stop_nan || 834 760 !rdev->ops->add_nan_func || !rdev->ops->del_nan_func || 835 761 !(wiphy->nan_supported_bands & 
BIT(NL80211_BAND_2GHZ))))) 762 + return -EINVAL; 763 + 764 + if (WARN_ON((wiphy->interface_modes & BIT(NL80211_IFTYPE_NAN_DATA)) && 765 + !wiphy->nan_capa.phy.ht.ht_supported)) 836 766 return -EINVAL; 837 767 838 768 if (WARN_ON(wiphy->interface_modes & BIT(NL80211_IFTYPE_WDS))) ··· 1445 1367 rdev->num_running_monitor_ifaces += num; 1446 1368 } 1447 1369 1448 - void cfg80211_leave(struct cfg80211_registered_device *rdev, 1449 - struct wireless_dev *wdev, 1450 - int link_id) 1370 + void cfg80211_leave_locked(struct cfg80211_registered_device *rdev, 1371 + struct wireless_dev *wdev, int link_id) 1451 1372 { 1452 1373 struct net_device *dev = wdev->netdev; 1453 1374 struct cfg80211_sched_scan_request *pos, *tmp; ··· 1497 1420 break; 1498 1421 case NL80211_IFTYPE_AP_VLAN: 1499 1422 case NL80211_IFTYPE_MONITOR: 1423 + case NL80211_IFTYPE_NAN_DATA: 1500 1424 /* nothing to do */ 1501 1425 break; 1502 1426 case NL80211_IFTYPE_UNSPECIFIED: ··· 1506 1428 /* invalid */ 1507 1429 break; 1508 1430 } 1431 + } 1432 + 1433 + void cfg80211_leave(struct cfg80211_registered_device *rdev, 1434 + struct wireless_dev *wdev, int link_id) 1435 + { 1436 + ASSERT_RTNL(); 1437 + 1438 + /* NAN_DATA interfaces must be closed before stopping NAN */ 1439 + cfg80211_close_dependents(rdev, wdev); 1440 + 1441 + guard(wiphy)(&rdev->wiphy); 1442 + 1443 + cfg80211_leave_locked(rdev, wdev, link_id); 1509 1444 } 1510 1445 1511 1446 void cfg80211_stop_link(struct wiphy *wiphy, struct wireless_dev *wdev, ··· 1535 1444 link_id = -1; 1536 1445 1537 1446 trace_cfg80211_stop_link(wiphy, wdev, link_id); 1447 + 1448 + if (wdev->iftype == NL80211_IFTYPE_NAN) 1449 + return; 1538 1450 1539 1451 ev = kzalloc_obj(*ev, gfp); 1540 1452 if (!ev) ··· 1689 1595 } 1690 1596 break; 1691 1597 case NETDEV_GOING_DOWN: 1692 - scoped_guard(wiphy, &rdev->wiphy) { 1693 - cfg80211_leave(rdev, wdev, -1); 1598 + cfg80211_leave(rdev, wdev, -1); 1599 + scoped_guard(wiphy, &rdev->wiphy) 1694 1600 cfg80211_remove_links(wdev); 1695 - } 
1696 1601 /* since we just did cfg80211_leave() nothing to do there */ 1697 1602 cancel_work_sync(&wdev->disconnect_wk); 1698 1603 cancel_work_sync(&wdev->pmsr_free_wk); ··· 1772 1679 1773 1680 if (rfkill_blocked(rdev->wiphy.rfkill)) 1774 1681 return notifier_from_errno(-ERFKILL); 1682 + 1683 + /* NAN_DATA interfaces require a running NAN interface */ 1684 + if (wdev->iftype == NL80211_IFTYPE_NAN_DATA) { 1685 + struct wireless_dev *iter; 1686 + bool nan_started = false; 1687 + 1688 + list_for_each_entry(iter, &rdev->wiphy.wdev_list, list) { 1689 + if (iter->iftype == NL80211_IFTYPE_NAN && 1690 + wdev_running(iter)) { 1691 + nan_started = true; 1692 + break; 1693 + } 1694 + } 1695 + 1696 + if (!nan_started) 1697 + return notifier_from_errno(-ENOLINK); 1698 + } 1775 1699 break; 1776 1700 default: 1777 1701 return NOTIFY_DONE;
+10
net/wireless/core.h
··· 318 318 319 319 void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev); 320 320 321 + void cfg80211_close_dependents(struct cfg80211_registered_device *rdev, 322 + struct wireless_dev *wdev); 323 + 321 324 /* free object */ 322 325 void cfg80211_dev_free(struct cfg80211_registered_device *rdev); 323 326 ··· 544 541 void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev, 545 542 enum nl80211_iftype iftype, int num); 546 543 544 + void cfg80211_leave_locked(struct cfg80211_registered_device *rdev, 545 + struct wireless_dev *wdev, int link_id); 546 + 547 547 void cfg80211_leave(struct cfg80211_registered_device *rdev, 548 548 struct wireless_dev *wdev, 549 549 int link_id); ··· 556 550 557 551 void cfg80211_stop_nan(struct cfg80211_registered_device *rdev, 558 552 struct wireless_dev *wdev); 553 + 554 + int cfg80211_nan_set_local_schedule(struct cfg80211_registered_device *rdev, 555 + struct wireless_dev *wdev, 556 + struct cfg80211_nan_local_sched *sched); 559 557 560 558 struct cfg80211_internal_bss * 561 559 cfg80211_bss_update(struct cfg80211_registered_device *rdev,
+9 -4
net/wireless/mlme.c
··· 4 4 * 5 5 * Copyright (c) 2009, Jouni Malinen <j@w1.fi> 6 6 * Copyright (c) 2015 Intel Deutschland GmbH 7 - * Copyright (C) 2019-2020, 2022-2025 Intel Corporation 7 + * Copyright (C) 2019-2020, 2022-2026 Intel Corporation 8 8 */ 9 9 10 10 #include <linux/kernel.h> ··· 782 782 rdev_crit_proto_stop(rdev, wdev); 783 783 } 784 784 785 - if (nlportid == wdev->ap_unexpected_nlportid) 786 - wdev->ap_unexpected_nlportid = 0; 785 + if (nlportid == wdev->unexpected_nlportid) 786 + wdev->unexpected_nlportid = 0; 787 787 } 788 788 789 789 void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev) ··· 933 933 * cfg80211 doesn't track the stations 934 934 */ 935 935 break; 936 + case NL80211_IFTYPE_NAN: 937 + case NL80211_IFTYPE_NAN_DATA: 938 + if (mgmt->u.action.category != 939 + WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION) 940 + err = -EOPNOTSUPP; 941 + break; 936 942 case NL80211_IFTYPE_P2P_DEVICE: 937 943 /* 938 944 * fall through, P2P device only supports 939 945 * public action frames 940 946 */ 941 - case NL80211_IFTYPE_NAN: 942 947 default: 943 948 err = -EOPNOTSUPP; 944 949 break;
+877 -28
net/wireless/nl80211.c
··· 333 333 return 0; 334 334 } 335 335 336 + static int validate_nan_avail_blob(const struct nlattr *attr, 337 + struct netlink_ext_ack *extack) 338 + { 339 + const u8 *data = nla_data(attr); 340 + unsigned int len = nla_len(attr); 341 + u16 attr_len; 342 + 343 + /* Need at least: Attr ID (1) + Length (2) */ 344 + if (len < 3) { 345 + NL_SET_ERR_MSG_FMT(extack, 346 + "NAN Availability: Too short (need at least 3 bytes, have %u)", 347 + len); 348 + return -EINVAL; 349 + } 350 + 351 + if (data[0] != 0x12) { 352 + NL_SET_ERR_MSG_FMT(extack, 353 + "NAN Availability: Invalid Attribute ID 0x%02x (expected 0x12)", 354 + data[0]); 355 + return -EINVAL; 356 + } 357 + 358 + attr_len = get_unaligned_le16(&data[1]); 359 + 360 + if (attr_len != len - 3) { 361 + NL_SET_ERR_MSG_FMT(extack, 362 + "NAN Availability: Length field (%u) doesn't match data length (%u)", 363 + attr_len, len - 3); 364 + return -EINVAL; 365 + } 366 + 367 + return 0; 368 + } 369 + 370 + static int validate_nan_ulw(const struct nlattr *attr, 371 + struct netlink_ext_ack *extack) 372 + { 373 + const u8 *data = nla_data(attr); 374 + unsigned int len = nla_len(attr); 375 + unsigned int pos = 0; 376 + 377 + while (pos < len) { 378 + u16 attr_len; 379 + 380 + /* Need at least: Attr ID (1) + Length (2) */ 381 + if (pos + 3 > len) { 382 + NL_SET_ERR_MSG_FMT(extack, 383 + "ULW: Incomplete header (need 3 bytes, have %u)", 384 + len - pos); 385 + return -EINVAL; 386 + } 387 + 388 + if (data[pos] != 0x17) { 389 + NL_SET_ERR_MSG_FMT(extack, 390 + "ULW: Invalid Attribute ID 0x%02x (expected 0x17)", 391 + data[pos]); 392 + return -EINVAL; 393 + } 394 + pos++; 395 + 396 + /* Length is in little-endian format */ 397 + attr_len = get_unaligned_le16(&data[pos]); 398 + pos += 2; 399 + 400 + /* 401 + * Check if length is one of the valid values: 16 (no 402 + * channel/band entry included), 18 (band entry included), 403 + * 21 (channel entry included without Auxiliary channel bitmap), 404 + * or 23 (channel entry included with 
Auxiliary channel bitmap). 405 + */ 406 + if (attr_len != 16 && attr_len != 18 && attr_len != 21 && 407 + attr_len != 23) { 408 + NL_SET_ERR_MSG_FMT(extack, 409 + "ULW: Invalid length %u (must be 16, 18, 21, or 23)", 410 + attr_len); 411 + return -EINVAL; 412 + } 413 + 414 + if (pos + attr_len > len) { 415 + NL_SET_ERR_MSG_FMT(extack, 416 + "ULW: Length field (%u) exceeds remaining data (%u)", 417 + attr_len, len - pos); 418 + return -EINVAL; 419 + } 420 + 421 + pos += attr_len; 422 + } 423 + 424 + return 0; 425 + } 426 + 336 427 static int validate_uhr_capa(const struct nlattr *attr, 337 428 struct netlink_ext_ack *extack) 338 429 { ··· 644 553 [NL80211_NAN_BAND_CONF_RSSI_MIDDLE] = NLA_POLICY_MIN(NLA_S8, -74), 645 554 [NL80211_NAN_BAND_CONF_WAKE_DW] = NLA_POLICY_MAX(NLA_U8, 5), 646 555 [NL80211_NAN_BAND_CONF_DISABLE_SCAN] = { .type = NLA_FLAG }, 556 + }; 557 + 558 + static const struct nla_policy 559 + nl80211_nan_peer_map_policy[NL80211_NAN_PEER_MAP_ATTR_MAX + 1] = { 560 + [NL80211_NAN_PEER_MAP_ATTR_MAP_ID] = NLA_POLICY_MAX(NLA_U8, 15), 561 + [NL80211_NAN_PEER_MAP_ATTR_TIME_SLOTS] = 562 + NLA_POLICY_EXACT_LEN(CFG80211_NAN_SCHED_NUM_TIME_SLOTS), 647 563 }; 648 564 649 565 static const struct nla_policy ··· 1060 962 [NL80211_ATTR_DISABLE_UHR] = { .type = NLA_FLAG }, 1061 963 [NL80211_ATTR_UHR_OPERATION] = 1062 964 NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_uhr_operation), 965 + [NL80211_ATTR_NAN_CHANNEL] = NLA_POLICY_NESTED(nl80211_policy), 966 + [NL80211_ATTR_NAN_CHANNEL_ENTRY] = NLA_POLICY_EXACT_LEN(6), 967 + [NL80211_ATTR_NAN_RX_NSS] = { .type = NLA_U8 }, 968 + [NL80211_ATTR_NAN_TIME_SLOTS] = 969 + NLA_POLICY_EXACT_LEN(CFG80211_NAN_SCHED_NUM_TIME_SLOTS), 970 + [NL80211_ATTR_NAN_AVAIL_BLOB] = 971 + NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_nan_avail_blob), 972 + [NL80211_ATTR_NAN_SCHED_DEFERRED] = { .type = NLA_FLAG }, 973 + [NL80211_ATTR_NAN_NMI_MAC] = NLA_POLICY_ETH_ADDR, 974 + [NL80211_ATTR_NAN_ULW] = 975 + NLA_POLICY_VALIDATE_FN(NLA_BINARY, 
validate_nan_ulw), 976 + [NL80211_ATTR_NAN_COMMITTED_DW] = { .type = NLA_U16 }, 977 + [NL80211_ATTR_NAN_SEQ_ID] = { .type = NLA_U8 }, 978 + [NL80211_ATTR_NAN_MAX_CHAN_SWITCH_TIME] = { .type = NLA_U16 }, 979 + [NL80211_ATTR_NAN_PEER_MAPS] = 980 + NLA_POLICY_NESTED_ARRAY(nl80211_nan_peer_map_policy), 1063 981 }; 1064 982 1065 983 /* policy for the key attributes */ ··· 1836 1722 return 0; 1837 1723 return -ENOLINK; 1838 1724 case NL80211_IFTYPE_NAN: 1725 + case NL80211_IFTYPE_NAN_DATA: 1839 1726 if (wiphy_ext_feature_isset(wdev->wiphy, 1840 1727 NL80211_EXT_FEATURE_SECURE_NAN)) 1841 1728 return 0; ··· 2793 2678 return -ENOBUFS; 2794 2679 } 2795 2680 2681 + static int nl80211_put_nan_phy_cap(struct wiphy *wiphy, struct sk_buff *msg) 2682 + { 2683 + struct nlattr *nl_phy_cap; 2684 + const struct ieee80211_sta_ht_cap *ht_cap; 2685 + const struct ieee80211_sta_vht_cap *vht_cap; 2686 + const struct ieee80211_sta_he_cap *he_cap; 2687 + 2688 + if (!cfg80211_iftype_allowed(wiphy, NL80211_IFTYPE_NAN_DATA, false, 0)) 2689 + return 0; 2690 + 2691 + ht_cap = &wiphy->nan_capa.phy.ht; 2692 + vht_cap = &wiphy->nan_capa.phy.vht; 2693 + he_cap = &wiphy->nan_capa.phy.he; 2694 + 2695 + /* HT is mandatory */ 2696 + if (WARN_ON(!ht_cap->ht_supported)) 2697 + return 0; 2698 + 2699 + nl_phy_cap = nla_nest_start_noflag(msg, NL80211_NAN_CAPA_PHY); 2700 + if (!nl_phy_cap) 2701 + return -ENOBUFS; 2702 + 2703 + if (nla_put(msg, NL80211_NAN_PHY_CAP_ATTR_HT_MCS_SET, 2704 + sizeof(ht_cap->mcs), &ht_cap->mcs) || 2705 + nla_put_u16(msg, NL80211_NAN_PHY_CAP_ATTR_HT_CAPA, ht_cap->cap) || 2706 + nla_put_u8(msg, NL80211_NAN_PHY_CAP_ATTR_HT_AMPDU_FACTOR, 2707 + ht_cap->ampdu_factor) || 2708 + nla_put_u8(msg, NL80211_NAN_PHY_CAP_ATTR_HT_AMPDU_DENSITY, 2709 + ht_cap->ampdu_density)) 2710 + goto fail; 2711 + 2712 + if (vht_cap->vht_supported) { 2713 + if (nla_put(msg, NL80211_NAN_PHY_CAP_ATTR_VHT_MCS_SET, 2714 + sizeof(vht_cap->vht_mcs), &vht_cap->vht_mcs) || 2715 + nla_put_u32(msg, 
NL80211_NAN_PHY_CAP_ATTR_VHT_CAPA, 2716 + vht_cap->cap)) 2717 + goto fail; 2718 + } 2719 + 2720 + if (he_cap->has_he) { 2721 + if (nla_put(msg, NL80211_NAN_PHY_CAP_ATTR_HE_MAC, 2722 + sizeof(he_cap->he_cap_elem.mac_cap_info), 2723 + he_cap->he_cap_elem.mac_cap_info) || 2724 + nla_put(msg, NL80211_NAN_PHY_CAP_ATTR_HE_PHY, 2725 + sizeof(he_cap->he_cap_elem.phy_cap_info), 2726 + he_cap->he_cap_elem.phy_cap_info) || 2727 + nla_put(msg, NL80211_NAN_PHY_CAP_ATTR_HE_MCS_SET, 2728 + sizeof(he_cap->he_mcs_nss_supp), 2729 + &he_cap->he_mcs_nss_supp) || 2730 + nla_put(msg, NL80211_NAN_PHY_CAP_ATTR_HE_PPE, 2731 + sizeof(he_cap->ppe_thres), he_cap->ppe_thres)) 2732 + goto fail; 2733 + } 2734 + 2735 + nla_nest_end(msg, nl_phy_cap); 2736 + return 0; 2737 + 2738 + fail: 2739 + nla_nest_cancel(msg, nl_phy_cap); 2740 + return -ENOBUFS; 2741 + } 2742 + 2796 2743 static int nl80211_put_nan_capa(struct wiphy *wiphy, struct sk_buff *msg) 2797 2744 { 2798 2745 struct nlattr *nan_caps; ··· 2879 2702 wiphy->nan_capa.max_channel_switch_time) || 2880 2703 nla_put_u8(msg, NL80211_NAN_CAPA_CAPABILITIES, 2881 2704 wiphy->nan_capa.dev_capabilities)) 2705 + goto fail; 2706 + 2707 + if (nl80211_put_nan_phy_cap(wiphy, msg)) 2882 2708 goto fail; 2883 2709 2884 2710 nla_nest_end(msg, nan_caps); ··· 5058 4878 info->user_ptr[1] = NULL; 5059 4879 else 5060 4880 dev_close(wdev->netdev); 4881 + 4882 + cfg80211_close_dependents(rdev, wdev); 5061 4883 5062 4884 mutex_lock(&rdev->wiphy.mtx); 5063 4885 ··· 7305 7123 if ((params->sta_flags_mask | 7306 7124 params->sta_flags_set) & BIT(__NL80211_STA_FLAG_INVALID)) 7307 7125 return -EINVAL; 7126 + 7127 + if ((iftype == NL80211_IFTYPE_NAN || 7128 + iftype == NL80211_IFTYPE_NAN_DATA) && 7129 + params->sta_flags_mask & 7130 + ~(BIT(NL80211_STA_FLAG_AUTHENTICATED) | 7131 + BIT(NL80211_STA_FLAG_ASSOCIATED) | 7132 + BIT(NL80211_STA_FLAG_AUTHORIZED) | 7133 + BIT(NL80211_STA_FLAG_MFP))) 7134 + return -EINVAL; 7135 + 7136 + /* WME is always used in NAN */ 7137 + if 
(iftype == NL80211_IFTYPE_NAN_DATA) { 7138 + /* but don't let userspace control it */ 7139 + if (params->sta_flags_mask & BIT(NL80211_STA_FLAG_WME)) 7140 + return -EINVAL; 7141 + 7142 + params->sta_flags_mask |= BIT(NL80211_STA_FLAG_WME); 7143 + params->sta_flags_set |= BIT(NL80211_STA_FLAG_WME); 7144 + } 7145 + 7308 7146 return 0; 7309 7147 } 7310 7148 ··· 8207 8005 /* nl80211_prepare_wdev_dump acquired it in the successful case */ 8208 8006 __acquire(&rdev->wiphy.mtx); 8209 8007 8210 - if (!wdev->netdev) { 8008 + if (!wdev->netdev && wdev->iftype != NL80211_IFTYPE_NAN) { 8211 8009 err = -EINVAL; 8212 8010 goto out_err; 8213 8011 } ··· 8394 8192 return -EINVAL; 8395 8193 if (params->link_sta_params.supported_rates) 8396 8194 return -EINVAL; 8397 - if (params->ext_capab || params->link_sta_params.ht_capa || 8398 - params->link_sta_params.vht_capa || 8399 - params->link_sta_params.he_capa || 8400 - params->link_sta_params.eht_capa || 8195 + if (statype != CFG80211_STA_NAN_MGMT && 8196 + (params->link_sta_params.ht_capa || 8197 + params->link_sta_params.vht_capa || 8198 + params->link_sta_params.he_capa)) 8199 + return -EINVAL; 8200 + if (params->ext_capab || params->link_sta_params.eht_capa || 8401 8201 params->link_sta_params.uhr_capa) 8402 8202 return -EINVAL; 8403 8203 if (params->sta_flags_mask & BIT(NL80211_STA_FLAG_SPP_AMSDU)) ··· 8469 8265 case CFG80211_STA_MESH_PEER_USER: 8470 8266 if (params->plink_action != NL80211_PLINK_ACTION_NO_ACTION && 8471 8267 params->plink_action != NL80211_PLINK_ACTION_BLOCK) 8268 + return -EINVAL; 8269 + break; 8270 + case CFG80211_STA_NAN_MGMT: 8271 + if (params->sta_flags_mask & 8272 + ~(BIT(NL80211_STA_FLAG_AUTHORIZED) | 8273 + BIT(NL80211_STA_FLAG_MFP))) 8274 + return -EINVAL; 8275 + break; 8276 + case CFG80211_STA_NAN_DATA: 8277 + if (params->sta_flags_mask & 8278 + ~(BIT(NL80211_STA_FLAG_AUTHORIZED) | 8279 + BIT(NL80211_STA_FLAG_MFP) | 8280 + BIT(NL80211_STA_FLAG_WME))) 8472 8281 return -EINVAL; 8473 8282 break; 8474 8283 } 
··· 8698 8481 8699 8482 memset(&params, 0, sizeof(params)); 8700 8483 8701 - if (!dev) 8484 + if (!dev && wdev->iftype != NL80211_IFTYPE_NAN && 8485 + wdev->iftype != NL80211_IFTYPE_NAN_DATA) 8702 8486 return -EINVAL; 8703 8487 8704 8488 if (!rdev->ops->change_station) ··· 8842 8624 case NL80211_IFTYPE_STATION: 8843 8625 case NL80211_IFTYPE_ADHOC: 8844 8626 case NL80211_IFTYPE_MESH_POINT: 8627 + case NL80211_IFTYPE_NAN: 8628 + case NL80211_IFTYPE_NAN_DATA: 8845 8629 break; 8846 8630 default: 8847 8631 err = -EOPNOTSUPP; ··· 8872 8652 8873 8653 memset(&params, 0, sizeof(params)); 8874 8654 8875 - if (!dev) 8655 + if (!dev && wdev->iftype != NL80211_IFTYPE_NAN) 8876 8656 return -EINVAL; 8877 8657 8878 8658 if (!rdev->ops->add_station) ··· 8881 8661 if (!info->attrs[NL80211_ATTR_MAC]) 8882 8662 return -EINVAL; 8883 8663 8884 - if (!info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]) 8885 - return -EINVAL; 8664 + if (wdev->iftype == NL80211_IFTYPE_NAN || 8665 + wdev->iftype == NL80211_IFTYPE_NAN_DATA) { 8666 + if (info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]) 8667 + return -EINVAL; 8668 + if (wdev->iftype == NL80211_IFTYPE_NAN_DATA) { 8669 + if (!info->attrs[NL80211_ATTR_NAN_NMI_MAC]) 8670 + return -EINVAL; 8886 8671 8887 - if (!info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]) 8888 - return -EINVAL; 8672 + /* Only NMI stations receive the HT/VHT/HE capabilities */ 8673 + if (info->attrs[NL80211_ATTR_HT_CAPABILITY] || 8674 + info->attrs[NL80211_ATTR_VHT_CAPABILITY] || 8675 + info->attrs[NL80211_ATTR_HE_CAPABILITY]) 8676 + return -EINVAL; 8677 + } 8678 + } else { 8679 + if (!info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]) 8680 + return -EINVAL; 8889 8681 8890 - if (!info->attrs[NL80211_ATTR_STA_AID] && 8891 - !info->attrs[NL80211_ATTR_PEER_AID]) 8892 - return -EINVAL; 8682 + if (!info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]) 8683 + return -EINVAL; 8684 + 8685 + if (!info->attrs[NL80211_ATTR_STA_AID] && 8686 + !info->attrs[NL80211_ATTR_PEER_AID]) 8687 + return -EINVAL; 8688 + } 
8893 8689 8894 8690 params.link_sta_params.link_id = 8895 8691 nl80211_link_id_or_invalid(info->attrs); ··· 8921 8685 mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); 8922 8686 } 8923 8687 8924 - params.link_sta_params.supported_rates = 8925 - nla_data(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]); 8926 - params.link_sta_params.supported_rates_len = 8927 - nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]); 8928 - params.listen_interval = 8929 - nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); 8688 + if (info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]) { 8689 + params.link_sta_params.supported_rates = 8690 + nla_data(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]); 8691 + params.link_sta_params.supported_rates_len = 8692 + nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]); 8693 + } 8694 + 8695 + if (info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]) 8696 + params.listen_interval = 8697 + nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); 8930 8698 8931 8699 if (info->attrs[NL80211_ATTR_VLAN_ID]) 8932 8700 params.vlan_id = nla_get_u16(info->attrs[NL80211_ATTR_VLAN_ID]); ··· 8949 8709 8950 8710 if (info->attrs[NL80211_ATTR_PEER_AID]) 8951 8711 params.aid = nla_get_u16(info->attrs[NL80211_ATTR_PEER_AID]); 8952 - else 8712 + else if (info->attrs[NL80211_ATTR_STA_AID]) 8953 8713 params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); 8954 8714 8955 8715 if (info->attrs[NL80211_ATTR_STA_CAPABILITY]) { ··· 9070 8830 return -EINVAL; 9071 8831 } 9072 8832 8833 + if (wdev->iftype == NL80211_IFTYPE_NAN || 8834 + wdev->iftype == NL80211_IFTYPE_NAN_DATA) { 8835 + if (params.sta_modify_mask & STATION_PARAM_APPLY_UAPSD) 8836 + return -EINVAL; 8837 + /* NAN NMI station must be added in associated or authorized state */ 8838 + if (!(params.sta_flags_set & (BIT(NL80211_STA_FLAG_ASSOCIATED) | 8839 + BIT(NL80211_STA_FLAG_AUTHENTICATED)))) 8840 + return -EINVAL; 8841 + } 8842 + 9073 8843 /* Ensure that HT/VHT capabilities are not set for 6 GHz HE STA */ 
9074 8844 if (params.link_sta_params.he_6ghz_capa && 9075 8845 (params.link_sta_params.ht_capa || params.link_sta_params.vht_capa)) ··· 9172 8922 */ 9173 8923 params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_AUTHORIZED); 9174 8924 break; 8925 + case NL80211_IFTYPE_NAN: 8926 + break; 8927 + case NL80211_IFTYPE_NAN_DATA: 8928 + params.nmi_mac = nla_data(info->attrs[NL80211_ATTR_NAN_NMI_MAC]); 8929 + break; 9175 8930 default: 9176 8931 return -EOPNOTSUPP; 9177 8932 } ··· 9218 8963 9219 8964 memset(&params, 0, sizeof(params)); 9220 8965 9221 - if (!dev) 8966 + if (!dev && wdev->iftype != NL80211_IFTYPE_NAN) 9222 8967 return -EINVAL; 9223 8968 9224 8969 if (info->attrs[NL80211_ATTR_MAC]) ··· 9229 8974 case NL80211_IFTYPE_AP_VLAN: 9230 8975 case NL80211_IFTYPE_MESH_POINT: 9231 8976 case NL80211_IFTYPE_P2P_GO: 8977 + case NL80211_IFTYPE_NAN: 8978 + case NL80211_IFTYPE_NAN_DATA: 9232 8979 /* always accept these */ 9233 8980 break; 9234 8981 case NL80211_IFTYPE_ADHOC: ··· 14207 13950 case NL80211_IFTYPE_P2P_DEVICE: 14208 13951 break; 14209 13952 case NL80211_IFTYPE_NAN: 13953 + case NL80211_IFTYPE_NAN_DATA: 14210 13954 if (!wiphy_ext_feature_isset(wdev->wiphy, 14211 13955 NL80211_EXT_FEATURE_SECURE_NAN) && 14212 13956 !(wdev->wiphy->nan_capa.flags & ··· 14271 14013 case NL80211_IFTYPE_P2P_GO: 14272 14014 break; 14273 14015 case NL80211_IFTYPE_NAN: 14016 + case NL80211_IFTYPE_NAN_DATA: 14274 14017 if (!wiphy_ext_feature_isset(wdev->wiphy, 14275 14018 NL80211_EXT_FEATURE_SECURE_NAN) && 14276 14019 !(wdev->wiphy->nan_capa.flags & ··· 15779 15520 struct wireless_dev *wdev = dev->ieee80211_ptr; 15780 15521 15781 15522 if (wdev->iftype != NL80211_IFTYPE_AP && 15782 - wdev->iftype != NL80211_IFTYPE_P2P_GO) 15523 + wdev->iftype != NL80211_IFTYPE_P2P_GO && 15524 + wdev->iftype != NL80211_IFTYPE_NAN_DATA) 15783 15525 return -EINVAL; 15784 15526 15785 - if (wdev->ap_unexpected_nlportid) 15527 + if (wdev->unexpected_nlportid) 15786 15528 return -EBUSY; 15787 15529 15788 - 
wdev->ap_unexpected_nlportid = info->snd_portid; 15530 + wdev->unexpected_nlportid = info->snd_portid; 15789 15531 return 0; 15790 15532 } 15791 15533 ··· 16181 15921 16182 15922 if (wdev->iftype != NL80211_IFTYPE_NAN) 16183 15923 return -EOPNOTSUPP; 15924 + 15925 + cfg80211_close_dependents(rdev, wdev); 15926 + 15927 + guard(wiphy)(&rdev->wiphy); 16184 15928 16185 15929 cfg80211_stop_nan(rdev, wdev); 16186 15930 ··· 16684 16420 nlmsg_free(msg); 16685 16421 } 16686 16422 EXPORT_SYMBOL(cfg80211_nan_func_terminated); 16423 + 16424 + void cfg80211_nan_sched_update_done(struct wireless_dev *wdev, bool success, 16425 + gfp_t gfp) 16426 + { 16427 + struct wiphy *wiphy = wdev->wiphy; 16428 + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); 16429 + struct sk_buff *msg; 16430 + void *hdr; 16431 + 16432 + trace_cfg80211_nan_sched_update_done(wiphy, wdev, success); 16433 + 16434 + /* Can happen if we stopped NAN */ 16435 + if (!wdev->u.nan.sched_update_pending) 16436 + return; 16437 + 16438 + wdev->u.nan.sched_update_pending = false; 16439 + 16440 + if (!wdev->owner_nlportid) 16441 + return; 16442 + 16443 + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); 16444 + if (!msg) 16445 + return; 16446 + 16447 + hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NAN_SCHED_UPDATE_DONE); 16448 + if (!hdr) 16449 + goto nla_put_failure; 16450 + 16451 + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || 16452 + nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), 16453 + NL80211_ATTR_PAD) || 16454 + (success && 16455 + nla_put_flag(msg, NL80211_ATTR_NAN_SCHED_UPDATE_SUCCESS))) 16456 + goto nla_put_failure; 16457 + 16458 + genlmsg_end(msg, hdr); 16459 + 16460 + genlmsg_unicast(wiphy_net(wiphy), msg, wdev->owner_nlportid); 16461 + 16462 + return; 16463 + 16464 + nla_put_failure: 16465 + nlmsg_free(msg); 16466 + } 16467 + EXPORT_SYMBOL(cfg80211_nan_sched_update_done); 16468 + 16469 + static int nl80211_parse_nan_channel(struct cfg80211_registered_device *rdev, 16470 + 
struct nlattr *channel, 16471 + struct genl_info *info, 16472 + struct cfg80211_nan_channel *nan_channels, 16473 + u8 index, bool local) 16474 + { 16475 + struct nlattr **channel_parsed __free(kfree) = NULL; 16476 + struct cfg80211_chan_def chandef; 16477 + u8 n_rx_nss; 16478 + int ret; 16479 + 16480 + channel_parsed = kcalloc(NL80211_ATTR_MAX + 1, sizeof(*channel_parsed), 16481 + GFP_KERNEL); 16482 + if (!channel_parsed) 16483 + return -ENOMEM; 16484 + 16485 + ret = nla_parse_nested(channel_parsed, NL80211_ATTR_MAX, channel, NULL, 16486 + info->extack); 16487 + if (ret) 16488 + return ret; 16489 + 16490 + ret = nl80211_parse_chandef(rdev, info->extack, channel_parsed, 16491 + &chandef); 16492 + if (ret) 16493 + return ret; 16494 + 16495 + if (chandef.chan->band == NL80211_BAND_6GHZ) { 16496 + NL_SET_ERR_MSG(info->extack, 16497 + "6 GHz band is not supported"); 16498 + return -EOPNOTSUPP; 16499 + } 16500 + 16501 + if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef, 16502 + NL80211_IFTYPE_NAN)) { 16503 + NL_SET_ERR_MSG_ATTR(info->extack, channel, 16504 + "Channel in NAN schedule is not allowed for NAN operation"); 16505 + return -EINVAL; 16506 + } 16507 + 16508 + if (local) { 16509 + for (int i = 0; i < index; i++) { 16510 + if (cfg80211_chandef_compatible(&nan_channels[i].chandef, 16511 + &chandef)) { 16512 + NL_SET_ERR_MSG_ATTR(info->extack, channel, 16513 + "Channels in NAN schedule must be mutually incompatible"); 16514 + return -EINVAL; 16515 + } 16516 + } 16517 + } 16518 + 16519 + if (!channel_parsed[NL80211_ATTR_NAN_CHANNEL_ENTRY]) { 16520 + NL_SET_ERR_MSG(info->extack, 16521 + "Missing NAN channel entry attribute"); 16522 + return -EINVAL; 16523 + } 16524 + 16525 + nan_channels[index].channel_entry = 16526 + nla_data(channel_parsed[NL80211_ATTR_NAN_CHANNEL_ENTRY]); 16527 + 16528 + if (!channel_parsed[NL80211_ATTR_NAN_RX_NSS]) { 16529 + NL_SET_ERR_MSG(info->extack, 16530 + "Missing NAN RX NSS attribute"); 16531 + return -EINVAL; 16532 + } 16533 + 16534 + 
nan_channels[index].rx_nss = 16535 + nla_get_u8(channel_parsed[NL80211_ATTR_NAN_RX_NSS]); 16536 + 16537 + n_rx_nss = u8_get_bits(rdev->wiphy.nan_capa.n_antennas, 0x03); 16538 + if ((local && nan_channels[index].rx_nss > n_rx_nss) || 16539 + !nan_channels[index].rx_nss) { 16540 + NL_SET_ERR_MSG_ATTR(info->extack, channel, 16541 + "Invalid RX NSS in NAN channel definition"); 16542 + return -EINVAL; 16543 + } 16544 + 16545 + nan_channels[index].chandef = chandef; 16546 + 16547 + return 0; 16548 + } 16549 + 16550 + static int 16551 + nl80211_parse_nan_schedule(struct genl_info *info, struct nlattr *slots_attr, 16552 + u8 schedule[CFG80211_NAN_SCHED_NUM_TIME_SLOTS], 16553 + u8 n_channels) 16554 + { 16555 + if (WARN_ON(nla_len(slots_attr) != CFG80211_NAN_SCHED_NUM_TIME_SLOTS)) 16556 + return -EINVAL; 16557 + 16558 + memcpy(schedule, nla_data(slots_attr), nla_len(slots_attr)); 16559 + 16560 + for (int slot = 0; slot < CFG80211_NAN_SCHED_NUM_TIME_SLOTS; slot++) { 16561 + if (schedule[slot] != NL80211_NAN_SCHED_NOT_AVAIL_SLOT && 16562 + schedule[slot] >= n_channels) { 16563 + NL_SET_ERR_MSG_FMT(info->extack, 16564 + "Invalid time slot: slot %d refers to channel index %d, n_channels=%d", 16565 + slot, schedule[slot], n_channels); 16566 + return -EINVAL; 16567 + } 16568 + } 16569 + 16570 + return 0; 16571 + } 16572 + 16573 + static int 16574 + nl80211_parse_nan_peer_map(struct genl_info *info, struct nlattr *map_attr, 16575 + struct cfg80211_nan_peer_map *map, u8 n_channels) 16576 + { 16577 + struct nlattr *tb[NL80211_NAN_PEER_MAP_ATTR_MAX + 1]; 16578 + int ret; 16579 + 16580 + ret = nla_parse_nested(tb, NL80211_NAN_PEER_MAP_ATTR_MAX, map_attr, 16581 + nl80211_nan_peer_map_policy, info->extack); 16582 + if (ret) 16583 + return ret; 16584 + 16585 + if (!tb[NL80211_NAN_PEER_MAP_ATTR_MAP_ID] || 16586 + !tb[NL80211_NAN_PEER_MAP_ATTR_TIME_SLOTS]) { 16587 + NL_SET_ERR_MSG(info->extack, 16588 + "Missing required peer map attributes"); 16589 + return -EINVAL; 16590 + } 16591 + 16592 
+ map->map_id = nla_get_u8(tb[NL80211_NAN_PEER_MAP_ATTR_MAP_ID]); 16593 + 16594 + /* Parse schedule */ 16595 + return nl80211_parse_nan_schedule(info, 16596 + tb[NL80211_NAN_PEER_MAP_ATTR_TIME_SLOTS], 16597 + map->schedule, n_channels); 16598 + } 16599 + 16600 + static int nl80211_nan_validate_map_pair(struct wiphy *wiphy, 16601 + struct genl_info *info, 16602 + const struct cfg80211_nan_peer_map *map1, 16603 + const struct cfg80211_nan_peer_map *map2, 16604 + struct cfg80211_nan_channel *nan_channels) 16605 + { 16606 + /* Check for duplicate map_id */ 16607 + if (map1->map_id == map2->map_id) { 16608 + NL_SET_ERR_MSG_FMT(info->extack, "Duplicate map_id %u", 16609 + map1->map_id); 16610 + return -EINVAL; 16611 + } 16612 + 16613 + /* Check for compatible channels between maps */ 16614 + for (int i = 0; i < ARRAY_SIZE(map1->schedule); i++) { 16615 + if (map1->schedule[i] == NL80211_NAN_SCHED_NOT_AVAIL_SLOT) 16616 + continue; 16617 + 16618 + for (int j = 0; j < ARRAY_SIZE(map2->schedule); j++) { 16619 + u8 ch1 = map1->schedule[i]; 16620 + u8 ch2 = map2->schedule[j]; 16621 + 16622 + if (ch2 == NL80211_NAN_SCHED_NOT_AVAIL_SLOT) 16623 + continue; 16624 + 16625 + if (cfg80211_chandef_compatible(&nan_channels[ch1].chandef, 16626 + &nan_channels[ch2].chandef)) { 16627 + NL_SET_ERR_MSG_FMT(info->extack, 16628 + "Maps %u and %u have compatible channels %d and %d", 16629 + map1->map_id, map2->map_id, 16630 + ch1, ch2); 16631 + return -EINVAL; 16632 + } 16633 + } 16634 + } 16635 + 16636 + /* 16637 + * Check for conflicting time slots between maps. 16638 + * Only check for single-radio devices (n_radio <= 1) which cannot 16639 + * operate on multiple channels simultaneously. 
16640 + */ 16641 + if (wiphy->n_radio > 1) 16642 + return 0; 16643 + 16644 + for (int i = 0; i < ARRAY_SIZE(map1->schedule); i++) { 16645 + if (map1->schedule[i] != NL80211_NAN_SCHED_NOT_AVAIL_SLOT && 16646 + map2->schedule[i] != NL80211_NAN_SCHED_NOT_AVAIL_SLOT) { 16647 + NL_SET_ERR_MSG_FMT(info->extack, 16648 + "Maps %u and %u both schedule slot %d", 16649 + map1->map_id, map2->map_id, i); 16650 + return -EINVAL; 16651 + } 16652 + } 16653 + 16654 + return 0; 16655 + } 16656 + 16657 + static int nl80211_nan_set_peer_sched(struct sk_buff *skb, 16658 + struct genl_info *info) 16659 + { 16660 + struct cfg80211_registered_device *rdev = info->user_ptr[0]; 16661 + struct cfg80211_nan_channel *nan_channels __free(kfree) = NULL; 16662 + struct cfg80211_nan_peer_sched sched = {}; 16663 + struct wireless_dev *wdev = info->user_ptr[1]; 16664 + struct nlattr *map_attr, *channel; 16665 + int ret, n_maps = 0, n_channels = 0, i = 0, rem; 16666 + 16667 + if (wdev->iftype != NL80211_IFTYPE_NAN) 16668 + return -EOPNOTSUPP; 16669 + 16670 + if (!info->attrs[NL80211_ATTR_MAC] || 16671 + !info->attrs[NL80211_ATTR_NAN_COMMITTED_DW]) { 16672 + NL_SET_ERR_MSG(info->extack, 16673 + "Required NAN peer schedule attributes are missing"); 16674 + return -EINVAL; 16675 + } 16676 + 16677 + /* First count how many channel attributes we got */ 16678 + nlmsg_for_each_attr_type(channel, NL80211_ATTR_NAN_CHANNEL, 16679 + info->nlhdr, GENL_HDRLEN, rem) 16680 + n_channels++; 16681 + 16682 + if (!((info->attrs[NL80211_ATTR_NAN_SEQ_ID] && 16683 + info->attrs[NL80211_ATTR_NAN_PEER_MAPS] && n_channels) || 16684 + ((!info->attrs[NL80211_ATTR_NAN_SEQ_ID] && 16685 + !info->attrs[NL80211_ATTR_NAN_PEER_MAPS] && !n_channels)))) { 16686 + NL_SET_ERR_MSG(info->extack, 16687 + "Either provide all of: seq id, channels and maps, or none"); 16688 + return -EINVAL; 16689 + } 16690 + 16691 + /* 16692 + * Limit the number of peer channels to: 16693 + * local_channels * 4 (possible BWs) * 2 (possible NSS values) 16694 + 
*/ 16695 + if (n_channels && n_channels > wdev->u.nan.n_channels * 4 * 2) { 16696 + NL_SET_ERR_MSG_FMT(info->extack, 16697 + "Too many peer channels: %d (max %d)", 16698 + n_channels, 16699 + wdev->u.nan.n_channels * 4 * 2); 16700 + return -EINVAL; 16701 + } 16702 + 16703 + if (n_channels) { 16704 + nan_channels = kcalloc(n_channels, sizeof(*nan_channels), 16705 + GFP_KERNEL); 16706 + if (!nan_channels) 16707 + return -ENOMEM; 16708 + } 16709 + 16710 + /* Parse peer channels */ 16711 + nlmsg_for_each_attr_type(channel, NL80211_ATTR_NAN_CHANNEL, 16712 + info->nlhdr, GENL_HDRLEN, rem) { 16713 + bool compatible = false; 16714 + 16715 + ret = nl80211_parse_nan_channel(rdev, channel, info, 16716 + nan_channels, i, false); 16717 + if (ret) 16718 + return ret; 16719 + 16720 + /* Verify channel is compatible with at least one local channel */ 16721 + for (int j = 0; j < wdev->u.nan.n_channels; j++) { 16722 + if (cfg80211_chandef_compatible(&nan_channels[i].chandef, 16723 + &wdev->u.nan.chandefs[j])) { 16724 + compatible = true; 16725 + break; 16726 + } 16727 + } 16728 + if (!compatible) { 16729 + NL_SET_ERR_MSG_FMT(info->extack, 16730 + "Channel %d not compatible with any local channel", 16731 + i); 16732 + return -EINVAL; 16733 + } 16734 + i++; 16735 + } 16736 + 16737 + sched.n_channels = n_channels; 16738 + sched.nan_channels = nan_channels; 16739 + sched.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); 16740 + sched.seq_id = nla_get_u8_default(info->attrs[NL80211_ATTR_NAN_SEQ_ID], 0); 16741 + sched.committed_dw = nla_get_u16(info->attrs[NL80211_ATTR_NAN_COMMITTED_DW]); 16742 + sched.max_chan_switch = 16743 + nla_get_u16_default(info->attrs[NL80211_ATTR_NAN_MAX_CHAN_SWITCH_TIME], 0); 16744 + 16745 + if (info->attrs[NL80211_ATTR_NAN_ULW]) { 16746 + sched.ulw_size = nla_len(info->attrs[NL80211_ATTR_NAN_ULW]); 16747 + sched.init_ulw = nla_data(info->attrs[NL80211_ATTR_NAN_ULW]); 16748 + } 16749 + 16750 + /* Initialize all maps as invalid */ 16751 + for (int j = 0; j < 
ARRAY_SIZE(sched.maps); j++) 16752 + sched.maps[j].map_id = CFG80211_NAN_INVALID_MAP_ID; 16753 + 16754 + if (info->attrs[NL80211_ATTR_NAN_PEER_MAPS]) { 16755 + /* Parse each map */ 16756 + nla_for_each_nested(map_attr, info->attrs[NL80211_ATTR_NAN_PEER_MAPS], 16757 + rem) { 16758 + if (n_maps >= ARRAY_SIZE(sched.maps)) { 16759 + NL_SET_ERR_MSG(info->extack, "Too many peer maps"); 16760 + return -EINVAL; 16761 + } 16762 + 16763 + ret = nl80211_parse_nan_peer_map(info, map_attr, 16764 + &sched.maps[n_maps], 16765 + n_channels); 16766 + if (ret) 16767 + return ret; 16768 + 16769 + /* Validate against previous maps */ 16770 + for (int j = 0; j < n_maps; j++) { 16771 + ret = nl80211_nan_validate_map_pair(&rdev->wiphy, info, 16772 + &sched.maps[j], 16773 + &sched.maps[n_maps], 16774 + nan_channels); 16775 + if (ret) 16776 + return ret; 16777 + } 16778 + 16779 + n_maps++; 16780 + } 16781 + } 16782 + 16783 + /* Verify each channel is scheduled at least once */ 16784 + for (int ch = 0; ch < n_channels; ch++) { 16785 + bool scheduled = false; 16786 + 16787 + for (int m = 0; m < n_maps && !scheduled; m++) { 16788 + for (int s = 0; s < ARRAY_SIZE(sched.maps[m].schedule); s++) { 16789 + if (sched.maps[m].schedule[s] == ch) { 16790 + scheduled = true; 16791 + break; 16792 + } 16793 + } 16794 + } 16795 + if (!scheduled) { 16796 + NL_SET_ERR_MSG_FMT(info->extack, 16797 + "Channel %d is not scheduled in any map", 16798 + ch); 16799 + return -EINVAL; 16800 + } 16801 + } 16802 + 16803 + return rdev_nan_set_peer_sched(rdev, wdev, &sched); 16804 + } 16805 + 16806 + static bool nl80211_nan_is_sched_empty(struct cfg80211_nan_local_sched *sched) 16807 + { 16808 + if (!sched->n_channels) 16809 + return true; 16810 + 16811 + for (int i = 0; i < ARRAY_SIZE(sched->schedule); i++) { 16812 + if (sched->schedule[i] != NL80211_NAN_SCHED_NOT_AVAIL_SLOT) 16813 + return false; 16814 + } 16815 + 16816 + return true; 16817 + } 16818 + 16819 + static int nl80211_nan_set_local_sched(struct sk_buff *skb, 
16820 + struct genl_info *info) 16821 + { 16822 + struct cfg80211_registered_device *rdev = info->user_ptr[0]; 16823 + struct cfg80211_nan_local_sched *sched __free(kfree) = NULL; 16824 + struct wireless_dev *wdev = info->user_ptr[1]; 16825 + int rem, i = 0, n_channels = 0, ret; 16826 + struct nlattr *channel; 16827 + bool sched_empty; 16828 + 16829 + if (wdev->iftype != NL80211_IFTYPE_NAN) 16830 + return -EOPNOTSUPP; 16831 + 16832 + if (!wdev_running(wdev)) 16833 + return -ENOTCONN; 16834 + 16835 + if (!info->attrs[NL80211_ATTR_NAN_TIME_SLOTS]) 16836 + return -EINVAL; 16837 + 16838 + /* First count how many channel attributes we got */ 16839 + nlmsg_for_each_attr_type(channel, NL80211_ATTR_NAN_CHANNEL, 16840 + info->nlhdr, GENL_HDRLEN, rem) 16841 + n_channels++; 16842 + 16843 + sched = kzalloc(struct_size(sched, nan_channels, n_channels), 16844 + GFP_KERNEL); 16845 + if (!sched) 16846 + return -ENOMEM; 16847 + 16848 + sched->n_channels = n_channels; 16849 + 16850 + nlmsg_for_each_attr_type(channel, NL80211_ATTR_NAN_CHANNEL, 16851 + info->nlhdr, GENL_HDRLEN, rem) { 16852 + ret = nl80211_parse_nan_channel(rdev, channel, info, 16853 + sched->nan_channels, i, true); 16854 + 16855 + if (ret) 16856 + return ret; 16857 + i++; 16858 + } 16859 + 16860 + /* Parse and validate schedule */ 16861 + ret = nl80211_parse_nan_schedule(info, 16862 + info->attrs[NL80211_ATTR_NAN_TIME_SLOTS], 16863 + sched->schedule, sched->n_channels); 16864 + if (ret) 16865 + return ret; 16866 + 16867 + sched_empty = nl80211_nan_is_sched_empty(sched); 16868 + 16869 + sched->deferred = 16870 + nla_get_flag(info->attrs[NL80211_ATTR_NAN_SCHED_DEFERRED]); 16871 + 16872 + if (sched_empty) { 16873 + if (sched->deferred) { 16874 + NL_SET_ERR_MSG(info->extack, 16875 + "Schedule cannot be deferred if all time slots are unavailable"); 16876 + return -EINVAL; 16877 + } 16878 + 16879 + if (info->attrs[NL80211_ATTR_NAN_AVAIL_BLOB]) { 16880 + NL_SET_ERR_MSG(info->extack, 16881 + "NAN Availability blob must be 
empty if all time slots are unavailable"); 16882 + return -EINVAL; 16883 + } 16884 + } else { 16885 + if (!info->attrs[NL80211_ATTR_NAN_AVAIL_BLOB]) { 16886 + NL_SET_ERR_MSG(info->extack, 16887 + "NAN Availability blob attribute is required"); 16888 + return -EINVAL; 16889 + } 16890 + 16891 + sched->nan_avail_blob = 16892 + nla_data(info->attrs[NL80211_ATTR_NAN_AVAIL_BLOB]); 16893 + sched->nan_avail_blob_len = 16894 + nla_len(info->attrs[NL80211_ATTR_NAN_AVAIL_BLOB]); 16895 + } 16896 + 16897 + return cfg80211_nan_set_local_schedule(rdev, wdev, sched); 16898 + } 16687 16899 16688 16900 static int nl80211_get_protocol_features(struct sk_buff *skb, 16689 16901 struct genl_info *info) ··· 18836 18096 NL80211_FLAG_NEED_RTNL) \ 18837 18097 SELECTOR(__sel, WIPHY_CLEAR, \ 18838 18098 NL80211_FLAG_NEED_WIPHY | \ 18839 - NL80211_FLAG_CLEAR_SKB) 18099 + NL80211_FLAG_CLEAR_SKB) \ 18100 + SELECTOR(__sel, WDEV_UP_RTNL_NOMTX, \ 18101 + NL80211_FLAG_NEED_WDEV_UP | \ 18102 + NL80211_FLAG_NO_WIPHY_MTX | \ 18103 + NL80211_FLAG_NEED_RTNL) 18840 18104 18841 18105 enum nl80211_internal_flags_selector { 18842 18106 #define SELECTOR(_, name, value) NL80211_IFL_SEL_##name, ··· 19677 18933 .doit = nl80211_stop_nan, 19678 18934 .flags = GENL_ADMIN_PERM, 19679 18935 .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP | 18936 + NL80211_FLAG_NO_WIPHY_MTX | 19680 18937 NL80211_FLAG_NEED_RTNL), 19681 18938 }, 19682 18939 { ··· 19971 19226 .doit = nl80211_epcs_cfg, 19972 19227 .flags = GENL_UNS_ADMIN_PERM, 19973 19228 .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), 19229 + }, 19230 + { 19231 + .cmd = NL80211_CMD_NAN_SET_LOCAL_SCHED, 19232 + .doit = nl80211_nan_set_local_sched, 19233 + .flags = GENL_ADMIN_PERM, 19234 + .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP), 19235 + }, 19236 + { 19237 + .cmd = NL80211_CMD_NAN_SET_PEER_SCHED, 19238 + .doit = nl80211_nan_set_peer_sched, 19239 + .flags = GENL_ADMIN_PERM, 19240 + .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP), 19974 19241 }, 
19975 19242 }; 19976 19243 ··· 21284 20527 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); 21285 20528 struct sk_buff *msg; 21286 20529 void *hdr; 21287 - u32 nlportid = READ_ONCE(wdev->ap_unexpected_nlportid); 20530 + u32 nlportid = READ_ONCE(wdev->unexpected_nlportid); 21288 20531 21289 20532 if (!nlportid) 21290 20533 return false; ··· 21324 20567 trace_cfg80211_rx_spurious_frame(dev, addr, link_id); 21325 20568 21326 20569 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP && 21327 - wdev->iftype != NL80211_IFTYPE_P2P_GO)) { 20570 + wdev->iftype != NL80211_IFTYPE_P2P_GO && 20571 + wdev->iftype != NL80211_IFTYPE_NAN_DATA)) { 21328 20572 trace_cfg80211_return_bool(false); 21329 20573 return false; 21330 20574 } ··· 22894 22136 nlmsg_free(msg); 22895 22137 } 22896 22138 EXPORT_SYMBOL(cfg80211_nan_cluster_joined); 22139 + 22140 + void cfg80211_nan_ulw_update(struct wireless_dev *wdev, 22141 + const u8 *ulw, size_t ulw_len, gfp_t gfp) 22142 + { 22143 + struct wiphy *wiphy = wdev->wiphy; 22144 + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); 22145 + struct sk_buff *msg; 22146 + void *hdr; 22147 + 22148 + trace_cfg80211_nan_ulw_update(wiphy, wdev, ulw, ulw_len); 22149 + 22150 + if (!wdev->owner_nlportid) 22151 + return; 22152 + 22153 + /* 32 for the wiphy idx, 64 for the wdev id, 100 for padding */ 22154 + msg = nlmsg_new(nla_total_size(sizeof(u32)) + 22155 + nla_total_size(ulw_len) + 22156 + nla_total_size(sizeof(u64)) + 100, 22157 + gfp); 22158 + if (!msg) 22159 + return; 22160 + 22161 + hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NAN_ULW_UPDATE); 22162 + if (!hdr) 22163 + goto nla_put_failure; 22164 + 22165 + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || 22166 + nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), 22167 + NL80211_ATTR_PAD) || 22168 + (ulw && ulw_len && 22169 + nla_put(msg, NL80211_ATTR_NAN_ULW, ulw_len, ulw))) 22170 + goto nla_put_failure; 22171 + 22172 + genlmsg_end(msg, hdr); 22173 + 22174 + 
genlmsg_unicast(wiphy_net(wiphy), msg, wdev->owner_nlportid); 22175 + 22176 + return; 22177 + 22178 + nla_put_failure: 22179 + nlmsg_free(msg); 22180 + } 22181 + EXPORT_SYMBOL(cfg80211_nan_ulw_update); 22182 + 22183 + void cfg80211_nan_channel_evac(struct wireless_dev *wdev, 22184 + const struct cfg80211_chan_def *chandef, 22185 + gfp_t gfp) 22186 + { 22187 + struct wiphy *wiphy = wdev->wiphy; 22188 + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); 22189 + struct sk_buff *msg; 22190 + struct nlattr *chan_attr; 22191 + void *hdr; 22192 + 22193 + trace_cfg80211_nan_channel_evac(wiphy, wdev, chandef); 22194 + 22195 + if (!wdev->owner_nlportid) 22196 + return; 22197 + 22198 + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); 22199 + if (!msg) 22200 + return; 22201 + 22202 + hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NAN_CHANNEL_EVAC); 22203 + if (!hdr) 22204 + goto nla_put_failure; 22205 + 22206 + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || 22207 + nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), 22208 + NL80211_ATTR_PAD)) 22209 + goto nla_put_failure; 22210 + 22211 + chan_attr = nla_nest_start(msg, NL80211_ATTR_NAN_CHANNEL); 22212 + if (!chan_attr) 22213 + goto nla_put_failure; 22214 + 22215 + if (nl80211_send_chandef(msg, chandef)) 22216 + goto nla_put_failure; 22217 + 22218 + nla_nest_end(msg, chan_attr); 22219 + 22220 + genlmsg_end(msg, hdr); 22221 + 22222 + genlmsg_unicast(wiphy_net(wiphy), msg, wdev->owner_nlportid); 22223 + 22224 + return; 22225 + 22226 + nla_put_failure: 22227 + nlmsg_free(msg); 22228 + } 22229 + EXPORT_SYMBOL(cfg80211_nan_channel_evac); 22897 22230 22898 22231 /* initialisation/exit functions */ 22899 22232
+32
net/wireless/rdev-ops.h
··· 1060 1060 return ret; 1061 1061 } 1062 1062 1063 + static inline int 1064 + rdev_nan_set_local_sched(struct cfg80211_registered_device *rdev, 1065 + struct wireless_dev *wdev, 1066 + struct cfg80211_nan_local_sched *sched) 1067 + { 1068 + int ret; 1069 + 1070 + trace_rdev_nan_set_local_sched(&rdev->wiphy, wdev, sched); 1071 + if (rdev->ops->nan_set_local_sched) 1072 + ret = rdev->ops->nan_set_local_sched(&rdev->wiphy, wdev, sched); 1073 + else 1074 + ret = -EOPNOTSUPP; 1075 + trace_rdev_return_int(&rdev->wiphy, ret); 1076 + return ret; 1077 + } 1078 + 1079 + static inline int 1080 + rdev_nan_set_peer_sched(struct cfg80211_registered_device *rdev, 1081 + struct wireless_dev *wdev, 1082 + struct cfg80211_nan_peer_sched *sched) 1083 + { 1084 + int ret; 1085 + 1086 + trace_rdev_nan_set_peer_sched(&rdev->wiphy, wdev, sched); 1087 + if (rdev->ops->nan_set_peer_sched) 1088 + ret = rdev->ops->nan_set_peer_sched(&rdev->wiphy, wdev, sched); 1089 + else 1090 + ret = -EOPNOTSUPP; 1091 + trace_rdev_return_int(&rdev->wiphy, ret); 1092 + return ret; 1093 + } 1094 + 1063 1095 static inline int rdev_set_mac_acl(struct cfg80211_registered_device *rdev, 1064 1096 struct net_device *dev, 1065 1097 struct cfg80211_acl_data *params)
+21 -6
net/wireless/reg.c
··· 2348 2348 if (!wdev->netdev || !netif_running(wdev->netdev)) 2349 2349 return true; 2350 2350 2351 + /* NAN doesn't have links, handle it separately */ 2352 + if (iftype == NL80211_IFTYPE_NAN) { 2353 + for (int i = 0; i < wdev->u.nan.n_channels; i++) { 2354 + ret = cfg80211_reg_can_beacon(wiphy, 2355 + &wdev->u.nan.chandefs[i], 2356 + NL80211_IFTYPE_NAN); 2357 + if (!ret) 2358 + return false; 2359 + } 2360 + return true; 2361 + } 2362 + 2351 2363 for (link = 0; link < ARRAY_SIZE(wdev->links); link++) { 2352 2364 struct ieee80211_channel *chan; 2353 2365 ··· 2409 2397 continue; 2410 2398 chandef = wdev->u.ocb.chandef; 2411 2399 break; 2412 - case NL80211_IFTYPE_NAN: 2413 - /* we have no info, but NAN is also pretty universal */ 2414 - continue; 2400 + case NL80211_IFTYPE_NAN_DATA: 2401 + /* NAN channels are checked in NL80211_IFTYPE_NAN interface */ 2402 + break; 2415 2403 default: 2416 2404 /* others not implemented for now */ 2417 2405 WARN_ON_ONCE(1); ··· 2448 2436 struct wireless_dev *wdev; 2449 2437 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); 2450 2438 2451 - guard(wiphy)(wiphy); 2439 + list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { 2440 + bool valid; 2452 2441 2453 - list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) 2454 - if (!reg_wdev_chan_valid(wiphy, wdev)) 2442 + scoped_guard(wiphy, wiphy) 2443 + valid = reg_wdev_chan_valid(wiphy, wdev); 2444 + if (!valid) 2455 2445 cfg80211_leave(rdev, wdev, -1); 2446 + } 2456 2447 } 2457 2448 2458 2449 static void reg_check_chans_work(struct work_struct *work)
+14 -13
net/wireless/sysfs.c
··· 102 102 if (!rdev->wiphy.registered) 103 103 goto out_unlock_rtnl; 104 104 105 - wiphy_lock(&rdev->wiphy); 106 105 if (rdev->wiphy.wowlan_config) { 107 - cfg80211_process_wiphy_works(rdev, NULL); 108 - if (rdev->ops->suspend) 109 - ret = rdev_suspend(rdev, rdev->wiphy.wowlan_config); 110 - if (ret <= 0) 111 - goto out_unlock_wiphy; 106 + scoped_guard(wiphy, &rdev->wiphy) { 107 + cfg80211_process_wiphy_works(rdev, NULL); 108 + if (rdev->ops->suspend) 109 + ret = rdev_suspend(rdev, 110 + rdev->wiphy.wowlan_config); 111 + if (ret <= 0) 112 + goto out_unlock_rtnl; 113 + } 112 114 } 113 115 114 116 /* Driver refused to configure wowlan (ret = 1) or no wowlan */ 115 117 116 118 cfg80211_leave_all(rdev); 117 - cfg80211_process_rdev_events(rdev); 118 - cfg80211_process_wiphy_works(rdev, NULL); 119 - if (rdev->ops->suspend) 120 - ret = rdev_suspend(rdev, NULL); 121 - 122 - out_unlock_wiphy: 123 - wiphy_unlock(&rdev->wiphy); 119 + scoped_guard(wiphy, &rdev->wiphy) { 120 + cfg80211_process_rdev_events(rdev); 121 + cfg80211_process_wiphy_works(rdev, NULL); 122 + if (rdev->ops->suspend) 123 + ret = rdev_suspend(rdev, NULL); 124 + } 124 125 out_unlock_rtnl: 125 126 if (ret == 0) 126 127 rdev->suspended = true;
+105
net/wireless/trace.h
··· 2410 2410 WIPHY_PR_ARG, WDEV_PR_ARG, __entry->cookie) 2411 2411 ); 2412 2412 2413 + TRACE_EVENT(rdev_nan_set_local_sched, 2414 + TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, 2415 + struct cfg80211_nan_local_sched *sched), 2416 + TP_ARGS(wiphy, wdev, sched), 2417 + TP_STRUCT__entry( 2418 + WIPHY_ENTRY 2419 + WDEV_ENTRY 2420 + __array(u8, schedule, CFG80211_NAN_SCHED_NUM_TIME_SLOTS) 2421 + ), 2422 + TP_fast_assign( 2423 + WIPHY_ASSIGN; 2424 + WDEV_ASSIGN; 2425 + memcpy(__entry->schedule, sched->schedule, 2426 + CFG80211_NAN_SCHED_NUM_TIME_SLOTS); 2427 + ), 2428 + TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", schedule: %s", 2429 + WIPHY_PR_ARG, WDEV_PR_ARG, 2430 + __print_array(__entry->schedule, 2431 + CFG80211_NAN_SCHED_NUM_TIME_SLOTS, 1)) 2432 + ); 2433 + 2434 + TRACE_EVENT(rdev_nan_set_peer_sched, 2435 + TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, 2436 + struct cfg80211_nan_peer_sched *sched), 2437 + TP_ARGS(wiphy, wdev, sched), 2438 + TP_STRUCT__entry( 2439 + WIPHY_ENTRY 2440 + WDEV_ENTRY 2441 + __array(u8, peer_addr, ETH_ALEN) 2442 + __field(u8, seq_id) 2443 + __field(u16, committed_dw) 2444 + __field(u16, max_chan_switch) 2445 + ), 2446 + TP_fast_assign( 2447 + WIPHY_ASSIGN; 2448 + WDEV_ASSIGN; 2449 + memcpy(__entry->peer_addr, sched->peer_addr, ETH_ALEN); 2450 + __entry->seq_id = sched->seq_id; 2451 + __entry->committed_dw = sched->committed_dw; 2452 + __entry->max_chan_switch = sched->max_chan_switch; 2453 + ), 2454 + TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT 2455 + ", peer: %pM, seq_id: %u, committed_dw: 0x%x, max_chan_switch: %u", 2456 + WIPHY_PR_ARG, WDEV_PR_ARG, __entry->peer_addr, 2457 + __entry->seq_id, __entry->committed_dw, 2458 + __entry->max_chan_switch 2459 + ) 2460 + ); 2461 + 2413 2462 TRACE_EVENT(rdev_set_mac_acl, 2414 2463 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, 2415 2464 struct cfg80211_acl_data *params), ··· 4324 4275 ), 4325 4276 TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", 
signal_interference_bitmap=0x%x", 4326 4277 WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->signal_interference_bitmap) 4278 + ); 4279 + 4280 + TRACE_EVENT(cfg80211_nan_sched_update_done, 4281 + TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, bool success), 4282 + TP_ARGS(wiphy, wdev, success), 4283 + TP_STRUCT__entry( 4284 + WIPHY_ENTRY 4285 + WDEV_ENTRY 4286 + __field(bool, success) 4287 + ), 4288 + TP_fast_assign( 4289 + WIPHY_ASSIGN; 4290 + WDEV_ASSIGN; 4291 + __entry->success = success; 4292 + ), 4293 + TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT " success=%d", 4294 + WIPHY_PR_ARG, WDEV_PR_ARG, __entry->success) 4295 + ); 4296 + 4297 + TRACE_EVENT(cfg80211_nan_ulw_update, 4298 + TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, 4299 + const u8 *ulw, size_t ulw_len), 4300 + TP_ARGS(wiphy, wdev, ulw, ulw_len), 4301 + TP_STRUCT__entry( 4302 + WIPHY_ENTRY 4303 + WDEV_ENTRY 4304 + __dynamic_array(u8, ulw, ulw_len) 4305 + ), 4306 + TP_fast_assign( 4307 + WIPHY_ASSIGN; 4308 + WDEV_ASSIGN; 4309 + if (ulw && ulw_len) 4310 + memcpy(__get_dynamic_array(ulw), ulw, ulw_len); 4311 + ), 4312 + TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT " ulw: %s", 4313 + WIPHY_PR_ARG, WDEV_PR_ARG, 4314 + __print_array(__get_dynamic_array(ulw), 4315 + __get_dynamic_array_len(ulw), 1)) 4316 + ); 4317 + 4318 + TRACE_EVENT(cfg80211_nan_channel_evac, 4319 + TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, 4320 + const struct cfg80211_chan_def *chandef), 4321 + TP_ARGS(wiphy, wdev, chandef), 4322 + TP_STRUCT__entry( 4323 + WDEV_ENTRY 4324 + WIPHY_ENTRY 4325 + CHAN_DEF_ENTRY 4326 + ), 4327 + TP_fast_assign( 4328 + WDEV_ASSIGN; 4329 + WIPHY_ASSIGN; 4330 + CHAN_DEF_ASSIGN(chandef); 4331 + ), 4332 + TP_printk(WDEV_PR_FMT ", " WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT, 4333 + WDEV_PR_ARG, WIPHY_PR_ARG, CHAN_DEF_PR_ARG) 4327 4334 ); 4328 4335 #endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */ 4329 4336
+22 -6
net/wireless/util.c
··· 90 90 /* see 802.11ax D6.1 27.3.23.2 */ 91 91 if (chan == 2) 92 92 return MHZ_TO_KHZ(5935); 93 - if (chan <= 233) 93 + if (chan <= 253) 94 94 return MHZ_TO_KHZ(5950 + chan * 5); 95 95 break; 96 96 case NL80211_BAND_60GHZ: ··· 625 625 case cpu_to_le16(0): 626 626 if (iftype != NL80211_IFTYPE_ADHOC && 627 627 iftype != NL80211_IFTYPE_STATION && 628 - iftype != NL80211_IFTYPE_OCB) 629 - return -1; 628 + iftype != NL80211_IFTYPE_OCB && 629 + iftype != NL80211_IFTYPE_NAN_DATA) 630 + return -1; 630 631 break; 631 632 } 632 633 ··· 1145 1144 ev->ij.channel); 1146 1145 break; 1147 1146 case EVENT_STOPPED: 1148 - cfg80211_leave(wiphy_to_rdev(wdev->wiphy), wdev, 1149 - ev->link_id); 1147 + /* 1148 + * for NAN interfaces cfg80211_leave must be called but 1149 + * locking here doesn't allow this. 1150 + */ 1151 + if (WARN_ON(wdev->iftype == NL80211_IFTYPE_NAN)) 1152 + break; 1153 + 1154 + cfg80211_leave_locked(wiphy_to_rdev(wdev->wiphy), wdev, 1155 + ev->link_id); 1150 1156 break; 1151 1157 case EVENT_PORT_AUTHORIZED: 1152 1158 __cfg80211_port_authorized(wdev, ev->pa.peer_addr, ··· 1192 1184 if (otype == NL80211_IFTYPE_AP_VLAN) 1193 1185 return -EOPNOTSUPP; 1194 1186 1187 + /* 1188 + * for NAN interfaces cfg80211_leave must be called for leaving, 1189 + * but locking here doesn't allow this. 
1190 + */ 1191 + if (otype == NL80211_IFTYPE_NAN) 1192 + return -EOPNOTSUPP; 1193 + 1195 1194 /* cannot change into P2P device or NAN */ 1196 1195 if (ntype == NL80211_IFTYPE_P2P_DEVICE || 1197 1196 ntype == NL80211_IFTYPE_NAN) ··· 1219 1204 dev->ieee80211_ptr->use_4addr = false; 1220 1205 rdev_set_qos_map(rdev, dev, NULL); 1221 1206 1222 - cfg80211_leave(rdev, dev->ieee80211_ptr, -1); 1207 + cfg80211_leave_locked(rdev, dev->ieee80211_ptr, -1); 1223 1208 1224 1209 cfg80211_process_rdev_events(rdev); 1225 1210 cfg80211_mlme_purge_registrations(dev->ieee80211_ptr); ··· 1247 1232 case NL80211_IFTYPE_OCB: 1248 1233 case NL80211_IFTYPE_P2P_CLIENT: 1249 1234 case NL80211_IFTYPE_ADHOC: 1235 + case NL80211_IFTYPE_NAN_DATA: 1250 1236 dev->priv_flags |= IFF_DONT_BRIDGE; 1251 1237 break; 1252 1238 case NL80211_IFTYPE_P2P_GO: