Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:
"One last pull request before heading to Vancouver for LPC, here we have:

1) Don't forget to free VSI contexts during ice driver unload, from
Victor Raj.

2) Don't forget napi delete calls during device remove in ice driver,
from Dave Ertman.

3) Don't request VLAN tag insertion of ibmvnic device when SKB
doesn't have VLAN tags at all.

4) IPV4 frag handling code has to accommodate the situation where two
threads try to insert the same fragment into the hash table at the
same time. From Eric Dumazet.

5) Relatedly, don't use protocol ports for flow separation of
fragmented frames, also from Eric Dumazet.

6) Memory leaks in qed driver, from Denis Bolotin.

7) Correct valid MTU range in smsc95xx driver, from Stefan Wahren.

8) Validate cls_flower nested policies properly, from Jakub Kicinski.

9) Clearing of stats counters in mv88e6xxx driver doesn't retain
important bits in the G1_STATS_OP register, causing the chip to
hang. Fix from Andrew Lunn"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (41 commits)
act_mirred: clear skb->tstamp on redirect
net: dsa: mv88e6xxx: Fix clearing of stats counters
tipc: fix link re-establish failure
net: sched: cls_flower: validate nested enc_opts_policy to avoid warning
net: mvneta: correct typo
flow_dissector: do not dissect l4 ports for fragments
net: qualcomm: rmnet: Fix incorrect assignment of real_dev
net: aquantia: allow rx checksum offload configuration
net: aquantia: invalid checksumm offload implementation
net: aquantia: fixed enable unicast on 32 macvlan
net: aquantia: fix potential IOMMU fault after driver unbind
net: aquantia: synchronized flow control between mac/phy
net: smsc95xx: Fix MTU range
net: stmmac: Fix RX packet size > 8191
qed: Fix potential memory corruption
qed: Fix SPQ entries not returned to pool in error flows
qed: Fix blocking/unlimited SPQ entries leak
qed: Fix memory/entry leak in qed_init_sp_request()
inet: frags: better deal with smp races
net: hns3: bugfix for not checking return value
...

+430 -199
+2
drivers/net/dsa/mv88e6xxx/global1.c
···
 	if (err)
 		return err;
 
+	/* Keep the histogram mode bits */
+	val &= MV88E6XXX_G1_STATS_OP_HIST_RX_TX;
 	val |= MV88E6XXX_G1_STATS_OP_BUSY | MV88E6XXX_G1_STATS_OP_FLUSH_ALL;
 
 	err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP, val);
+4 -4
drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
···
 				      struct ethtool_pauseparam *pause)
 {
 	struct aq_nic_s *aq_nic = netdev_priv(ndev);
+	u32 fc = aq_nic->aq_nic_cfg.flow_control;
 
 	pause->autoneg = 0;
 
-	if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
-		pause->rx_pause = 1;
-	if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_TX)
-		pause->tx_pause = 1;
+	pause->rx_pause = !!(fc & AQ_NIC_FC_RX);
+	pause->tx_pause = !!(fc & AQ_NIC_FC_TX);
+
 }
 
 static int aq_ethtool_set_pauseparam(struct net_device *ndev,
+6
drivers/net/ethernet/aquantia/atlantic/aq_hw.h
···
 
 	int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
 
+	int (*hw_set_offload)(struct aq_hw_s *self,
+			      struct aq_nic_cfg_s *aq_nic_cfg);
+
+	int (*hw_set_fc)(struct aq_hw_s *self, u32 fc, u32 tc);
 };
 
 struct aq_fw_ops {
···
 	int (*update_link_status)(struct aq_hw_s *self);
 
 	int (*update_stats)(struct aq_hw_s *self);
+
+	u32 (*get_flow_control)(struct aq_hw_s *self, u32 *fcmode);
 
 	int (*set_flow_control)(struct aq_hw_s *self);
 
+8 -2
drivers/net/ethernet/aquantia/atlantic/aq_main.c
···
 	struct aq_nic_s *aq_nic = netdev_priv(ndev);
 	struct aq_nic_cfg_s *aq_cfg = aq_nic_get_cfg(aq_nic);
 	bool is_lro = false;
+	int err = 0;
 
-	if (aq_cfg->hw_features & NETIF_F_LRO) {
+	aq_cfg->features = features;
+
+	if (aq_cfg->aq_hw_caps->hw_features & NETIF_F_LRO) {
 		is_lro = features & NETIF_F_LRO;
 
 		if (aq_cfg->is_lro != is_lro) {
···
 			}
 		}
 	}
+	if ((aq_nic->ndev->features ^ features) & NETIF_F_RXCSUM)
+		err = aq_nic->aq_hw_ops->hw_set_offload(aq_nic->aq_hw,
+							aq_cfg);
 
-	return 0;
+	return err;
 }
 
 static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr)
+15 -3
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
···
 	}
 
 	cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
-	cfg->hw_features = cfg->aq_hw_caps->hw_features;
+	cfg->features = cfg->aq_hw_caps->hw_features;
 }
 
 static int aq_nic_update_link_status(struct aq_nic_s *self)
 {
 	int err = self->aq_fw_ops->update_link_status(self->aq_hw);
+	u32 fc = 0;
 
 	if (err)
 		return err;
···
 			AQ_CFG_DRV_NAME, self->link_status.mbps,
 			self->aq_hw->aq_link_status.mbps);
 		aq_nic_update_interrupt_moderation_settings(self);
+
+		/* Driver has to update flow control settings on RX block
+		 * on any link event.
+		 * We should query FW whether it negotiated FC.
+		 */
+		if (self->aq_fw_ops->get_flow_control)
+			self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
+		if (self->aq_hw_ops->hw_set_fc)
+			self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
 	}
 
 	self->link_status = self->aq_hw->aq_link_status;
···
 		}
 	}
 
-	if (i > 0 && i < AQ_HW_MULTICAST_ADDRESS_MAX) {
+	if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
 		packet_filter |= IFF_MULTICAST;
 		self->mc_list.count = i;
 		self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
···
 		ethtool_link_ksettings_add_link_mode(cmd, advertising,
 						     Pause);
 
-	if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX)
+	/* Asym is when either RX or TX, but not both */
+	if (!!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX) ^
+	    !!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX))
 		ethtool_link_ksettings_add_link_mode(cmd, advertising,
 						     Asym_Pause);
 
+1 -1
drivers/net/ethernet/aquantia/atlantic/aq_nic.h
···
 
 struct aq_nic_cfg_s {
 	const struct aq_hw_caps_s *aq_hw_caps;
-	u64 hw_features;
+	u64 features;
 	u32 rxds;		/* rx ring size, descriptors # */
 	u32 txds;		/* tx ring size, descriptors # */
 	u32 vecs;		/* vecs==allocated irqs */
+23 -12
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
···
 	return !!budget;
 }
 
+static void aq_rx_checksum(struct aq_ring_s *self,
+			   struct aq_ring_buff_s *buff,
+			   struct sk_buff *skb)
+{
+	if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM))
+		return;
+
+	if (unlikely(buff->is_cso_err)) {
+		++self->stats.rx.errors;
+		skb->ip_summed = CHECKSUM_NONE;
+		return;
+	}
+	if (buff->is_ip_cso) {
+		__skb_incr_checksum_unnecessary(skb);
+		if (buff->is_udp_cso || buff->is_tcp_cso)
+			__skb_incr_checksum_unnecessary(skb);
+	} else {
+		skb->ip_summed = CHECKSUM_NONE;
+	}
+}
+
 #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 int aq_ring_rx_clean(struct aq_ring_s *self,
 		     struct napi_struct *napi,
···
 		}
 
 		skb->protocol = eth_type_trans(skb, ndev);
-		if (unlikely(buff->is_cso_err)) {
-			++self->stats.rx.errors;
-			skb->ip_summed = CHECKSUM_NONE;
-		} else {
-			if (buff->is_ip_cso) {
-				__skb_incr_checksum_unnecessary(skb);
-				if (buff->is_udp_cso || buff->is_tcp_cso)
-					__skb_incr_checksum_unnecessary(skb);
-			} else {
-				skb->ip_summed = CHECKSUM_NONE;
-			}
-		}
+
+		aq_rx_checksum(self, buff, skb);
 
 		skb_set_hash(skb, buff->rss_hash,
 			     buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
+38 -23
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
···
 	return err;
 }
 
+static int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
+{
+	hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc);
+	return 0;
+}
+
 static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
 {
 	u32 tc = 0U;
 	u32 buff_size = 0U;
 	unsigned int i_priority = 0U;
-	bool is_rx_flow_control = false;
 
 	/* TPS Descriptor rate init */
 	hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
···
 
 	/* QoS Rx buf size per TC */
 	tc = 0;
-	is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
 	buff_size = HW_ATL_B0_RXBUF_MAX;
 
 	hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
···
 						   (buff_size *
 						   (1024U / 32U) * 50U) /
 						   100U, tc);
-	hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
+
+	hw_atl_b0_set_fc(self, self->aq_nic_cfg->flow_control, tc);
 
 	/* QoS 802.1p priority -> TC mapping */
 	for (i_priority = 8U; i_priority--;)
···
 	hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
 
 	/* RX checksums offloads*/
-	hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
-	hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);
+	hw_atl_rpo_ipv4header_crc_offload_en_set(self, !!(aq_nic_cfg->features &
+							  NETIF_F_RXCSUM));
+	hw_atl_rpo_tcp_udp_crc_offload_en_set(self, !!(aq_nic_cfg->features &
+						       NETIF_F_RXCSUM));
 
 	/* LSO offloads*/
 	hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
···
 		struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
 			&ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE];
 
-		unsigned int is_err = 1U;
 		unsigned int is_rx_check_sum_enabled = 0U;
 		unsigned int pkt_type = 0U;
+		u8 rx_stat = 0U;
 
 		if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */
 			break;
···
 
 		buff = &ring->buff_ring[ring->hw_head];
 
-		is_err = (0x0000003CU & rxd_wb->status);
+		rx_stat = (0x0000003CU & rxd_wb->status) >> 2;
 
 		is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
-		is_err &= ~0x20U; /* exclude validity bit */
 
 		pkt_type = 0xFFU & (rxd_wb->type >> 4);
 
-		if (is_rx_check_sum_enabled) {
-			if (0x0U == (pkt_type & 0x3U))
-				buff->is_ip_cso = (is_err & 0x08U) ? 0U : 1U;
+		if (is_rx_check_sum_enabled & BIT(0) &&
+		    (0x0U == (pkt_type & 0x3U)))
+			buff->is_ip_cso = (rx_stat & BIT(1)) ? 0U : 1U;
 
+		if (is_rx_check_sum_enabled & BIT(1)) {
 			if (0x4U == (pkt_type & 0x1CU))
-				buff->is_udp_cso = buff->is_cso_err ? 0U : 1U;
+				buff->is_udp_cso = (rx_stat & BIT(2)) ? 0U :
+						   !!(rx_stat & BIT(3));
 			else if (0x0U == (pkt_type & 0x1CU))
-				buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U;
-
-			/* Checksum offload workaround for small packets */
-			if (rxd_wb->pkt_len <= 60) {
-				buff->is_ip_cso = 0U;
-				buff->is_cso_err = 0U;
-			}
+				buff->is_tcp_cso = (rx_stat & BIT(2)) ? 0U :
+						   !!(rx_stat & BIT(3));
 		}
-
-		is_err &= ~0x18U;
+		buff->is_cso_err = !!(rx_stat & 0x6);
+		/* Checksum offload workaround for small packets */
+		if (unlikely(rxd_wb->pkt_len <= 60)) {
+			buff->is_ip_cso = 0U;
+			buff->is_cso_err = 0U;
+		}
 
 		dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);
 
-		if (is_err || rxd_wb->type & 0x1000U) {
-			/* status error or DMA error */
+		if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
+			/* MAC error or DMA error */
 			buff->is_error = 1U;
 		} else {
 			if (self->aq_nic_cfg->is_rss) {
···
 static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
 {
 	hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
+
+	/* Invalidate Descriptor Cache to prevent writing to the cached
+	 * descriptors and to the data pointer of those descriptors
+	 */
+	hw_atl_rdm_rx_dma_desc_cache_init_set(self, 1);
+
 	return aq_hw_err_from_flags(self);
 }
 
···
 	.hw_get_regs                 = hw_atl_utils_hw_get_regs,
 	.hw_get_hw_stats             = hw_atl_utils_get_hw_stats,
 	.hw_get_fw_version           = hw_atl_utils_get_fw_version,
+	.hw_set_offload              = hw_atl_b0_hw_offload_set,
+	.hw_set_fc                   = hw_atl_b0_set_fc,
 };
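For readers tracing the new rx_stat handling above, the bit meanings as the driver now uses them (inferred from this code alone, not from separate hardware documentation) are:

    rx_stat BIT(0): MAC error; together with the DMA error bit in rxd_wb->type it marks the buffer is_error
    rx_stat BIT(1): IPv4 header checksum error
    rx_stat BIT(2): TCP/UDP checksum error
    rx_stat BIT(3): an L4 checksum was actually computed, so is_udp_cso/is_tcp_cso are only set when BIT(3) is present and BIT(2) is clear
    rx_stat & 0x6:  any checksum error, recorded as buff->is_cso_err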
+8
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
···
 			    HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode);
 }
 
+void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init)
+{
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
+			    HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK,
+			    HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT,
+			    init);
+}
+
 void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
 					    u32 rx_pkt_buff_size_per_tc, u32 buffer)
 {
+3
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
···
 					    u32 rx_pkt_buff_size_per_tc,
 					    u32 buffer);
 
+/* set rdm rx dma descriptor cache init */
+void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init);
+
 /* set rx xoff enable (per tc) */
 void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
 				      u32 buffer);
+18
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
···
 /* default value of bitfield desc{d}_reset */
 #define HW_ATL_RDM_DESCDRESET_DEFAULT 0x0
 
+/* rdm_desc_init_i bitfield definitions
+ * preprocessor definitions for the bitfield rdm_desc_init_i.
+ * port="pif_rdm_desc_init_i"
+ */
+
+/* register address for bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR 0x00005a00
+/* bitmask for bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK 0xffffffff
+/* inverted bitmask for bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSKN 0x00000000
+/* lower bit position of bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT 0
+/* width of bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_WIDTH 32
+/* default value of bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_DEFAULT 0x0
+
 /* rx int_desc_wrb_en bitfield definitions
  * preprocessor definitions for the bitfield "int_desc_wrb_en".
  * port="pif_rdm_int_desc_wrb_en_i"
+21
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
···
 #define HW_ATL_FW2X_MPI_STATE_ADDR	0x370
 #define HW_ATL_FW2X_MPI_STATE2_ADDR	0x374
 
+#define HW_ATL_FW2X_CAP_PAUSE            BIT(CAPS_HI_PAUSE)
+#define HW_ATL_FW2X_CAP_ASYM_PAUSE       BIT(CAPS_HI_ASYMMETRIC_PAUSE)
 #define HW_ATL_FW2X_CAP_SLEEP_PROXY      BIT(CAPS_HI_SLEEP_PROXY)
 #define HW_ATL_FW2X_CAP_WOL              BIT(CAPS_HI_WOL)
 
···
 	return 0;
 }
 
+static u32 aq_fw2x_get_flow_control(struct aq_hw_s *self, u32 *fcmode)
+{
+	u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR);
+
+	if (mpi_state & HW_ATL_FW2X_CAP_PAUSE)
+		if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
+			*fcmode = AQ_NIC_FC_RX;
+		else
+			*fcmode = AQ_NIC_FC_RX | AQ_NIC_FC_TX;
+	else
+		if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
+			*fcmode = AQ_NIC_FC_TX;
+		else
+			*fcmode = 0;
+
+	return 0;
+}
+
 const struct aq_fw_ops aq_fw_2x_ops = {
 	.init = aq_fw2x_init,
 	.deinit = aq_fw2x_deinit,
···
 	.set_eee_rate = aq_fw2x_set_eee_rate,
 	.get_eee_rate = aq_fw2x_get_eee_rate,
 	.set_flow_control = aq_fw2x_set_flow_control,
+	.get_flow_control = aq_fw2x_get_flow_control
 };
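The nested conditionals in aq_fw2x_get_flow_control() are the usual PAUSE/ASYM_PAUSE resolution; spelled out as a table derived directly from the code above:

    CAP_PAUSE  CAP_ASYM_PAUSE  resulting *fcmode
        1            1         AQ_NIC_FC_RX
        1            0         AQ_NIC_FC_RX | AQ_NIC_FC_TX
        0            1         AQ_NIC_FC_TX
        0            0         0 (no flow control)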
+2 -1
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
···
 	/* Hardware table is only clear when pf resets */
 	if (!(handle->flags & HNAE3_SUPPORT_VF)) {
 		ret = hns3_restore_vlan(netdev);
-		return ret;
+		if (ret)
+			return ret;
 	}
 
 	ret = hns3_restore_fd_rules(netdev);
+1 -1
drivers/net/ethernet/ibm/ibmvnic.c
···
 	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
 	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
 
-	if (adapter->vlan_header_insertion) {
+	if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
 		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
 		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
 	}
+5 -3
drivers/net/ethernet/intel/i40e/i40e_main.c
···
 			   NETIF_F_GSO_GRE		|
 			   NETIF_F_GSO_GRE_CSUM		|
 			   NETIF_F_GSO_PARTIAL		|
+			   NETIF_F_GSO_IPXIP4		|
+			   NETIF_F_GSO_IPXIP6		|
 			   NETIF_F_GSO_UDP_TUNNEL	|
 			   NETIF_F_GSO_UDP_TUNNEL_CSUM	|
 			   NETIF_F_SCTP_CRC		|
···
 	/* record features VLANs can make use of */
 	netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
 
-	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
-		netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
-
 	hw_features = hw_enc_features		|
 		      NETIF_F_HW_VLAN_CTAG_TX	|
 		      NETIF_F_HW_VLAN_CTAG_RX;
+
+	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+		hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
 
 	netdev->hw_features |= hw_features;
 
+3 -1
drivers/net/ethernet/intel/ice/ice.h
···
 #define ICE_MIN_INTR_PER_VF		(ICE_MIN_QS_PER_VF + 1)
 #define ICE_DFLT_INTR_PER_VF		(ICE_DFLT_QS_PER_VF + 1)
 
+#define ICE_MAX_RESET_WAIT		20
+
 #define ICE_VSIQF_HKEY_ARRAY_SIZE	((VSIQF_HKEY_MAX_INDEX + 1) * 4)
 
 #define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
···
 	u64 tx_linearize;
 	DECLARE_BITMAP(state, __ICE_STATE_NBITS);
 	DECLARE_BITMAP(flags, ICE_VSI_FLAG_NBITS);
-	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 	unsigned int current_netdev_flags;
 	u32 tx_restart;
 	u32 tx_busy;
···
 int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
 void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
+void ice_napi_del(struct ice_vsi *vsi);
 
 #endif /* _ICE_H_ */
+3
drivers/net/ethernet/intel/ice/ice_common.c
···
 	/* Attempt to disable FW logging before shutting down control queues */
 	ice_cfg_fw_log(hw, false);
 	ice_shutdown_all_ctrlq(hw);
+
+	/* Clear VSI contexts if not already cleared */
+	ice_clear_all_vsi_ctx(hw);
 }
 
 /**
+6 -1
drivers/net/ethernet/intel/ice/ice_ethtool.c
···
 	}
 
 	if (!test_bit(__ICE_DOWN, pf->state)) {
-		/* Give it a little more time to try to come back */
+		/* Give it a little more time to try to come back. If still
+		 * down, restart autoneg link or reinitialize the interface.
+		 */
 		msleep(75);
 		if (!test_bit(__ICE_DOWN, pf->state))
 			return ice_nway_reset(netdev);
+
+		ice_down(vsi);
+		ice_up(vsi);
 	}
 
 	return err;
+2
drivers/net/ethernet/intel/ice/ice_hw_autogen.h
···
 #define GLNVM_ULD			0x000B6008
 #define GLNVM_ULD_CORER_DONE_M		BIT(3)
 #define GLNVM_ULD_GLOBR_DONE_M		BIT(4)
+#define GLPCI_CNF2			0x000BE004
+#define GLPCI_CNF2_CACHELINE_SIZE_M	BIT(1)
 #define PF_FUNC_RID			0x0009E880
 #define PF_FUNC_RID_FUNC_NUM_S		0
 #define PF_FUNC_RID_FUNC_NUM_M		ICE_M(0x7, 0)
+2 -1
drivers/net/ethernet/intel/ice/ice_lib.c
···
 	status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctxt, NULL);
 	if (status) {
 		netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n",
-			   ena ? "Ena" : "Dis", vsi->idx, vsi->vsi_num, status,
+			   ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
 			   vsi->back->hw.adminq.sq_last_status);
 		goto err_out;
 	}
···
 	 * on this wq
 	 */
 	if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) {
+		ice_napi_del(vsi);
 		unregister_netdev(vsi->netdev);
 		free_netdev(vsi->netdev);
 		vsi->netdev = NULL;
+48 -38
drivers/net/ethernet/intel/ice/ice_main.c
···
  * ice_napi_del - Remove NAPI handler for the VSI
  * @vsi: VSI for which NAPI handler is to be removed
  */
-static void ice_napi_del(struct ice_vsi *vsi)
+void ice_napi_del(struct ice_vsi *vsi)
 {
 	int v_idx;
 
···
 {
 	struct ice_netdev_priv *np = netdev_priv(netdev);
 	struct ice_vsi *vsi = np->vsi;
-	int ret;
 
 	if (vid >= VLAN_N_VID) {
 		netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
···
 
 	/* Enable VLAN pruning when VLAN 0 is added */
 	if (unlikely(!vid)) {
-		ret = ice_cfg_vlan_pruning(vsi, true);
+		int ret = ice_cfg_vlan_pruning(vsi, true);
+
 		if (ret)
 			return ret;
 	}
···
 	 * needed to continue allowing all untagged packets since VLAN prune
 	 * list is applied to all packets by the switch
 	 */
-	ret = ice_vsi_add_vlan(vsi, vid);
-
-	if (!ret)
-		set_bit(vid, vsi->active_vlans);
-
-	return ret;
+	return ice_vsi_add_vlan(vsi, vid);
 }
 
 /**
···
 	status = ice_vsi_kill_vlan(vsi, vid);
 	if (status)
 		return status;
-
-	clear_bit(vid, vsi->active_vlans);
 
 	/* Disable VLAN pruning when VLAN 0 is removed */
 	if (unlikely(!vid))
···
 }
 
 /**
+ * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
+ * @pf: pointer to the PF structure
+ *
+ * There is no error returned here because the driver should be able to handle
+ * 128 Byte cache lines, so we only print a warning in case issues are seen,
+ * specifically with Tx.
+ */
+static void ice_verify_cacheline_size(struct ice_pf *pf)
+{
+	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
+		dev_warn(&pf->pdev->dev,
+			 "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
+			 ICE_CACHE_LINE_BYTES);
+}
+
+/**
  * ice_probe - Device initialization routine
  * @pdev: PCI device information struct
  * @ent: entry in ice_pci_tbl
···
 	/* since everything is good, start the service timer */
 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
 
+	ice_verify_cacheline_size(pf);
+
 	return 0;
 
 err_alloc_sw_unroll:
···
 
 	if (!pf)
 		return;
+
+	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
+		if (!ice_is_reset_in_progress(pf->state))
+			break;
+		msleep(100);
+	}
 
 	set_bit(__ICE_DOWN, pf->state);
 	ice_service_task_stop(pf);
···
 }
 
 /**
- * ice_restore_vlan - Reinstate VLANs when vsi/netdev comes back up
- * @vsi: the VSI being brought back up
- */
-static int ice_restore_vlan(struct ice_vsi *vsi)
-{
-	int err;
-	u16 vid;
-
-	if (!vsi->netdev)
-		return -EINVAL;
-
-	err = ice_vsi_vlan_setup(vsi);
-	if (err)
-		return err;
-
-	for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) {
-		err = ice_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), vid);
-		if (err)
-			break;
-	}
-
-	return err;
-}
-
-/**
  * ice_vsi_cfg - Setup the VSI
  * @vsi: the VSI being configured
  *
···
 
 	if (vsi->netdev) {
 		ice_set_rx_mode(vsi->netdev);
-		err = ice_restore_vlan(vsi);
+
+		err = ice_vsi_vlan_setup(vsi);
+
 		if (err)
 			return err;
 	}
···
 	struct device *dev = &pf->pdev->dev;
 	struct ice_hw *hw = &pf->hw;
 	enum ice_status ret;
-	int err;
+	int err, i;
 
 	if (test_bit(__ICE_DOWN, pf->state))
 		goto clear_recovery;
···
 	}
 
 	ice_reset_all_vfs(pf, true);
+
+	for (i = 0; i < pf->num_alloc_vsi; i++) {
+		bool link_up;
+
+		if (!pf->vsi[i] || pf->vsi[i]->type != ICE_VSI_PF)
+			continue;
+		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
+		if (link_up) {
+			netif_carrier_on(pf->vsi[i]->netdev);
+			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
+		} else {
+			netif_carrier_off(pf->vsi[i]->netdev);
+			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
+		}
+	}
+
 	/* if we get here, reset flow is successful */
 	clear_bit(__ICE_RESET_FAILED, pf->state);
 	return;
+12
drivers/net/ethernet/intel/ice/ice_switch.c
···
 }
 
 /**
+ * ice_clear_all_vsi_ctx - clear all the VSI context entries
+ * @hw: pointer to the hw struct
+ */
+void ice_clear_all_vsi_ctx(struct ice_hw *hw)
+{
+	u16 i;
+
+	for (i = 0; i < ICE_MAX_VSI; i++)
+		ice_clear_vsi_ctx(hw, i);
+}
+
+/**
  * ice_add_vsi - add VSI context to the hardware and VSI handle list
  * @hw: pointer to the hw struct
  * @vsi_handle: unique VSI handle provided by drivers
+2
drivers/net/ethernet/intel/ice/ice_switch.h
···
 			 struct ice_sq_cd *cd);
 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle);
 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle);
+void ice_clear_all_vsi_ctx(struct ice_hw *hw);
+/* Switch config */
 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
 
 /* Switch/bridge related commands */
+6 -5
drivers/net/ethernet/intel/ice/ice_txrx.c
···
 
 	/* update gso_segs and bytecount */
 	first->gso_segs = skb_shinfo(skb)->gso_segs;
-	first->bytecount = (first->gso_segs - 1) * off->header_len;
+	first->bytecount += (first->gso_segs - 1) * off->header_len;
 
 	cd_tso_len = skb->len - off->header_len;
 	cd_mss = skb_shinfo(skb)->gso_size;
···
  * magnitude greater than our largest possible GSO size.
  *
  * This would then be implemented as:
- *     return (((size >> 12) * 85) >> 8) + 1;
+ *     return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
  *
  * Since multiplication and division are commutative, we can reorder
  * operations into:
- *     return ((size * 85) >> 20) + 1;
+ *     return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
  */
 static unsigned int ice_txd_use_count(unsigned int size)
 {
-	return ((size * 85) >> 20) + 1;
+	return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
 }
 
 /**
···
 	 *       + 1 desc for context descriptor,
 	 * otherwise try next time
 	 */
-	if (ice_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+	if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
+			      ICE_DESCS_FOR_CTX_DESC)) {
 		tx_ring->tx_stats.tx_busy++;
 		return NETDEV_TX_BUSY;
 	}
+15 -2
drivers/net/ethernet/intel/ice/ice_txrx.h
···
 #define ICE_RX_BUF_WRITE	16	/* Must be power of 2 */
 #define ICE_MAX_TXQ_PER_TXQG	128
 
-/* Tx Descriptors needed, worst case */
-#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+/* We are assuming that the cache line is always 64 Bytes here for ice.
+ * In order to make sure that is a correct assumption there is a check in probe
+ * to print a warning if the read from GLPCI_CNF2 tells us that the cache line
+ * size is 128 bytes. We do it this way because we do not want to read the
+ * GLPCI_CNF2 register or a variable containing the value on every pass through
+ * the Tx path.
+ */
+#define ICE_CACHE_LINE_BYTES		64
+#define ICE_DESCS_PER_CACHE_LINE	(ICE_CACHE_LINE_BYTES / \
+					 sizeof(struct ice_tx_desc))
+#define ICE_DESCS_FOR_CTX_DESC		1
+#define ICE_DESCS_FOR_SKB_DATA_PTR	1
+/* Tx descriptors needed, worst case */
+#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
+		     ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
 #define ICE_DESC_UNUSED(R)	\
 	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
 	(R)->next_to_clean - (R)->next_to_use - 1)
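Plugging in typical values makes the new worst case concrete (assuming sizeof(struct ice_tx_desc) is 16 bytes and MAX_SKB_FRAGS is 17, the usual value with 4 KiB pages):

    ICE_DESCS_PER_CACHE_LINE = 64 / 16 = 4
    DESC_NEEDED = 17 + 1 + 4 + 1 = 23 descriptors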
+1 -1
drivers/net/ethernet/intel/ice/ice_type.h
···
 	u64 phy_type_low;
 	u16 max_frame_size;
 	u16 link_speed;
+	u16 req_speeds;
 	u8 lse_ena;	/* Link Status Event notification */
 	u8 link_info;
 	u8 an_info;
 	u8 ext_info;
 	u8 pacing;
-	u8 req_speeds;
 	/* Refer to #define from module_type[ICE_MODULE_TYPE_TOTAL_BYTE] of
 	 * ice_aqc_get_phy_caps structure
 	 */
+1 -3
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
···
 	struct ice_vsi_ctx ctxt = { 0 };
 	enum ice_status status;
 
-	ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_TAGGED |
+	ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
 			       ICE_AQ_VSI_PVLAN_INSERT_PVID |
 			       ICE_AQ_VSI_VLAN_EMOD_STR;
 	ctxt.info.pvid = cpu_to_le16(vid);
···
 
 		if (!ice_vsi_add_vlan(vsi, vid)) {
 			vf->num_vlan++;
-			set_bit(vid, vsi->active_vlans);
 
 			/* Enable VLAN pruning when VLAN 0 is added */
 			if (unlikely(!vid))
···
 		 */
 		if (!ice_vsi_kill_vlan(vsi, vid)) {
 			vf->num_vlan--;
-			clear_bit(vid, vsi->active_vlans);
 
 			/* Disable VLAN pruning when removing VLAN 0 */
 			if (unlikely(!vid))
+7 -5
drivers/net/ethernet/intel/igb/igb_ptp.c
···
  * 2^40 * 10^-9 /  60  = 18.3 minutes.
  *
  * SYSTIM is converted to real time using a timecounter. As
- * timecounter_cyc2time() allows old timestamps, the timecounter
- * needs to be updated at least once per half of the SYSTIM interval.
- * Scheduling of delayed work is not very accurate, so we aim for 8
- * minutes to be sure the actual interval is shorter than 9.16 minutes.
+ * timecounter_cyc2time() allows old timestamps, the timecounter needs
+ * to be updated at least once per half of the SYSTIM interval.
+ * Scheduling of delayed work is not very accurate, and also the NIC
+ * clock can be adjusted to run up to 6% faster and the system clock
+ * up to 10% slower, so we aim for 6 minutes to be sure the actual
+ * interval in the NIC time is shorter than 9.16 minutes.
  */
 
-#define IGB_SYSTIM_OVERFLOW_PERIOD	(HZ * 60 * 8)
+#define IGB_SYSTIM_OVERFLOW_PERIOD	(HZ * 60 * 6)
 #define IGB_PTP_TX_TIMEOUT		(HZ * 15)
 #define INCPERIOD_82576			BIT(E1000_TIMINCA_16NS_SHIFT)
 #define INCVALUE_82576_MASK		GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0)
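A quick check of the new period, using only the figures from the comment: the 2^40 ns SYSTIM span is about 1099.5 s, i.e. 18.3 minutes, so the timecounter must be refreshed within half of that, about 9.16 minutes. With the system clock up to 10% slow and the NIC clock up to 6% fast, a 6-minute schedule can stretch to roughly 6 / 0.90 * 1.06 ≈ 7.1 minutes of NIC time, comfortably inside the limit, whereas the old 8-minute schedule could reach 8 / 0.90 * 1.06 ≈ 9.4 minutes and overshoot it.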
+2 -2
drivers/net/ethernet/marvell/mvneta.c
···
 #if defined(__LITTLE_ENDIAN)
 struct mvneta_tx_desc {
 	u32  command;		/* Options used by HW for packet transmitting.*/
-	u16  reserverd1;	/* csum_l4 (for future use) */
+	u16  reserved1;		/* csum_l4 (for future use) */
 	u16  data_size;		/* Data size of transmitted packet in bytes */
 	u32  buf_phys_addr;	/* Physical addr of transmitted buffer */
 	u32  reserved2;		/* hw_cmd - (for future use, PMT) */
···
 #else
 struct mvneta_tx_desc {
 	u16  data_size;		/* Data size of transmitted packet in bytes */
-	u16  reserverd1;	/* csum_l4 (for future use) */
+	u16  reserved1;		/* csum_l4 (for future use) */
 	u32  command;		/* Options used by HW for packet transmitting.*/
 	u32  reserved2;		/* hw_cmd - (for future use, PMT) */
 	u32  buf_phys_addr;	/* Physical addr of transmitted buffer */
+8 -3
drivers/net/ethernet/qlogic/qed/qed_fcoe.c
···
 			  "Cannot satisfy CQ amount. CQs requested %d, CQs available %d. Aborting function start\n",
 			  fcoe_pf_params->num_cqs,
 			  p_hwfn->hw_info.feat_num[QED_FCOE_CQ]);
-		return -EINVAL;
+		rc = -EINVAL;
+		goto err;
 	}
 
 	p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu);
···
 
 	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid);
 	if (rc)
-		return rc;
+		goto err;
 
 	cxt_info.iid = dummy_cid;
 	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
 	if (rc) {
 		DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n",
 			  dummy_cid);
-		return rc;
+		goto err;
 	}
 	p_cxt = cxt_info.p_cxt;
 	SET_FIELD(p_cxt->tstorm_ag_context.flags3,
···
 
 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 
+	return rc;
+
+err:
+	qed_sp_destroy_request(p_hwfn, p_ent);
 	return rc;
 }
 
+1
drivers/net/ethernet/qlogic/qed/qed_iscsi.c
···
 			  "Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n",
 			  p_params->num_queues,
 			  p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]);
+		qed_sp_destroy_request(p_hwfn, p_ent);
 		return -EINVAL;
 	}
 
+8 -4
drivers/net/ethernet/qlogic/qed/qed_l2.c
···
 
 	rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
 	if (rc) {
-		/* Return spq entry which is taken in qed_sp_init_request()*/
-		qed_spq_return_entry(p_hwfn, p_ent);
+		qed_sp_destroy_request(p_hwfn, p_ent);
 		return rc;
 	}
 
···
 		DP_NOTICE(p_hwfn,
 			  "%d is not supported yet\n",
 			  p_filter_cmd->opcode);
+		qed_sp_destroy_request(p_hwfn, *pp_ent);
 		return -EINVAL;
 	}
 
···
 	} else {
 		rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
 		if (rc)
-			return rc;
+			goto err;
 
 		if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
 			rc = qed_fw_l2_queue(p_hwfn, p_params->qid,
 					     &abs_rx_q_id);
 			if (rc)
-				return rc;
+				goto err;
 
 			p_ramrod->rx_qid_valid = 1;
 			p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
···
 		   (u64)p_params->addr, p_params->length);
 
 	return qed_spq_post(p_hwfn, p_ent, NULL);
+
+err:
+	qed_sp_destroy_request(p_hwfn, p_ent);
+	return rc;
 }
 
 int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
+1
drivers/net/ethernet/qlogic/qed/qed_rdma.c
···
 	default:
 		rc = -EINVAL;
 		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+		qed_sp_destroy_request(p_hwfn, p_ent);
 		return rc;
 	}
 	SET_FIELD(p_ramrod->flags1,
+1
drivers/net/ethernet/qlogic/qed/qed_roce.c
···
 		DP_NOTICE(p_hwfn,
 			  "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
 			  rc);
+		qed_sp_destroy_request(p_hwfn, p_ent);
 		return rc;
 	}
 
+14
drivers/net/ethernet/qlogic/qed/qed_sp.h
···
 	enum spq_mode			comp_mode;
 	struct qed_spq_comp_cb		comp_cb;
 	struct qed_spq_comp_done	comp_done; /* SPQ_MODE_EBLOCK */
+
+	/* Posted entry for unlimited list entry in EBLOCK mode */
+	struct qed_spq_entry		*post_ent;
 };
 
 struct qed_eq {
···
 	enum spq_mode comp_mode;
 	struct qed_spq_comp_cb *p_comp_data;
 };
+
+/**
+ * @brief Returns a SPQ entry to the pool / frees the entry if allocated.
+ *        Should be called on in error flows after initializing the SPQ entry
+ *        and before posting it.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ */
+void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
+			    struct qed_spq_entry *p_ent);
 
 int qed_sp_init_request(struct qed_hwfn *p_hwfn,
 			struct qed_spq_entry **pp_ent,
+20 -2
drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
···
 #include "qed_sp.h"
 #include "qed_sriov.h"
 
+void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
+			    struct qed_spq_entry *p_ent)
+{
+	/* qed_spq_get_entry() can either get an entry from the free_pool,
+	 * or, if no entries are left, allocate a new entry and add it to
+	 * the unlimited_pending list.
+	 */
+	if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending)
+		kfree(p_ent);
+	else
+		qed_spq_return_entry(p_hwfn, p_ent);
+}
+
 int qed_sp_init_request(struct qed_hwfn *p_hwfn,
 			struct qed_spq_entry **pp_ent,
 			u8 cmd, u8 protocol, struct qed_sp_init_data *p_data)
···
 
 	case QED_SPQ_MODE_BLOCK:
 		if (!p_data->p_comp_data)
-			return -EINVAL;
+			goto err;
 
 		p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
 		break;
···
 	default:
 		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
 			  p_ent->comp_mode);
-		return -EINVAL;
+		goto err;
 	}
 
 	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
···
 	memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));
 
 	return 0;
+
+err:
+	qed_sp_destroy_request(p_hwfn, p_ent);
+
+	return -EINVAL;
 }
 
 static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type)
+35 -34
drivers/net/ethernet/qlogic/qed/qed_spq.c
···
 
 	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
 	rc = qed_mcp_drain(p_hwfn, p_ptt);
+	qed_ptt_release(p_hwfn, p_ptt);
 	if (rc) {
 		DP_NOTICE(p_hwfn, "MCP drain failed\n");
 		goto err;
···
 	/* Retry after drain */
 	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
 	if (!rc)
-		goto out;
+		return 0;
 
 	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
-	if (comp_done->done == 1)
+	if (comp_done->done == 1) {
 		if (p_fw_ret)
 			*p_fw_ret = comp_done->fw_return_code;
-out:
-	qed_ptt_release(p_hwfn, p_ptt);
-	return 0;
-
+		return 0;
+	}
 err:
-	qed_ptt_release(p_hwfn, p_ptt);
 	DP_NOTICE(p_hwfn,
 		  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
 		  le32_to_cpu(p_ent->elem.hdr.cid),
···
 		/* EBLOCK responsible to free the allocated p_ent */
 		if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
 			kfree(p_ent);
+		else
+			p_ent->post_ent = p_en2;
 
 		p_ent = p_en2;
 	}
···
 			   SPQ_HIGH_PRI_RESERVE_DEFAULT);
 }
 
+/* Avoid overriding of SPQ entries when getting out-of-order completions, by
+ * marking the completions in a bitmap and increasing the chain consumer only
+ * for the first successive completed entries.
+ */
+static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
+{
+	u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
+	struct qed_spq *p_spq = p_hwfn->p_spq;
+
+	__set_bit(pos, p_spq->p_comp_bitmap);
+	while (test_bit(p_spq->comp_bitmap_idx,
+			p_spq->p_comp_bitmap)) {
+		__clear_bit(p_spq->comp_bitmap_idx,
+			    p_spq->p_comp_bitmap);
+		p_spq->comp_bitmap_idx++;
+		qed_chain_return_produced(&p_spq->chain);
+	}
+}
+
 int qed_spq_post(struct qed_hwfn *p_hwfn,
 		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
 {
···
 		   p_ent->queue == &p_spq->unlimited_pending);
 
 	if (p_ent->queue == &p_spq->unlimited_pending) {
-		/* This is an allocated p_ent which does not need to
-		 * return to pool.
-		 */
+		struct qed_spq_entry *p_post_ent = p_ent->post_ent;
+
 		kfree(p_ent);
-		return rc;
+
+		/* Return the entry which was actually posted */
+		p_ent = p_post_ent;
 	}
 
 	if (rc)
···
 spq_post_fail2:
 	spin_lock_bh(&p_spq->lock);
 	list_del(&p_ent->list);
-	qed_chain_return_produced(&p_spq->chain);
+	qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);
 
 spq_post_fail:
 	/* return to the free pool */
···
 	spin_lock_bh(&p_spq->lock);
 	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
 		if (p_ent->elem.hdr.echo == echo) {
-			u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
-
 			list_del(&p_ent->list);
-
-			/* Avoid overriding of SPQ entries when getting
-			 * out-of-order completions, by marking the completions
-			 * in a bitmap and increasing the chain consumer only
-			 * for the first successive completed entries.
-			 */
-			__set_bit(pos, p_spq->p_comp_bitmap);
-
-			while (test_bit(p_spq->comp_bitmap_idx,
-					p_spq->p_comp_bitmap)) {
-				__clear_bit(p_spq->comp_bitmap_idx,
-					    p_spq->p_comp_bitmap);
-				p_spq->comp_bitmap_idx++;
-				qed_chain_return_produced(&p_spq->chain);
-			}
-
+			qed_spq_comp_bmap_update(p_hwfn, echo);
 			p_spq->comp_count++;
 			found = p_ent;
 			break;
···
 			   QED_MSG_SPQ,
 			   "Got a completion without a callback function\n");
 
-	if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
-	    (found->queue == &p_spq->unlimited_pending))
+	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
 		/* EBLOCK is responsible for returning its own entry into the
-		 * free list, unless it originally added the entry into the
-		 * unlimited pending list.
+		 * free list.
 		 */
 		qed_spq_return_entry(p_hwfn, found);
 
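To see qed_spq_comp_bmap_update() at work, take a hypothetical run where entries 5, 6 and 7 are pending and complete out of order as 7, 5, 6: the completion for 7 only sets bit 7, because comp_bitmap_idx still points at 5; when 5 completes, its bit is consumed and comp_bitmap_idx advances to 6, where it stops since bit 6 is not yet marked; when 6 finally completes, the loop consumes both 6 and the previously marked 7 in one pass. The chain consumer therefore only ever advances over a contiguous run of completed entries.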
+1
drivers/net/ethernet/qlogic/qed/qed_sriov.c
···
 	default:
 		DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
 			  p_hwfn->hw_info.personality);
+		qed_sp_destroy_request(p_hwfn, p_ent);
 		return -EINVAL;
 	}
 
+5 -3
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
···
 		     struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
 		     struct qlcnic_host_tx_ring *tx_ring)
 {
-	u8 l4proto, opcode = 0, hdr_len = 0;
+	u8 l4proto, opcode = 0, hdr_len = 0, tag_vlan = 0;
 	u16 flags = 0, vlan_tci = 0;
 	int copied, offset, copy_len, size;
 	struct cmd_desc_type0 *hwdesc;
···
 		flags = QLCNIC_FLAGS_VLAN_TAGGED;
 		vlan_tci = ntohs(vh->h_vlan_TCI);
 		protocol = ntohs(vh->h_vlan_encapsulated_proto);
+		tag_vlan = 1;
 	} else if (skb_vlan_tag_present(skb)) {
 		flags = QLCNIC_FLAGS_VLAN_OOB;
 		vlan_tci = skb_vlan_tag_get(skb);
+		tag_vlan = 1;
 	}
 	if (unlikely(adapter->tx_pvid)) {
-		if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
+		if (tag_vlan && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
 			return -EIO;
-		if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
+		if (tag_vlan && (adapter->flags & QLCNIC_TAGGING_ENABLED))
 			goto set_flags;
 
 		flags = QLCNIC_FLAGS_VLAN_OOB;
+3 -3
drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
···
 			 struct net_device *real_dev,
 			 struct rmnet_endpoint *ep)
 {
-	struct rmnet_priv *priv;
+	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
 	int rc;
 
 	if (ep->egress_dev)
···
 	rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 	rmnet_dev->hw_features |= NETIF_F_SG;
 
+	priv->real_dev = real_dev;
+
 	rc = register_netdevice(rmnet_dev);
 	if (!rc) {
 		ep->egress_dev = rmnet_dev;
···
 
 		rmnet_dev->rtnl_link_ops = &rmnet_link_ops;
 
-		priv = netdev_priv(rmnet_dev);
 		priv->mux_id = id;
-		priv->real_dev = real_dev;
 
 		netdev_dbg(rmnet_dev, "rmnet dev created\n");
 	}
+2 -1
drivers/net/ethernet/stmicro/stmmac/common.h
···
 
 /* GMAC TX FIFO is 8K, Rx FIFO is 16K */
 #define BUF_SIZE_16KiB 16384
-#define BUF_SIZE_8KiB 8192
+/* RX Buffer size must be < 8191 and multiple of 4/8/16 bytes */
+#define BUF_SIZE_8KiB 8188
 #define BUF_SIZE_4KiB 4096
 #define BUF_SIZE_2KiB 2048
 
+1 -1
drivers/net/ethernet/stmicro/stmmac/descs_com.h
···
 /* Enhanced descriptors */
 static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
 {
-	p->des1 |= cpu_to_le32(((BUF_SIZE_8KiB - 1)
+	p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
 			<< ERDES1_BUFFER2_SIZE_SHIFT)
 		   & ERDES1_BUFFER2_SIZE_MASK);
 
+1 -1
drivers/net/ethernet/stmicro/stmmac/enh_desc.c
···
 				  int mode, int end)
 {
 	p->des0 |= cpu_to_le32(RDES0_OWN);
-	p->des1 |= cpu_to_le32((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK);
+	p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK);
 
 	if (mode == STMMAC_CHAIN_MODE)
 		ehn_desc_rx_set_on_chain(p);
+1 -1
drivers/net/ethernet/stmicro/stmmac/ring_mode.c
···
 static int set_16kib_bfsize(int mtu)
 {
 	int ret = 0;
-	if (unlikely(mtu >= BUF_SIZE_8KiB))
+	if (unlikely(mtu > BUF_SIZE_8KiB))
 		ret = BUF_SIZE_16KiB;
 	return ret;
 }
+4 -3
drivers/net/fddi/defza.c
···
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
 /* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices.
  *
  * Copyright (c) 2018  Maciej W. Rozycki
···
 #define DRV_VERSION "v.1.1.4"
 #define DRV_RELDATE "Oct  6 2018"
 
-static char version[] =
+static const char version[] =
 	DRV_NAME ": " DRV_VERSION "  " DRV_RELDATE "  Maciej W. Rozycki\n";
 
 MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
···
 static void fza_tx_smt(struct net_device *dev)
 {
 	struct fza_private *fp = netdev_priv(dev);
-	struct fza_buffer_tx __iomem *smt_tx_ptr, *skb_data_ptr;
+	struct fza_buffer_tx __iomem *smt_tx_ptr;
 	int i, len;
 	u32 own;
 
···
 
 	if (!netif_queue_stopped(dev)) {
 		if (dev_nit_active(dev)) {
+			struct fza_buffer_tx *skb_data_ptr;
 			struct sk_buff *skb;
 
 			/* Length must be a multiple of 4 as only word
+2 -1
drivers/net/fddi/defza.h
···
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0+ */
 /* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices.
  *
  * Copyright (c) 2018  Maciej W. Rozycki
···
 #define FZA_RING_CMD 0x200400	/* command ring address */
 #define FZA_RING_CMD_SIZE 0x40	/* command descriptor ring
 				 * size
+				 */
 /* Command constants. */
 #define FZA_RING_CMD_MASK 0x7fffffff
 #define FZA_RING_CMD_NOP 0x00000000	/* nop */
+16 -2
drivers/net/phy/broadcom.c
···
 	return 0;
 }
 
-static int bcm5481x_config(struct phy_device *phydev)
+static int bcm54xx_config_clock_delay(struct phy_device *phydev)
 {
 	int rc, val;
 
···
 	ret = genphy_config_aneg(phydev);
 
 	/* Then we can set up the delay. */
-	bcm5481x_config(phydev);
+	bcm54xx_config_clock_delay(phydev);
 
 	if (of_property_read_bool(np, "enet-phy-lane-swap")) {
 		/* Lane Swap - Undocumented register...magic! */
···
 		if (ret < 0)
 			return ret;
 	}
+
+	return ret;
+}
+
+static int bcm54616s_config_aneg(struct phy_device *phydev)
+{
+	int ret;
+
+	/* Aneg firsly. */
+	ret = genphy_config_aneg(phydev);
+
+	/* Then we can set up the delay. */
+	bcm54xx_config_clock_delay(phydev);
 
 	return ret;
 }
···
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
+	.config_aneg	= bcm54616s_config_aneg,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
 }, {
+2
drivers/net/usb/smsc95xx.c
···
 	dev->net->ethtool_ops = &smsc95xx_ethtool_ops;
 	dev->net->flags |= IFF_MULTICAST;
 	dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM;
+	dev->net->min_mtu = ETH_MIN_MTU;
+	dev->net->max_mtu = ETH_DATA_LEN;
 	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
 
 	pdata->dev = dev;
+2 -2
net/core/flow_dissector.c
···
 		break;
 	}
 
-	if (dissector_uses_key(flow_dissector,
-			       FLOW_DISSECTOR_KEY_PORTS)) {
+	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS) &&
+	    !(key_control->flags & FLOW_DIS_IS_FRAGMENT)) {
 		key_ports = skb_flow_dissector_target(flow_dissector,
 						      FLOW_DISSECTOR_KEY_PORTS,
 						      target_container);
+15 -14
net/ipv4/inet_fragment.c
···
 }
 
 static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
-						void *arg)
+						void *arg,
+						struct inet_frag_queue **prev)
 {
 	struct inet_frags *f = nf->f;
 	struct inet_frag_queue *q;
-	int err;
 
 	q = inet_frag_alloc(nf, f, arg);
-	if (!q)
+	if (!q) {
+		*prev = ERR_PTR(-ENOMEM);
 		return NULL;
-
+	}
 	mod_timer(&q->timer, jiffies + nf->timeout);
 
-	err = rhashtable_insert_fast(&nf->rhashtable, &q->node,
-				     f->rhash_params);
-	if (err < 0) {
+	*prev = rhashtable_lookup_get_insert_key(&nf->rhashtable, &q->key,
+						 &q->node, f->rhash_params);
+	if (*prev) {
 		q->flags |= INET_FRAG_COMPLETE;
 		inet_frag_kill(q);
 		inet_frag_destroy(q);
···
 /* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
 struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
 {
-	struct inet_frag_queue *fq;
+	struct inet_frag_queue *fq = NULL, *prev;
 
 	if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
 		return NULL;
 
 	rcu_read_lock();
 
-	fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
-	if (fq) {
+	prev = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
+	if (!prev)
+		fq = inet_frag_create(nf, key, &prev);
+	if (prev && !IS_ERR(prev)) {
+		fq = prev;
 		if (!refcount_inc_not_zero(&fq->refcnt))
 			fq = NULL;
-		rcu_read_unlock();
-		return fq;
 	}
 	rcu_read_unlock();
-
-	return inet_frag_create(nf, key);
+	return fq;
 }
 EXPORT_SYMBOL(inet_frag_find);
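The rewrite hinges on the three-way return of rhashtable_lookup_get_insert_key() as used here: NULL means the new queue went in, a non-NULL pointer is the entry that another CPU inserted first (which inet_frag_create() reports back through *prev after destroying its own allocation), and an ERR_PTR() signals an insertion failure. inet_frag_find() then returns either its freshly created queue or the race winner's queue, taking a reference on the latter only if it is still live.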
+2 -1
net/sched/act_mirred.c
···
 	if (is_redirect) {
 		skb2->tc_redirected = 1;
 		skb2->tc_from_ingress = skb2->tc_at_ingress;
-
+		if (skb2->tc_from_ingress)
+			skb2->tstamp = 0;
 		/* let's the caller reinsert the packet, if possible */
 		if (use_reinsert) {
 			res->ingress = want_ingress;
+13 -1
net/sched/cls_flower.c
···
 			     struct netlink_ext_ack *extack)
 {
 	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
-	int option_len, key_depth, msk_depth = 0;
+	int err, option_len, key_depth, msk_depth = 0;
+
+	err = nla_validate_nested(tb[TCA_FLOWER_KEY_ENC_OPTS],
+				  TCA_FLOWER_KEY_ENC_OPTS_MAX,
+				  enc_opts_policy, extack);
+	if (err)
+		return err;
 
 	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
 
 	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
+		err = nla_validate_nested(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
+					  TCA_FLOWER_KEY_ENC_OPTS_MAX,
+					  enc_opts_policy, extack);
+		if (err)
+			return err;
+
 		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
 		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
 	}
-9
net/sched/sch_netem.c
···
 	 */
 	skb->dev = qdisc_dev(sch);
 
-#ifdef CONFIG_NET_CLS_ACT
-	/*
-	 * If it's at ingress let's pretend the delay is
-	 * from the network (tstamp will be updated).
-	 */
-	if (skb->tc_redirected && skb->tc_from_ingress)
-		skb->tstamp = 0;
-#endif
-
 	if (q->slot.slot_next) {
 		q->slot.packets_left--;
 		q->slot.bytes_left -= qdisc_pkt_len(skb);
+7 -4
net/tipc/link.c
···
 		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
 			l->priority = peers_prio;
 
-		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
-		if (msg_peer_stopping(hdr))
+		/* If peer is going down we want full re-establish cycle */
+		if (msg_peer_stopping(hdr)) {
 			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
-		else if ((mtyp == RESET_MSG) || !link_is_up(l))
+			break;
+		}
+		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
+		if (mtyp == RESET_MSG || !link_is_up(l))
 			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
 
 		/* ACTIVATE_MSG takes up link if it was already locally reset */
-		if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
+		if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
 			rc = TIPC_LINK_UP_EVT;
 
 		l->peer_session = msg_session(hdr);