Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

octeontx2: Set appropriate PF, VF masks and shifts based on silicon

The number of RVU PFs on CN20K silicon has increased to 96 from the
maximum of 32 that was supported on earlier silicons. Every RVU PF and VF is
identified by HW using a 16-bit PF_FUNC value. Due to the change in the
max number of PFs in CN20K, the bit encoding of this PF_FUNC has changed.

This patch handles the change by using helper functions (which perform a
silicon check) to select the PF and VF masks and shifts, supporting both the
new CN20K silicon and the OcteonTx series. These helper functions are used in
different modules.

Also moved the NIX AF register offset macros to other files; the changes
that depend on this will be posted in coming patches.

Signed-off-by: Subbaraya Sundeep <sbhatta@marvell.com>
Signed-off-by: Sai Krishna <saikrishnag@marvell.com>
Signed-off-by: Sunil Kovvuri Goutham <sgoutham@marvell.com>
Link: https://patch.msgid.link/1749639716-13868-2-git-send-email-sbhatta@marvell.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

authored by

Subbaraya Sundeep and committed by
Jakub Kicinski
25d51ebf b34441e3

+225 -203
+2 -3
drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
··· 18 18 #define OTX2_CPT_MAX_VFS_NUM 128 19 19 #define OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs) \ 20 20 (((blk) << 20) | ((slot) << 12) | (offs)) 21 - #define OTX2_CPT_RVU_PFFUNC(pf, func) \ 22 - ((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \ 23 - (((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT)) 21 + 22 + #define OTX2_CPT_RVU_PFFUNC(pdev, pf, func) rvu_make_pcifunc(pdev, pf, func) 24 23 25 24 #define OTX2_CPT_INVALID_CRYPTO_ENG_GRP 0xFF 26 25 #define OTX2_CPT_NAME_LENGTH 64
+6 -7
drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
··· 142 142 memset(req, 0, sizeof(*req)); 143 143 req->hdr.id = MBOX_MSG_CPT_INLINE_IPSEC_CFG; 144 144 req->hdr.sig = OTX2_MBOX_REQ_SIG; 145 - req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0); 145 + req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptpf->pdev, cptpf->pf_id, 0); 146 146 req->dir = CPT_INLINE_INBOUND; 147 147 req->slot = slot; 148 148 req->sso_pf_func_ovrd = cptpf->sso_pf_func_ovrd; ··· 184 184 nix_req->gen_cfg.opcode = cpt_inline_rx_opcode(pdev); 185 185 nix_req->gen_cfg.param1 = req->param1; 186 186 nix_req->gen_cfg.param2 = req->param2; 187 - nix_req->inst_qsel.cpt_pf_func = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0); 187 + nix_req->inst_qsel.cpt_pf_func = 188 + OTX2_CPT_RVU_PFFUNC(cptpf->pdev, cptpf->pf_id, 0); 188 189 nix_req->inst_qsel.cpt_slot = 0; 189 190 ret = otx2_cpt_send_mbox_msg(&cptpf->afpf_mbox, pdev); 190 191 if (ret) ··· 393 392 msg = (struct mbox_msghdr *)(mdev->mbase + offset); 394 393 395 394 /* Set which VF sent this message based on mbox IRQ */ 396 - msg->pcifunc = ((u16)cptpf->pf_id << RVU_PFVF_PF_SHIFT) | 397 - ((vf->vf_id + 1) & RVU_PFVF_FUNC_MASK); 398 - 395 + msg->pcifunc = rvu_make_pcifunc(cptpf->pdev, cptpf->pf_id, 396 + (vf->vf_id + 1)); 399 397 err = cptpf_handle_vf_req(cptpf, vf, msg, 400 398 msg->next_msgoff - offset); 401 399 /* ··· 469 469 470 470 switch (msg->id) { 471 471 case MBOX_MSG_READY: 472 - cptpf->pf_id = (msg->pcifunc >> RVU_PFVF_PF_SHIFT) & 473 - RVU_PFVF_PF_MASK; 472 + cptpf->pf_id = rvu_get_pf(cptpf->pdev, msg->pcifunc); 474 473 break; 475 474 case MBOX_MSG_MSIX_OFFSET: 476 475 rsp_msix = (struct msix_offset_rsp *) msg;
+3 -1
drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
··· 176 176 /* Set PF number for microcode fetches */ 177 177 ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev, 178 178 CPT_AF_PF_FUNC, 179 - cptpf->pf_id << RVU_PFVF_PF_SHIFT, blkaddr); 179 + rvu_make_pcifunc(cptpf->pdev, 180 + cptpf->pf_id, 0), 181 + blkaddr); 180 182 if (ret) 181 183 return ret; 182 184
+3 -3
drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
··· 189 189 } 190 190 req->hdr.id = MBOX_MSG_GET_ENG_GRP_NUM; 191 191 req->hdr.sig = OTX2_MBOX_REQ_SIG; 192 - req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0); 192 + req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->pdev, cptvf->vf_id, 0); 193 193 req->eng_type = eng_type; 194 194 195 195 return otx2_cpt_send_mbox_msg(mbox, pdev); ··· 210 210 } 211 211 req->id = MBOX_MSG_GET_KVF_LIMITS; 212 212 req->sig = OTX2_MBOX_REQ_SIG; 213 - req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0); 213 + req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->pdev, cptvf->vf_id, 0); 214 214 215 215 return otx2_cpt_send_mbox_msg(mbox, pdev); 216 216 } ··· 230 230 } 231 231 req->id = MBOX_MSG_GET_CAPS; 232 232 req->sig = OTX2_MBOX_REQ_SIG; 233 - req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0); 233 + req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->pdev, cptvf->vf_id, 0); 234 234 235 235 return otx2_cpt_send_mbox_msg(mbox, pdev); 236 236 }
+3 -3
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
··· 97 97 if (pcifunc & RVU_PFVF_FUNC_MASK) 98 98 pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)]; 99 99 else 100 - pfvf = &mcs->pf[rvu_get_pf(pcifunc)]; 100 + pfvf = &mcs->pf[rvu_get_pf(rvu->pdev, pcifunc)]; 101 101 102 102 event->intr_mask &= pfvf->intr_mask; 103 103 ··· 123 123 struct mcs_intr_info *req; 124 124 int pf; 125 125 126 - pf = rvu_get_pf(event->pcifunc); 126 + pf = rvu_get_pf(rvu->pdev, event->pcifunc); 127 127 128 128 mutex_lock(&rvu->mbox_lock); 129 129 ··· 193 193 if (pcifunc & RVU_PFVF_FUNC_MASK) 194 194 pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)]; 195 195 else 196 - pfvf = &mcs->pf[rvu_get_pf(pcifunc)]; 196 + pfvf = &mcs->pf[rvu_get_pf(rvu->pdev, pcifunc)]; 197 197 198 198 mcs->pf_map[0] = pcifunc; 199 199 pfvf->intr_mask = req->intr_mask;
+12 -18
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
··· 294 294 devnum = rvu_get_hwvf(rvu, pcifunc); 295 295 } else { 296 296 is_pf = true; 297 - devnum = rvu_get_pf(pcifunc); 297 + devnum = rvu_get_pf(rvu->pdev, pcifunc); 298 298 } 299 299 300 300 /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or ··· 359 359 devnum = rvu_get_hwvf(rvu, pcifunc); 360 360 } else { 361 361 is_pf = true; 362 - devnum = rvu_get_pf(pcifunc); 362 + devnum = rvu_get_pf(rvu->pdev, pcifunc); 363 363 } 364 364 365 365 block->fn_map[lf] = attach ? pcifunc : 0; ··· 400 400 rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs); 401 401 } 402 402 403 - inline int rvu_get_pf(u16 pcifunc) 404 - { 405 - return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; 406 - } 407 - 408 403 void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf) 409 404 { 410 405 u64 cfg; ··· 417 422 int pf, func; 418 423 u64 cfg; 419 424 420 - pf = rvu_get_pf(pcifunc); 425 + pf = rvu_get_pf(rvu->pdev, pcifunc); 421 426 func = pcifunc & RVU_PFVF_FUNC_MASK; 422 427 423 428 /* Get first HWVF attached to this PF */ ··· 432 437 if (pcifunc & RVU_PFVF_FUNC_MASK) 433 438 return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)]; 434 439 else 435 - return &rvu->pf[rvu_get_pf(pcifunc)]; 440 + return &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)]; 436 441 } 437 442 438 443 static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc) ··· 440 445 int pf, vf, nvfs; 441 446 u64 cfg; 442 447 443 - pf = rvu_get_pf(pcifunc); 448 + pf = rvu_get_pf(rvu->pdev, pcifunc); 444 449 if (pf >= rvu->hw->total_pfs) 445 450 return false; 446 451 ··· 1482 1487 pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); 1483 1488 1484 1489 /* All CGX mapped PFs are set with assigned NIX block during init */ 1485 - if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) { 1490 + if (is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) { 1486 1491 blkaddr = pf->nix_blkaddr; 1487 1492 } else if (is_lbk_vf(rvu, pcifunc)) { 1488 1493 vf = pcifunc - 1; ··· 1496 1501 } 1497 1502 1498 1503 /* if SDP1 then the 
blkaddr is NIX1 */ 1499 - if (is_sdp_pfvf(pcifunc) && pf->sdp_info->node_id == 1) 1504 + if (is_sdp_pfvf(rvu, pcifunc) && pf->sdp_info->node_id == 1) 1500 1505 blkaddr = BLKADDR_NIX1; 1501 1506 1502 1507 switch (blkaddr) { ··· 2001 2006 2002 2007 vf = pcifunc & RVU_PFVF_FUNC_MASK; 2003 2008 cfg = rvu_read64(rvu, BLKADDR_RVUM, 2004 - RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc))); 2009 + RVU_PRIV_PFX_CFG(rvu_get_pf(rvu->pdev, pcifunc))); 2005 2010 numvfs = (cfg >> 12) & 0xFF; 2006 2011 2007 2012 if (vf && vf <= numvfs) ··· 2224 2229 /* Set which PF/VF sent this message based on mbox IRQ */ 2225 2230 switch (type) { 2226 2231 case TYPE_AFPF: 2227 - msg->pcifunc &= 2228 - ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT); 2229 - msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT); 2232 + msg->pcifunc &= rvu_pcifunc_pf_mask(rvu->pdev); 2233 + msg->pcifunc |= rvu_make_pcifunc(rvu->pdev, devid, 0); 2230 2234 break; 2231 2235 case TYPE_AFVF: 2232 2236 msg->pcifunc &= ··· 2243 2249 if (msg->pcifunc & RVU_PFVF_FUNC_MASK) 2244 2250 dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n", 2245 2251 err, otx2_mbox_id2name(msg->id), 2246 - msg->id, rvu_get_pf(msg->pcifunc), 2252 + msg->id, rvu_get_pf(rvu->pdev, msg->pcifunc), 2247 2253 (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1); 2248 2254 else 2249 2255 dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n", ··· 2767 2773 2768 2774 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 2769 2775 numvfs = (cfg >> 12) & 0xFF; 2770 - pcifunc = pf << RVU_PFVF_PF_SHIFT; 2776 + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); 2771 2777 2772 2778 for (vf = 0; vf < numvfs; vf++) 2773 2779 __rvu_flr_handler(rvu, (pcifunc | (vf + 1)));
+43 -9
drivers/net/ethernet/marvell/octeontx2/af/rvu.h
··· 10 10 11 11 #include <linux/pci.h> 12 12 #include <net/devlink.h> 13 + #include <linux/soc/marvell/silicons.h> 13 14 14 15 #include "rvu_struct.h" 15 16 #include "rvu_devlink.h" ··· 44 43 #define MAX_CPT_BLKS 2 45 44 46 45 /* PF_FUNC */ 47 - #define RVU_PFVF_PF_SHIFT 10 48 - #define RVU_PFVF_PF_MASK 0x3F 49 - #define RVU_PFVF_FUNC_SHIFT 0 50 - #define RVU_PFVF_FUNC_MASK 0x3FF 46 + #define RVU_OTX2_PFVF_PF_SHIFT 10 47 + #define RVU_OTX2_PFVF_PF_MASK 0x3F 48 + #define RVU_PFVF_FUNC_SHIFT 0 49 + #define RVU_PFVF_FUNC_MASK 0x3FF 50 + #define RVU_CN20K_PFVF_PF_SHIFT 9 51 + #define RVU_CN20K_PFVF_PF_MASK 0x7F 52 + 53 + static inline u16 rvu_make_pcifunc(struct pci_dev *pdev, int pf, int func) 54 + { 55 + if (is_cn20k(pdev)) 56 + return ((pf & RVU_CN20K_PFVF_PF_MASK) << 57 + RVU_CN20K_PFVF_PF_SHIFT) | 58 + ((func & RVU_PFVF_FUNC_MASK) << 59 + RVU_PFVF_FUNC_SHIFT); 60 + else 61 + return ((pf & RVU_OTX2_PFVF_PF_MASK) << 62 + RVU_OTX2_PFVF_PF_SHIFT) | 63 + ((func & RVU_PFVF_FUNC_MASK) << 64 + RVU_PFVF_FUNC_SHIFT); 65 + } 66 + 67 + static inline int rvu_pcifunc_pf_mask(struct pci_dev *pdev) 68 + { 69 + if (is_cn20k(pdev)) 70 + return ~(RVU_CN20K_PFVF_PF_MASK << RVU_CN20K_PFVF_PF_SHIFT); 71 + else 72 + return ~(RVU_OTX2_PFVF_PF_MASK << RVU_OTX2_PFVF_PF_SHIFT); 73 + } 51 74 52 75 #ifdef CONFIG_DEBUG_FS 53 76 struct dump_ctx { ··· 861 836 void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start); 862 837 bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc); 863 838 u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr); 864 - int rvu_get_pf(u16 pcifunc); 865 839 struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc); 866 840 void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf); 867 841 bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr); ··· 889 865 890 866 /* SDP APIs */ 891 867 int rvu_sdp_init(struct rvu *rvu); 892 - bool is_sdp_pfvf(u16 pcifunc); 893 - bool is_sdp_pf(u16 pcifunc); 868 + bool 
is_sdp_pfvf(struct rvu *rvu, u16 pcifunc); 869 + bool is_sdp_pf(struct rvu *rvu, u16 pcifunc); 894 870 bool is_sdp_vf(struct rvu *rvu, u16 pcifunc); 895 871 896 872 static inline bool is_rep_dev(struct rvu *rvu, u16 pcifunc) ··· 901 877 return false; 902 878 } 903 879 880 + static inline int rvu_get_pf(struct pci_dev *pdev, u16 pcifunc) 881 + { 882 + if (is_cn20k(pdev)) 883 + return (pcifunc >> RVU_CN20K_PFVF_PF_SHIFT) & 884 + RVU_CN20K_PFVF_PF_MASK; 885 + else 886 + return (pcifunc >> RVU_OTX2_PFVF_PF_SHIFT) & 887 + RVU_OTX2_PFVF_PF_MASK; 888 + } 889 + 904 890 /* CGX APIs */ 905 891 static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf) 906 892 { 907 893 return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs) && 908 - !is_sdp_pf(pf << RVU_PFVF_PF_SHIFT); 894 + !is_sdp_pf(rvu, rvu_make_pcifunc(rvu->pdev, pf, 0)); 909 895 } 910 896 911 897 static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id) ··· 927 893 static inline bool is_cgx_vf(struct rvu *rvu, u16 pcifunc) 928 894 { 929 895 return ((pcifunc & RVU_PFVF_FUNC_MASK) && 930 - is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))); 896 + is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))); 931 897 } 932 898 933 899 #define M(_name, _id, fn_name, req, rsp) \
+34 -34
drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
··· 457 457 inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc) 458 458 { 459 459 if ((pcifunc & RVU_PFVF_FUNC_MASK) || 460 - !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) 460 + !is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) 461 461 return false; 462 462 return true; 463 463 } ··· 484 484 485 485 int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start) 486 486 { 487 - int pf = rvu_get_pf(pcifunc); 487 + int pf = rvu_get_pf(rvu->pdev, pcifunc); 488 488 struct mac_ops *mac_ops; 489 489 u8 cgx_id, lmac_id; 490 490 void *cgxd; ··· 501 501 502 502 int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable) 503 503 { 504 - int pf = rvu_get_pf(pcifunc); 504 + int pf = rvu_get_pf(rvu->pdev, pcifunc); 505 505 struct mac_ops *mac_ops; 506 506 u8 cgx_id, lmac_id; 507 507 void *cgxd; ··· 526 526 527 527 void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc) 528 528 { 529 - int pf = rvu_get_pf(pcifunc); 529 + int pf = rvu_get_pf(rvu->pdev, pcifunc); 530 530 int i = 0, lmac_count = 0; 531 531 struct mac_ops *mac_ops; 532 532 u8 max_dmac_filters; ··· 577 577 static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req, 578 578 void *rsp) 579 579 { 580 - int pf = rvu_get_pf(req->hdr.pcifunc); 580 + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); 581 581 struct mac_ops *mac_ops; 582 582 int stat = 0, err = 0; 583 583 u64 tx_stat, rx_stat; ··· 633 633 int rvu_mbox_handler_cgx_stats_rst(struct rvu *rvu, struct msg_req *req, 634 634 struct msg_rsp *rsp) 635 635 { 636 - int pf = rvu_get_pf(req->hdr.pcifunc); 636 + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); 637 637 struct rvu_pfvf *parent_pf; 638 638 struct mac_ops *mac_ops; 639 639 u8 cgx_idx, lmac; ··· 663 663 struct msg_req *req, 664 664 struct cgx_fec_stats_rsp *rsp) 665 665 { 666 - int pf = rvu_get_pf(req->hdr.pcifunc); 666 + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); 667 667 struct mac_ops *mac_ops; 668 668 u8 cgx_idx, lmac; 669 669 void *cgxd; ··· 681 681 struct 
cgx_mac_addr_set_or_get *req, 682 682 struct cgx_mac_addr_set_or_get *rsp) 683 683 { 684 - int pf = rvu_get_pf(req->hdr.pcifunc); 684 + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); 685 685 u8 cgx_id, lmac_id; 686 686 687 687 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) ··· 701 701 struct cgx_mac_addr_add_req *req, 702 702 struct cgx_mac_addr_add_rsp *rsp) 703 703 { 704 - int pf = rvu_get_pf(req->hdr.pcifunc); 704 + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); 705 705 u8 cgx_id, lmac_id; 706 706 int rc = 0; 707 707 ··· 725 725 struct cgx_mac_addr_del_req *req, 726 726 struct msg_rsp *rsp) 727 727 { 728 - int pf = rvu_get_pf(req->hdr.pcifunc); 728 + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); 729 729 u8 cgx_id, lmac_id; 730 730 731 731 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) ··· 743 743 struct cgx_max_dmac_entries_get_rsp 744 744 *rsp) 745 745 { 746 - int pf = rvu_get_pf(req->hdr.pcifunc); 746 + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); 747 747 u8 cgx_id, lmac_id; 748 748 749 749 /* If msg is received from PFs(which are not mapped to CGX LMACs) ··· 769 769 struct cgx_mac_addr_set_or_get *req, 770 770 struct cgx_mac_addr_set_or_get *rsp) 771 771 { 772 - int pf = rvu_get_pf(req->hdr.pcifunc); 772 + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); 773 773 u8 cgx_id, lmac_id; 774 774 int rc = 0; 775 775 u64 cfg; ··· 790 790 struct msg_rsp *rsp) 791 791 { 792 792 u16 pcifunc = req->hdr.pcifunc; 793 - int pf = rvu_get_pf(pcifunc); 793 + int pf = rvu_get_pf(rvu->pdev, pcifunc); 794 794 u8 cgx_id, lmac_id; 795 795 796 796 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) ··· 809 809 int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req, 810 810 struct msg_rsp *rsp) 811 811 { 812 - int pf = rvu_get_pf(req->hdr.pcifunc); 812 + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); 813 813 u8 cgx_id, lmac_id; 814 814 815 815 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) ··· 828 828 static int 
rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) 829 829 { 830 830 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 831 - int pf = rvu_get_pf(pcifunc); 831 + int pf = rvu_get_pf(rvu->pdev, pcifunc); 832 832 struct mac_ops *mac_ops; 833 833 u8 cgx_id, lmac_id; 834 834 void *cgxd; ··· 864 864 int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req, 865 865 struct msg_rsp *rsp) 866 866 { 867 - if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc))) 867 + if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, req->hdr.pcifunc))) 868 868 return -EPERM; 869 869 870 870 return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true); ··· 878 878 879 879 static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en) 880 880 { 881 - int pf = rvu_get_pf(pcifunc); 881 + int pf = rvu_get_pf(rvu->pdev, pcifunc); 882 882 u8 cgx_id, lmac_id; 883 883 884 884 if (!is_cgx_config_permitted(rvu, pcifunc)) ··· 917 917 u8 cgx_id, lmac_id; 918 918 int pf, err; 919 919 920 - pf = rvu_get_pf(req->hdr.pcifunc); 920 + pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); 921 921 922 922 if (!is_pf_cgxmapped(rvu, pf)) 923 923 return -ENODEV; ··· 933 933 struct msg_req *req, 934 934 struct cgx_features_info_msg *rsp) 935 935 { 936 - int pf = rvu_get_pf(req->hdr.pcifunc); 936 + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); 937 937 u8 cgx_idx, lmac; 938 938 void *cgxd; 939 939 ··· 975 975 976 976 static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en) 977 977 { 978 - int pf = rvu_get_pf(pcifunc); 978 + int pf = rvu_get_pf(rvu->pdev, pcifunc); 979 979 struct mac_ops *mac_ops; 980 980 u8 cgx_id, lmac_id; 981 981 ··· 1005 1005 1006 1006 int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause) 1007 1007 { 1008 - int pf = rvu_get_pf(pcifunc); 1008 + int pf = rvu_get_pf(rvu->pdev, pcifunc); 1009 1009 u8 rx_pfc = 0, tx_pfc = 0; 1010 1010 struct mac_ops *mac_ops; 1011 1011 u8 cgx_id, lmac_id; ··· 1046 1046 struct cgx_pause_frm_cfg 
*req, 1047 1047 struct cgx_pause_frm_cfg *rsp) 1048 1048 { 1049 - int pf = rvu_get_pf(req->hdr.pcifunc); 1049 + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); 1050 1050 struct mac_ops *mac_ops; 1051 1051 u8 cgx_id, lmac_id; 1052 1052 int err = 0; ··· 1073 1073 int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req, 1074 1074 struct msg_rsp *rsp) 1075 1075 { 1076 - int pf = rvu_get_pf(req->hdr.pcifunc); 1076 + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); 1077 1077 u8 cgx_id, lmac_id; 1078 1078 1079 1079 if (!is_pf_cgxmapped(rvu, pf)) ··· 1106 1106 /* Assumes LF of a PF and all of its VF belongs to the same 1107 1107 * NIX block 1108 1108 */ 1109 - pcifunc = pf << RVU_PFVF_PF_SHIFT; 1109 + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); 1110 1110 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1111 1111 if (blkaddr < 0) 1112 1112 return 0; ··· 1133 1133 struct rvu_pfvf *parent_pf, *pfvf; 1134 1134 int cgx_users, err = 0; 1135 1135 1136 - if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) 1136 + if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) 1137 1137 return 0; 1138 1138 1139 - parent_pf = &rvu->pf[rvu_get_pf(pcifunc)]; 1139 + parent_pf = &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)]; 1140 1140 pfvf = rvu_get_pfvf(rvu, pcifunc); 1141 1141 1142 1142 mutex_lock(&rvu->cgx_cfg_lock); ··· 1179 1179 struct fec_mode *req, 1180 1180 struct fec_mode *rsp) 1181 1181 { 1182 - int pf = rvu_get_pf(req->hdr.pcifunc); 1182 + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); 1183 1183 u8 cgx_id, lmac_id; 1184 1184 1185 1185 if (!is_pf_cgxmapped(rvu, pf)) ··· 1195 1195 int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req, 1196 1196 struct cgx_fw_data *rsp) 1197 1197 { 1198 - int pf = rvu_get_pf(req->hdr.pcifunc); 1198 + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); 1199 1199 u8 cgx_id, lmac_id; 1200 1200 1201 1201 if (!rvu->fwdata) ··· 1222 1222 struct cgx_set_link_mode_req *req, 1223 1223 struct 
cgx_set_link_mode_rsp *rsp) 1224 1224 { 1225 - int pf = rvu_get_pf(req->hdr.pcifunc); 1225 + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); 1226 1226 u8 cgx_idx, lmac; 1227 1227 void *cgxd; 1228 1228 ··· 1238 1238 int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req, 1239 1239 struct msg_rsp *rsp) 1240 1240 { 1241 - int pf = rvu_get_pf(req->hdr.pcifunc); 1241 + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); 1242 1242 u8 cgx_id, lmac_id; 1243 1243 1244 1244 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) ··· 1256 1256 struct cgx_mac_addr_update_req *req, 1257 1257 struct cgx_mac_addr_update_rsp *rsp) 1258 1258 { 1259 - int pf = rvu_get_pf(req->hdr.pcifunc); 1259 + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); 1260 1260 u8 cgx_id, lmac_id; 1261 1261 1262 1262 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) ··· 1272 1272 int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, 1273 1273 u8 rx_pause, u16 pfc_en) 1274 1274 { 1275 - int pf = rvu_get_pf(pcifunc); 1275 + int pf = rvu_get_pf(rvu->pdev, pcifunc); 1276 1276 u8 rx_8023 = 0, tx_8023 = 0; 1277 1277 struct mac_ops *mac_ops; 1278 1278 u8 cgx_id, lmac_id; ··· 1310 1310 struct cgx_pfc_cfg *req, 1311 1311 struct cgx_pfc_rsp *rsp) 1312 1312 { 1313 - int pf = rvu_get_pf(req->hdr.pcifunc); 1313 + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); 1314 1314 struct mac_ops *mac_ops; 1315 1315 u8 cgx_id, lmac_id; 1316 1316 void *cgxd; ··· 1335 1335 1336 1336 void rvu_mac_reset(struct rvu *rvu, u16 pcifunc) 1337 1337 { 1338 - int pf = rvu_get_pf(pcifunc); 1338 + int pf = rvu_get_pf(rvu->pdev, pcifunc); 1339 1339 struct mac_ops *mac_ops; 1340 1340 struct cgx *cgxd; 1341 1341 u8 cgx, lmac;
+2 -2
drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
··· 66 66 #define LMT_MAP_TBL_W1_OFF 8 67 67 static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc) 68 68 { 69 - return ((rvu_get_pf(pcifunc) * LMT_MAX_VFS) + 69 + return ((rvu_get_pf(rvu->pdev, pcifunc) * LMT_MAX_VFS) + 70 70 (pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE; 71 71 } 72 72 ··· 83 83 84 84 mutex_lock(&rvu->rsrc_lock); 85 85 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova); 86 - pf = rvu_get_pf(pcifunc) & RVU_PFVF_PF_MASK; 86 + pf = rvu_get_pf(rvu->pdev, pcifunc) & RVU_OTX2_PFVF_PF_MASK; 87 87 val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 | 88 88 ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF); 89 89 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);
+2 -2
drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
··· 410 410 { 411 411 int cpt_pf_num = rvu->cpt_pf_num; 412 412 413 - if (rvu_get_pf(pcifunc) != cpt_pf_num) 413 + if (rvu_get_pf(rvu->pdev, pcifunc) != cpt_pf_num) 414 414 return false; 415 415 if (pcifunc & RVU_PFVF_FUNC_MASK) 416 416 return false; ··· 422 422 { 423 423 int cpt_pf_num = rvu->cpt_pf_num; 424 424 425 - if (rvu_get_pf(pcifunc) != cpt_pf_num) 425 + if (rvu_get_pf(rvu->pdev, pcifunc) != cpt_pf_num) 426 426 return false; 427 427 if (!(pcifunc & RVU_PFVF_FUNC_MASK)) 428 428 return false;
+11 -11
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
··· 688 688 689 689 for (pf = 0; pf < rvu->hw->total_pfs; pf++) { 690 690 for (vf = 0; vf <= rvu->hw->total_vfs; vf++) { 691 - pcifunc = pf << 10 | vf; 691 + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf); 692 692 if (!pcifunc) 693 693 continue; 694 694 ··· 759 759 for (vf = 0; vf <= rvu->hw->total_vfs; vf++) { 760 760 off = 0; 761 761 flag = 0; 762 - pcifunc = pf << 10 | vf; 762 + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf); 763 763 if (!pcifunc) 764 764 continue; 765 765 ··· 842 842 843 843 cgx[0] = 0; 844 844 lmac[0] = 0; 845 - pcifunc = pf << 10; 845 + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); 846 846 pfvf = rvu_get_pfvf(rvu, pcifunc); 847 847 848 848 if (pfvf->nix_blkaddr == BLKADDR_NIX0) ··· 2623 2623 pcifunc = ipolicer->pfvf_map[idx]; 2624 2624 if (!(pcifunc & RVU_PFVF_FUNC_MASK)) 2625 2625 seq_printf(m, "Allocated to :: PF %d\n", 2626 - rvu_get_pf(pcifunc)); 2626 + rvu_get_pf(rvu->pdev, pcifunc)); 2627 2627 else 2628 2628 seq_printf(m, "Allocated to :: PF %d VF %d\n", 2629 - rvu_get_pf(pcifunc), 2629 + rvu_get_pf(rvu->pdev, pcifunc), 2630 2630 (pcifunc & RVU_PFVF_FUNC_MASK) - 1); 2631 2631 print_band_prof_ctx(m, &aq_rsp.prof); 2632 2632 } ··· 2983 2983 2984 2984 if (!(pcifunc & RVU_PFVF_FUNC_MASK)) 2985 2985 seq_printf(s, "\n\t\t Device \t\t: PF%d\n", 2986 - rvu_get_pf(pcifunc)); 2986 + rvu_get_pf(rvu->pdev, pcifunc)); 2987 2987 else 2988 2988 seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n", 2989 - rvu_get_pf(pcifunc), 2989 + rvu_get_pf(rvu->pdev, pcifunc), 2990 2990 (pcifunc & RVU_PFVF_FUNC_MASK) - 1); 2991 2991 2992 2992 if (entry_acnt) { ··· 3049 3049 seq_puts(filp, "\n\t\t Current allocation\n"); 3050 3050 seq_puts(filp, "\t\t====================\n"); 3051 3051 for (pf = 0; pf < rvu->hw->total_pfs; pf++) { 3052 - pcifunc = (pf << RVU_PFVF_PF_SHIFT); 3052 + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); 3053 3053 rvu_print_npc_mcam_info(filp, pcifunc, blkaddr); 3054 3054 3055 3055 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 3056 3056 
numvfs = (cfg >> 12) & 0xFF; 3057 3057 for (vf = 0; vf < numvfs; vf++) { 3058 - pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1); 3058 + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, (vf + 1)); 3059 3059 rvu_print_npc_mcam_info(filp, pcifunc, blkaddr); 3060 3060 } 3061 3061 } ··· 3326 3326 3327 3327 mutex_lock(&mcam->lock); 3328 3328 list_for_each_entry(iter, &mcam->mcam_rules, list) { 3329 - pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; 3329 + pf = rvu_get_pf(rvu->pdev, iter->owner); 3330 3330 seq_printf(s, "\n\tInstalled by: PF%d ", pf); 3331 3331 3332 3332 if (iter->owner & RVU_PFVF_FUNC_MASK) { ··· 3344 3344 rvu_dbg_npc_mcam_show_flows(s, iter); 3345 3345 if (is_npc_intf_rx(iter->intf)) { 3346 3346 target = iter->rx_action.pf_func; 3347 - pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; 3347 + pf = rvu_get_pf(rvu->pdev, target); 3348 3348 seq_printf(s, "\tForward to: PF%d ", pf); 3349 3349 3350 3350 if (target & RVU_PFVF_FUNC_MASK) {
+29 -25
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
··· 315 315 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 316 316 if ((nix_get_tx_link(rvu, map_func) != 317 317 nix_get_tx_link(rvu, pcifunc)) && 318 - (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))) 318 + (rvu_get_pf(rvu->pdev, map_func) != 319 + rvu_get_pf(rvu->pdev, pcifunc))) 319 320 return false; 320 321 else 321 322 return true; ··· 340 339 bool from_vf; 341 340 int err; 342 341 343 - pf = rvu_get_pf(pcifunc); 342 + pf = rvu_get_pf(rvu->pdev, pcifunc); 344 343 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK && 345 344 type != NIX_INTF_TYPE_SDP) 346 345 return 0; ··· 417 416 break; 418 417 case NIX_INTF_TYPE_SDP: 419 418 from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); 420 - parent_pf = &rvu->pf[rvu_get_pf(pcifunc)]; 419 + parent_pf = &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)]; 421 420 sdp_info = parent_pf->sdp_info; 422 421 if (!sdp_info) { 423 422 dev_err(rvu->dev, "Invalid sdp_info pointer\n"); ··· 591 590 u16 chan_v; 592 591 u64 cfg; 593 592 594 - pf = rvu_get_pf(pcifunc); 593 + pf = rvu_get_pf(rvu->pdev, pcifunc); 595 594 type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; 596 595 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) 597 596 return 0; 598 597 599 - if (is_sdp_pfvf(pcifunc)) 598 + if (is_sdp_pfvf(rvu, pcifunc)) 600 599 type = NIX_INTF_TYPE_SDP; 601 600 602 601 if (cpt_link && !rvu->hw->cpt_links) ··· 737 736 u16 chan_v; 738 737 u64 cfg; 739 738 740 - pf = rvu_get_pf(pcifunc); 739 + pf = rvu_get_pf(rvu->pdev, pcifunc); 741 740 type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; 742 - if (is_sdp_pfvf(pcifunc)) 741 + if (is_sdp_pfvf(rvu, pcifunc)) 743 742 type = NIX_INTF_TYPE_SDP; 744 743 745 744 /* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */ ··· 1675 1674 } 1676 1675 1677 1676 intf = is_lbk_vf(rvu, pcifunc) ? 
NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; 1678 - if (is_sdp_pfvf(pcifunc)) 1677 + if (is_sdp_pfvf(rvu, pcifunc)) 1679 1678 intf = NIX_INTF_TYPE_SDP; 1680 1679 1681 1680 err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp, ··· 1799 1798 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg); 1800 1799 if (rc < 0) { 1801 1800 dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)", 1802 - rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); 1801 + rvu_get_pf(rvu->pdev, pcifunc), 1802 + pcifunc & RVU_PFVF_FUNC_MASK); 1803 1803 return NIX_AF_ERR_MARK_CFG_FAIL; 1804 1804 } 1805 1805 ··· 2052 2050 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc) 2053 2051 { 2054 2052 struct rvu_hwinfo *hw = rvu->hw; 2055 - int pf = rvu_get_pf(pcifunc); 2053 + int pf = rvu_get_pf(rvu->pdev, pcifunc); 2056 2054 u8 cgx_id = 0, lmac_id = 0; 2057 2055 2058 2056 if (is_lbk_vf(rvu, pcifunc)) {/* LBK links */ ··· 2070 2068 int link, int *start, int *end) 2071 2069 { 2072 2070 struct rvu_hwinfo *hw = rvu->hw; 2073 - int pf = rvu_get_pf(pcifunc); 2071 + int pf = rvu_get_pf(rvu->pdev, pcifunc); 2074 2072 2075 2073 /* LBK links */ 2076 2074 if (is_lbk_vf(rvu, pcifunc) || is_rep_dev(rvu, pcifunc)) { ··· 2428 2426 { 2429 2427 struct nix_smq_flush_ctx *smq_flush_ctx; 2430 2428 int err, restore_tx_en = 0, i; 2431 - int pf = rvu_get_pf(pcifunc); 2429 + int pf = rvu_get_pf(rvu->pdev, pcifunc); 2432 2430 u8 cgx_id = 0, lmac_id = 0; 2433 2431 u16 tl2_tl3_link_schq; 2434 2432 u8 link, link_level; ··· 2822 2820 { 2823 2821 struct rvu_hwinfo *hw = rvu->hw; 2824 2822 int lbk_link_start, lbk_links; 2825 - u8 pf = rvu_get_pf(pcifunc); 2823 + u8 pf = rvu_get_pf(rvu->pdev, pcifunc); 2826 2824 int schq; 2827 2825 u64 cfg; 2828 2826 ··· 3192 3190 err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL); 3193 3191 if (err) { 3194 3192 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n", 3195 - rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); 3193 + rvu_get_pf(rvu->pdev, pcifunc), 3194 + 
pcifunc & RVU_PFVF_FUNC_MASK); 3196 3195 return err; 3197 3196 } 3198 3197 return 0; ··· 3461 3458 dev_err(rvu->dev, 3462 3459 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n", 3463 3460 __func__, idx, mce_list->max, 3464 - pcifunc >> RVU_PFVF_PF_SHIFT); 3461 + rvu_get_pf(rvu->pdev, pcifunc)); 3465 3462 return -EINVAL; 3466 3463 } 3467 3464 ··· 3513 3510 struct rvu_pfvf *pfvf; 3514 3511 3515 3512 if (!hw->cap.nix_rx_multicast || 3516 - !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) { 3513 + !is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, 3514 + pcifunc & ~RVU_PFVF_FUNC_MASK))) { 3517 3515 *mce_list = NULL; 3518 3516 *mce_idx = 0; 3519 3517 return; ··· 3548 3544 int pf; 3549 3545 3550 3546 /* skip multicast pkt replication for AF's VFs & SDP links */ 3551 - if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(pcifunc)) 3547 + if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(rvu, pcifunc)) 3552 3548 return 0; 3553 3549 3554 3550 if (!hw->cap.nix_rx_multicast) 3555 3551 return 0; 3556 3552 3557 - pf = rvu_get_pf(pcifunc); 3553 + pf = rvu_get_pf(rvu->pdev, pcifunc); 3558 3554 if (!is_pf_cgxmapped(rvu, pf)) 3559 3555 return 0; 3560 3556 ··· 3623 3619 3624 3620 for (idx = 0; idx < (numvfs + 1); idx++) { 3625 3621 /* idx-0 is for PF, followed by VFs */ 3626 - pcifunc = (pf << RVU_PFVF_PF_SHIFT); 3622 + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); 3627 3623 pcifunc |= idx; 3628 3624 /* Add dummy entries now, so that we don't have to check 3629 3625 * for whether AQ_OP should be INIT/WRITE later on. 
··· 4558 4554 static void nix_find_link_frs(struct rvu *rvu, 4559 4555 struct nix_frs_cfg *req, u16 pcifunc) 4560 4556 { 4561 - int pf = rvu_get_pf(pcifunc); 4557 + int pf = rvu_get_pf(rvu->pdev, pcifunc); 4562 4558 struct rvu_pfvf *pfvf; 4563 4559 int maxlen, minlen; 4564 4560 int numvfs, hwvf; ··· 4605 4601 { 4606 4602 struct rvu_hwinfo *hw = rvu->hw; 4607 4603 u16 pcifunc = req->hdr.pcifunc; 4608 - int pf = rvu_get_pf(pcifunc); 4604 + int pf = rvu_get_pf(rvu->pdev, pcifunc); 4609 4605 int blkaddr, link = -1; 4610 4606 struct nix_hw *nix_hw; 4611 4607 struct rvu_pfvf *pfvf; ··· 5255 5251 5256 5252 rvu_switch_update_rules(rvu, pcifunc, true); 5257 5253 5258 - pf = rvu_get_pf(pcifunc); 5254 + pf = rvu_get_pf(rvu->pdev, pcifunc); 5259 5255 if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode) 5260 5256 rvu_rep_notify_pfvf_state(rvu, pcifunc, true); 5261 5257 ··· 5288 5284 rvu_switch_update_rules(rvu, pcifunc, false); 5289 5285 rvu_cgx_tx_enable(rvu, pcifunc, true); 5290 5286 5291 - pf = rvu_get_pf(pcifunc); 5287 + pf = rvu_get_pf(rvu->pdev, pcifunc); 5292 5288 if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode) 5293 5289 rvu_rep_notify_pfvf_state(rvu, pcifunc, false); 5294 5290 return 0; ··· 5300 5296 { 5301 5297 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 5302 5298 struct hwctx_disable_req ctx_req; 5303 - int pf = rvu_get_pf(pcifunc); 5299 + int pf = rvu_get_pf(rvu->pdev, pcifunc); 5304 5300 struct mac_ops *mac_ops; 5305 5301 u8 cgx_id, lmac_id; 5306 5302 u64 sa_base; ··· 5389 5385 int nixlf; 5390 5386 u64 cfg; 5391 5387 5392 - pf = rvu_get_pf(pcifunc); 5388 + pf = rvu_get_pf(rvu->pdev, pcifunc); 5393 5389 if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP)) 5394 5390 return 0; 5395 5391
+5 -3
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
··· 147 147 int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, 148 148 u16 pcifunc, int nixlf, int type) 149 149 { 150 - int pf = rvu_get_pf(pcifunc); 150 + struct rvu_hwinfo *hw = container_of(mcam, struct rvu_hwinfo, mcam); 151 + struct rvu *rvu = hw->rvu; 152 + int pf = rvu_get_pf(rvu->pdev, pcifunc); 151 153 int index; 152 154 153 155 /* Check if this is for a PF */ ··· 700 698 701 699 /* RX_ACTION set to MCAST for CGX PF's */ 702 700 if (hw->cap.nix_rx_multicast && pfvf->use_mce_list && 703 - is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) { 701 + is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) { 704 702 *(u64 *)&action = 0; 705 703 action.op = NIX_RX_ACTIONOP_MCAST; 706 704 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); ··· 3436 3434 { 3437 3435 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 3438 3436 int blkaddr, nixlf, rc, intf_mode; 3439 - int pf = rvu_get_pf(pcifunc); 3437 + int pf = rvu_get_pf(rvu->pdev, pcifunc); 3440 3438 u64 rxpkind, txpkind; 3441 3439 u8 cgx_id, lmac_id; 3442 3440
+8 -8
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
··· 1465 1465 int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc) 1466 1466 { 1467 1467 struct npc_exact_table *table; 1468 - int pf = rvu_get_pf(pcifunc); 1468 + int pf = rvu_get_pf(rvu->pdev, pcifunc); 1469 1469 u8 cgx_id, lmac_id; 1470 1470 u32 drop_mcam_idx; 1471 1471 bool *promisc; ··· 1512 1512 int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc) 1513 1513 { 1514 1514 struct npc_exact_table *table; 1515 - int pf = rvu_get_pf(pcifunc); 1515 + int pf = rvu_get_pf(rvu->pdev, pcifunc); 1516 1516 u8 cgx_id, lmac_id; 1517 1517 u32 drop_mcam_idx; 1518 1518 bool *promisc; ··· 1560 1560 int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req, 1561 1561 struct msg_rsp *rsp) 1562 1562 { 1563 - int pf = rvu_get_pf(req->hdr.pcifunc); 1563 + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); 1564 1564 u32 seq_id = req->index; 1565 1565 struct rvu_pfvf *pfvf; 1566 1566 u8 cgx_id, lmac_id; ··· 1593 1593 struct cgx_mac_addr_update_req *req, 1594 1594 struct cgx_mac_addr_update_rsp *rsp) 1595 1595 { 1596 - int pf = rvu_get_pf(req->hdr.pcifunc); 1596 + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); 1597 1597 struct npc_exact_table_entry *entry; 1598 1598 struct npc_exact_table *table; 1599 1599 struct rvu_pfvf *pfvf; ··· 1675 1675 struct cgx_mac_addr_add_req *req, 1676 1676 struct cgx_mac_addr_add_rsp *rsp) 1677 1677 { 1678 - int pf = rvu_get_pf(req->hdr.pcifunc); 1678 + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); 1679 1679 struct rvu_pfvf *pfvf; 1680 1680 u8 cgx_id, lmac_id; 1681 1681 int rc = 0; ··· 1711 1711 struct cgx_mac_addr_del_req *req, 1712 1712 struct msg_rsp *rsp) 1713 1713 { 1714 - int pf = rvu_get_pf(req->hdr.pcifunc); 1714 + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); 1715 1715 int rc; 1716 1716 1717 1717 rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index); ··· 1736 1736 int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req, 1737 1737 struct cgx_mac_addr_set_or_get 
*rsp) 1738 1738 { 1739 - int pf = rvu_get_pf(req->hdr.pcifunc); 1739 + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); 1740 1740 u32 seq_id = req->index; 1741 1741 struct rvu_pfvf *pfvf; 1742 1742 u8 cgx_id, lmac_id; ··· 2001 2001 } 2002 2002 2003 2003 /* Filter rules are only for PF */ 2004 - pcifunc = RVU_PFFUNC(i, 0); 2004 + pcifunc = RVU_PFFUNC(rvu->pdev, i, 0); 2005 2005 2006 2006 dev_dbg(rvu->dev, 2007 2007 "%s:Drop rule cgx=%d lmac=%d chan(val=0x%llx, mask=0x%llx\n",
+1 -3
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
··· 139 139 #define NPC_MCAM_DROP_RULE_MAX 30 140 140 #define NPC_MCAM_SDP_DROP_RULE_IDX 0 141 141 142 - #define RVU_PFFUNC(pf, func) \ 143 - ((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \ 144 - (((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT)) 142 + #define RVU_PFFUNC(pdev, pf, func) rvu_make_pcifunc(pdev, pf, func) 145 143 146 144 enum npc_exact_opc_type { 147 145 NPC_EXACT_OPC_MEM,
+6 -7
drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
··· 39 39 struct rep_event *msg; 40 40 int pf; 41 41 42 - pf = rvu_get_pf(event->pcifunc); 42 + pf = rvu_get_pf(rvu->pdev, event->pcifunc); 43 43 44 44 if (event->event & RVU_EVENT_MAC_ADDR_CHANGE) 45 45 ether_addr_copy(pfvf->mac_addr, event->evt_data.mac); ··· 114 114 struct rep_event *req; 115 115 int pf; 116 116 117 - if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) 117 + if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) 118 118 return 0; 119 119 120 - pf = rvu_get_pf(rvu->rep_pcifunc); 120 + pf = rvu_get_pf(rvu->pdev, rvu->rep_pcifunc); 121 121 122 122 mutex_lock(&rvu->mbox_lock); 123 123 req = otx2_mbox_alloc_msg_rep_event_up_notify(rvu, pf); ··· 325 325 if (!is_pf_cgxmapped(rvu, pf)) 326 326 continue; 327 327 328 - pcifunc = pf << RVU_PFVF_PF_SHIFT; 328 + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); 329 329 rvu_get_nix_blkaddr(rvu, pcifunc); 330 330 rep = true; 331 331 for (i = 0; i < 2; i++) { ··· 345 345 346 346 rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL); 347 347 for (vf = 0; vf < numvfs; vf++) { 348 - pcifunc = pf << RVU_PFVF_PF_SHIFT | 349 - ((vf + 1) & RVU_PFVF_FUNC_MASK); 348 + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf + 1); 350 349 rvu_get_nix_blkaddr(rvu, pcifunc); 351 350 352 351 /* Skip installimg rules if nixlf is not attached */ ··· 453 454 for (pf = 0; pf < rvu->hw->total_pfs; pf++) { 454 455 if (!is_pf_cgxmapped(rvu, pf)) 455 456 continue; 456 - pcifunc = pf << RVU_PFVF_PF_SHIFT; 457 + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); 457 458 rvu->rep2pfvf_map[rep] = pcifunc; 458 459 rsp->rep_pf_map[rep] = pcifunc; 459 460 rep++;
+5 -5
drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
··· 17 17 /* SDP PF number */ 18 18 static int sdp_pf_num[MAX_SDP] = {-1, -1}; 19 19 20 - bool is_sdp_pfvf(u16 pcifunc) 20 + bool is_sdp_pfvf(struct rvu *rvu, u16 pcifunc) 21 21 { 22 - u16 pf = rvu_get_pf(pcifunc); 22 + u16 pf = rvu_get_pf(rvu->pdev, pcifunc); 23 23 u32 found = 0, i = 0; 24 24 25 25 while (i < MAX_SDP) { ··· 34 34 return true; 35 35 } 36 36 37 - bool is_sdp_pf(u16 pcifunc) 37 + bool is_sdp_pf(struct rvu *rvu, u16 pcifunc) 38 38 { 39 - return (is_sdp_pfvf(pcifunc) && 39 + return (is_sdp_pfvf(rvu, pcifunc) && 40 40 !(pcifunc & RVU_PFVF_FUNC_MASK)); 41 41 } 42 42 ··· 46 46 if (!(pcifunc & ~RVU_PFVF_FUNC_MASK)) 47 47 return (rvu->vf_devid == RVU_SDP_VF_DEVID); 48 48 49 - return (is_sdp_pfvf(pcifunc) && 49 + return (is_sdp_pfvf(rvu, pcifunc) && 50 50 !!(pcifunc & RVU_PFVF_FUNC_MASK)); 51 51 } 52 52
+4 -4
drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
··· 93 93 if (!is_pf_cgxmapped(rvu, pf)) 94 94 continue; 95 95 96 - pcifunc = pf << 10; 96 + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); 97 97 /* rvu_get_nix_blkaddr sets up the corresponding NIX block 98 98 * address and NIX RX and TX interfaces for a pcifunc. 99 99 * Generally it is called during attach call of a pcifunc but it ··· 126 126 127 127 rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL); 128 128 for (vf = 0; vf < numvfs; vf++) { 129 - pcifunc = pf << 10 | ((vf + 1) & 0x3FF); 129 + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, (vf + 1)); 130 130 rvu_get_nix_blkaddr(rvu, pcifunc); 131 131 132 132 err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0); ··· 236 236 if (!is_pf_cgxmapped(rvu, pf)) 237 237 continue; 238 238 239 - pcifunc = pf << 10; 239 + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); 240 240 err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF); 241 241 if (err) 242 242 dev_err(rvu->dev, ··· 248 248 249 249 rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL); 250 250 for (vf = 0; vf < numvfs; vf++) { 251 - pcifunc = pf << 10 | ((vf + 1) & 0x3FF); 251 + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, (vf + 1)); 252 252 err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF); 253 253 if (err) 254 254 dev_err(rvu->dev,
+1 -1
drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
··· 481 481 goto set_available; 482 482 483 483 /* Trigger CTX flush to write dirty data back to DRAM */ 484 - reg_val = FIELD_PREP(CPT_LF_CTX_FLUSH, sa_iova >> 7); 484 + reg_val = FIELD_PREP(CPT_LF_CTX_FLUSH_CPTR, sa_iova >> 7); 485 485 otx2_write64(pf, CN10K_CPT_LF_CTX_FLUSH, reg_val); 486 486 487 487 set_available:
+1 -1
drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
··· 220 220 #define CPT_LF_Q_SIZE_DIV40 GENMASK_ULL(14, 0) 221 221 222 222 /* CPT LF CTX Flush Register */ 223 - #define CPT_LF_CTX_FLUSH GENMASK_ULL(45, 0) 223 + #define CPT_LF_CTX_FLUSH_CPTR GENMASK_ULL(45, 0) 224 224 225 225 #ifdef CONFIG_XFRM_OFFLOAD 226 226 int cn10k_ipsec_init(struct net_device *netdev);
+1 -10
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
··· 28 28 #include "otx2_reg.h" 29 29 #include "otx2_txrx.h" 30 30 #include "otx2_devlink.h" 31 + #include <rvu.h> 31 32 #include <rvu_trace.h> 32 33 #include "qos.h" 33 34 #include "rep.h" ··· 905 904 /* Time to wait before watchdog kicks off */ 906 905 #define OTX2_TX_TIMEOUT (100 * HZ) 907 906 908 - #define RVU_PFVF_PF_SHIFT 10 909 - #define RVU_PFVF_PF_MASK 0x3F 910 - #define RVU_PFVF_FUNC_SHIFT 0 911 - #define RVU_PFVF_FUNC_MASK 0x3FF 912 - 913 907 static inline bool is_otx2_vf(u16 pcifunc) 914 908 { 915 909 return !!(pcifunc & RVU_PFVF_FUNC_MASK); 916 - } 917 - 918 - static inline int rvu_get_pf(u16 pcifunc) 919 - { 920 - return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; 921 910 } 922 911 923 912 static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
+12 -9
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
··· 206 206 207 207 /* Register ME interrupt handler*/ 208 208 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE]; 209 - snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc)); 209 + snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", 210 + rvu_get_pf(pf->pdev, pf->pcifunc)); 210 211 ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0), 211 212 otx2_pf_me_intr_handler, 0, irq_name, pf); 212 213 if (ret) { ··· 217 216 218 217 /* Register FLR interrupt handler */ 219 218 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE]; 220 - snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc)); 219 + snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", 220 + rvu_get_pf(pf->pdev, pf->pcifunc)); 221 221 ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0), 222 222 otx2_pf_flr_intr_handler, 0, irq_name, pf); 223 223 if (ret) { ··· 230 228 if (numvfs > 64) { 231 229 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE]; 232 230 snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1", 233 - rvu_get_pf(pf->pcifunc)); 231 + rvu_get_pf(pf->pdev, pf->pcifunc)); 234 232 ret = request_irq(pci_irq_vector 235 233 (pf->pdev, RVU_PF_INT_VEC_VFME1), 236 234 otx2_pf_me_intr_handler, 0, irq_name, pf); ··· 240 238 } 241 239 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE]; 242 240 snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1", 243 - rvu_get_pf(pf->pcifunc)); 241 + rvu_get_pf(pf->pdev, pf->pcifunc)); 244 242 ret = request_irq(pci_irq_vector 245 243 (pf->pdev, RVU_PF_INT_VEC_VFFLR1), 246 244 otx2_pf_flr_intr_handler, 0, irq_name, pf); ··· 702 700 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE]; 703 701 if (pf->pcifunc) 704 702 snprintf(irq_name, NAME_SIZE, 705 - "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc)); 703 + "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pdev, pf->pcifunc)); 706 704 else 707 705 snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0"); 708 706 err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0), ··· 718 
716 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE]; 719 717 if (pf->pcifunc) 720 718 snprintf(irq_name, NAME_SIZE, 721 - "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc)); 719 + "RVUPF%d_VF Mbox1", 720 + rvu_get_pf(pf->pdev, pf->pcifunc)); 722 721 else 723 722 snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1"); 724 723 err = request_irq(pci_irq_vector(pf->pdev, ··· 1974 1971 if (err) { 1975 1972 dev_err(pf->dev, 1976 1973 "RVUPF%d: IRQ registration failed for QERR\n", 1977 - rvu_get_pf(pf->pcifunc)); 1974 + rvu_get_pf(pf->pdev, pf->pcifunc)); 1978 1975 goto err_disable_napi; 1979 1976 } 1980 1977 ··· 1992 1989 if (name_len >= NAME_SIZE) { 1993 1990 dev_err(pf->dev, 1994 1991 "RVUPF%d: IRQ registration failed for CQ%d, irq name is too long\n", 1995 - rvu_get_pf(pf->pcifunc), qidx); 1992 + rvu_get_pf(pf->pdev, pf->pcifunc), qidx); 1996 1993 err = -EINVAL; 1997 1994 goto err_free_cints; 1998 1995 } ··· 2003 2000 if (err) { 2004 2001 dev_err(pf->dev, 2005 2002 "RVUPF%d: IRQ registration failed for CQ%d\n", 2006 - rvu_get_pf(pf->pcifunc), qidx); 2003 + rvu_get_pf(pf->pdev, pf->pcifunc), qidx); 2007 2004 goto err_free_cints; 2008 2005 } 2009 2006 vec++;
-30
drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
··· 138 138 #define NIX_LF_CINTX_ENA_W1S(a) (NIX_LFBASE | 0xD40 | (a) << 12) 139 139 #define NIX_LF_CINTX_ENA_W1C(a) (NIX_LFBASE | 0xD50 | (a) << 12) 140 140 141 - /* NIX AF transmit scheduler registers */ 142 - #define NIX_AF_SMQX_CFG(a) (0x700 | (u64)(a) << 16) 143 - #define NIX_AF_TL4X_SDP_LINK_CFG(a) (0xB10 | (u64)(a) << 16) 144 - #define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (u64)(a) << 16) 145 - #define NIX_AF_TL1X_CIR(a) (0xC20 | (u64)(a) << 16) 146 - #define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (u64)(a) << 16) 147 - #define NIX_AF_TL2X_PARENT(a) (0xE88 | (u64)(a) << 16) 148 - #define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (u64)(a) << 16) 149 - #define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (u64)(a) << 16) 150 - #define NIX_AF_TL2X_CIR(a) (0xE20 | (u64)(a) << 16) 151 - #define NIX_AF_TL2X_PIR(a) (0xE30 | (u64)(a) << 16) 152 - #define NIX_AF_TL3X_PARENT(a) (0x1088 | (u64)(a) << 16) 153 - #define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (u64)(a) << 16) 154 - #define NIX_AF_TL3X_SHAPE(a) (0x1010 | (u64)(a) << 16) 155 - #define NIX_AF_TL3X_CIR(a) (0x1020 | (u64)(a) << 16) 156 - #define NIX_AF_TL3X_PIR(a) (0x1030 | (u64)(a) << 16) 157 - #define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (u64)(a) << 16) 158 - #define NIX_AF_TL4X_PARENT(a) (0x1288 | (u64)(a) << 16) 159 - #define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (u64)(a) << 16) 160 - #define NIX_AF_TL4X_SHAPE(a) (0x1210 | (u64)(a) << 16) 161 - #define NIX_AF_TL4X_CIR(a) (0x1220 | (u64)(a) << 16) 162 - #define NIX_AF_TL4X_PIR(a) (0x1230 | (u64)(a) << 16) 163 - #define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (u64)(a) << 16) 164 - #define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (u64)(a) << 16) 165 - #define NIX_AF_MDQX_SHAPE(a) (0x1410 | (u64)(a) << 16) 166 - #define NIX_AF_MDQX_CIR(a) (0x1420 | (u64)(a) << 16) 167 - #define NIX_AF_MDQX_PIR(a) (0x1430 | (u64)(a) << 16) 168 - #define NIX_AF_MDQX_PARENT(a) (0x1480 | (u64)(a) << 16) 169 - #define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (u64)(a) << 16 | (b) << 3) 170 - 171 141 /* LMT LF registers */ 172 142 #define 
LMT_LFBASE BIT_ULL(RVU_FUNC_BLKADDR_SHIFT) 173 143 #define LMT_LF_LMTLINEX(a) (LMT_LFBASE | 0x000 | (a) << 12)
+2 -1
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
··· 467 467 target = act->dev; 468 468 if (target->dev.parent) { 469 469 priv = netdev_priv(target); 470 - if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) { 470 + if (rvu_get_pf(nic->pdev, nic->pcifunc) != 471 + rvu_get_pf(nic->pdev, priv->pcifunc)) { 471 472 NL_SET_ERR_MSG_MOD(extack, 472 473 "can't redirect to other pf/vf"); 473 474 return -EOPNOTSUPP;
+4 -3
drivers/net/ethernet/marvell/octeontx2/nic/rep.c
··· 244 244 245 245 if (!(rep->pcifunc & RVU_PFVF_FUNC_MASK)) { 246 246 attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; 247 - attrs.phys.port_number = rvu_get_pf(rep->pcifunc); 247 + attrs.phys.port_number = rvu_get_pf(priv->pdev, rep->pcifunc); 248 248 } else { 249 249 attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF; 250 - attrs.pci_vf.pf = rvu_get_pf(rep->pcifunc); 250 + attrs.pci_vf.pf = rvu_get_pf(priv->pdev, rep->pcifunc); 251 251 attrs.pci_vf.vf = rep->pcifunc & RVU_PFVF_FUNC_MASK; 252 252 } 253 253 ··· 672 672 rep->pcifunc = pcifunc; 673 673 674 674 snprintf(ndev->name, sizeof(ndev->name), "Rpf%dvf%d", 675 - rvu_get_pf(pcifunc), (pcifunc & RVU_PFVF_FUNC_MASK)); 675 + rvu_get_pf(priv->pdev, pcifunc), 676 + (pcifunc & RVU_PFVF_FUNC_MASK)); 676 677 677 678 ndev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | 678 679 NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
+25
include/linux/soc/marvell/silicons.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only 2 + * Copyright (C) 2024 Marvell. 3 + */ 4 + 5 + #ifndef __SOC_SILICON_H 6 + #define __SOC_SILICON_H 7 + 8 + #include <linux/types.h> 9 + #include <linux/pci.h> 10 + 11 + #if defined(CONFIG_ARM64) 12 + 13 + #define CN20K_CHIPID 0x20 14 + /* 15 + * Silicon check for CN20K family 16 + */ 17 + static inline bool is_cn20k(struct pci_dev *pdev) 18 + { 19 + return (pdev->subsystem_device & 0xFF) == CN20K_CHIPID; 20 + } 21 + #else 22 + #define is_cn20k(pdev) ((void)(pdev), 0) 23 + #endif 24 + 25 + #endif /* __SOC_SILICON_H */