Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Source file at branch master — 289 lines, 7.3 kB (view raw)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <linux/debugfs.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_managed.h>

#include "xe_assert.h"
#include "xe_configfs.h"
#include "xe_device.h"
#include "xe_gt_sriov_pf.h"
#include "xe_module.h"
#include "xe_sriov.h"
#include "xe_sriov_pf.h"
#include "xe_sriov_pf_helpers.h"
#include "xe_sriov_pf_migration.h"
#include "xe_sriov_pf_service.h"
#include "xe_sriov_pf_sysfs.h"
#include "xe_sriov_printk.h"

/* Configfs-provided policy: should this PF be restricted to admin-only use? */
static bool wanted_admin_only(struct xe_device *xe)
{
	return xe_configfs_admin_only_pf(to_pci_dev(xe->drm.dev));
}

/* Configfs-provided policy: requested upper limit on the number of VFs. */
static unsigned int wanted_max_vfs(struct xe_device *xe)
{
	return xe_configfs_get_max_vfs(to_pci_dev(xe->drm.dev));
}

/*
 * Lower the TotalVFs value advertised by the PCI core to @limit.
 * A failure is logged at notice level and the error code returned,
 * so callers on best-effort paths may ignore it.
 */
static int pf_reduce_totalvfs(struct xe_device *xe, int limit)
{
	struct device *dev = xe->drm.dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	int err;

	err = pci_sriov_set_totalvfs(pdev, limit);
	if (err)
		xe_sriov_notice(xe, "Failed to set number of VFs to %d (%pe)\n",
				limit, ERR_PTR(err));
	return err;
}

/*
 * Give up on PF mode: log @why, advertise zero VFs to the PCI core and
 * return false so the caller can propagate "PF not ready".
 */
static bool pf_continue_as_native(struct xe_device *xe, const char *why)
{
	xe_sriov_dbg(xe, "%s, continuing as native\n", why);
	pf_reduce_totalvfs(xe, 0);
	return false;
}

/**
 * xe_sriov_pf_readiness - Check if PF functionality can be enabled.
 * @xe: the &xe_device to check
 *
 * This function is called as part of the SR-IOV probe to validate if all
 * PF prerequisites are satisfied and we can continue with enabling PF mode.
 *
 * On success it also caches the effective VF limits and the admin-only
 * policy under &xe->sriov.pf.
 *
 * Return: true if the PF mode can be turned on.
 */
bool xe_sriov_pf_readiness(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	int totalvfs = pci_sriov_get_totalvfs(pdev);
	/* clamp the configfs-requested limit to what the device reports */
	int newlimit = min_t(u16, wanted_max_vfs(xe), totalvfs);

	xe_assert(xe, totalvfs <= U16_MAX);

	if (!dev_is_pf(dev))
		return false;

	if (!xe_device_uc_enabled(xe))
		return pf_continue_as_native(xe, "Guc submission disabled");

	if (!newlimit)
		return pf_continue_as_native(xe, "all VFs disabled");

	/* best-effort: failing to lower TotalVFs does not block PF mode */
	pf_reduce_totalvfs(xe, newlimit);

	xe->sriov.pf.admin_only = wanted_admin_only(xe);
	xe->sriov.pf.device_total_vfs = totalvfs;
	xe->sriov.pf.driver_max_vfs = newlimit;

	return true;
}

/**
 * xe_sriov_pf_init_early - Initialize SR-IOV PF specific data.
 * @xe: the &xe_device to initialize
 *
 * Allocations and the mutex are DRM-managed (drmm_*), so no matching
 * teardown is needed here.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_pf_init_early(struct xe_device *xe)
{
	int err;

	xe_assert(xe, IS_SRIOV_PF(xe));

	/* one extra slot: entry [0] is the PF itself, [1..totalvfs] the VFs */
	xe->sriov.pf.vfs = drmm_kcalloc(&xe->drm, 1 + xe_sriov_pf_get_totalvfs(xe),
					sizeof(*xe->sriov.pf.vfs), GFP_KERNEL);
	if (!xe->sriov.pf.vfs)
		return -ENOMEM;

	err = drmm_mutex_init(&xe->drm, &xe->sriov.pf.master_lock);
	if (err)
		return err;

	err = xe_sriov_pf_migration_init(xe);
	if (err)
		return err;

	xe_guard_init(&xe->sriov.pf.guard_vfs_enabling, "vfs_enabling");

	xe_sriov_pf_service_init(xe);

	/*
	 * NOTE(review): no matching "xe_mert*" header in the include list
	 * above — confirm this identifier is correct and that its
	 * declaration is pulled in indirectly.
	 */
	xe_mert_init_early(xe);

	return 0;
}

/**
 * xe_sriov_pf_init_late() - Late initialization of the SR-IOV PF.
 * @xe: the &xe_device to initialize
 *
 * Performs per-GT PF initialization and registers the PF sysfs interface.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_pf_init_late(struct xe_device *xe)
{
	struct xe_gt *gt;
	unsigned int id;
	int err;

	xe_assert(xe, IS_SRIOV_PF(xe));

	for_each_gt(gt, xe, id) {
		err = xe_gt_sriov_pf_init(gt);
		if (err)
			return err;
	}

	err = xe_sriov_pf_sysfs_init(xe);
	if (err)
		return err;

	return 0;
}

/**
 * xe_sriov_pf_wait_ready() - Wait until PF is ready to operate.
 * @xe: the &xe_device to test
 *
 * Waits for every GT to report PF readiness. Bails out early with
 * -ECANCELED if the device is already wedged.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_pf_wait_ready(struct xe_device *xe)
{
	struct xe_gt *gt;
	unsigned int id;
	int err;

	if (xe_device_wedged(xe))
		return -ECANCELED;

	for_each_gt(gt, xe, id) {
		err = xe_gt_sriov_pf_wait_ready(gt);
		if (err)
			return err;
	}

	return 0;
}

/**
 * xe_sriov_pf_arm_guard() - Arm the guard for exclusive/lockdown mode.
 * @xe: the PF &xe_device
 * @guard: the &xe_guard to arm
 * @lockdown: arm for lockdown(true) or exclusive(false) mode
 * @who: the address of the new owner, or NULL if it's a caller
 *
 * This function can only be called on PF.
 *
 * It is a simple wrapper for xe_guard_arm() with additional debug
 * messages.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_pf_arm_guard(struct xe_device *xe, struct xe_guard *guard,
			  bool lockdown, void *who)
{
	/* default the recorded owner to our direct caller */
	void *new_owner = who ?: __builtin_return_address(0);
	int err;

	err = xe_guard_arm(guard, lockdown, new_owner);
	if (err) {
		/* include the last owner to help diagnose the conflict */
		xe_sriov_dbg(xe, "%s/%s mode denied (%pe) last owner %ps\n",
			     guard->name, xe_guard_mode_str(lockdown),
			     ERR_PTR(err), guard->owner);
		return err;
	}

	xe_sriov_dbg_verbose(xe, "%s/%s by %ps\n",
			     guard->name, xe_guard_mode_str(lockdown),
			     new_owner);
	return 0;
}

/**
 * xe_sriov_pf_disarm_guard() - Disarm the guard.
 * @xe: the PF &xe_device
 * @guard: the &xe_guard to disarm
 * @lockdown: disarm from lockdown(true) or exclusive(false) mode
 * @who: the address of the indirect owner, or NULL if it's a caller
 *
 * This function can only be called on PF.
 *
 * It is a simple wrapper for xe_guard_disarm() with additional debug
 * messages and xe_assert() to easily catch any illegal calls.
 */
void xe_sriov_pf_disarm_guard(struct xe_device *xe, struct xe_guard *guard,
			      bool lockdown, void *who)
{
	bool disarmed;

	xe_sriov_dbg_verbose(xe, "%s/%s by %ps\n",
			     guard->name, xe_guard_mode_str(lockdown),
			     who ?: __builtin_return_address(0));

	disarmed = xe_guard_disarm(guard, lockdown);
	/* disarming a guard that was never armed is a caller bug */
	xe_assert_msg(xe, disarmed, "%s/%s not armed? last owner %ps",
		      guard->name, xe_guard_mode_str(lockdown), guard->owner);
}

/**
 * xe_sriov_pf_lockdown() - Lockdown the PF to prevent VFs enabling.
 * @xe: the PF &xe_device
 *
 * This function can only be called on PF.
 *
 * Once the PF is locked down, it will not enable VFs.
 * If VFs are already enabled, the -EBUSY will be returned.
 * To allow the PF enable VFs again call xe_sriov_pf_end_lockdown().
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_pf_lockdown(struct xe_device *xe)
{
	xe_assert(xe, IS_SRIOV_PF(xe));

	/* record our caller as the lockdown owner for debug messages */
	return xe_sriov_pf_arm_guard(xe, &xe->sriov.pf.guard_vfs_enabling, true,
				     __builtin_return_address(0));
}

/**
 * xe_sriov_pf_end_lockdown() - Allow the PF to enable VFs again.
 * @xe: the PF &xe_device
 *
 * This function can only be called on PF.
 * See xe_sriov_pf_lockdown() for details.
 */
void xe_sriov_pf_end_lockdown(struct xe_device *xe)
{
	xe_assert(xe, IS_SRIOV_PF(xe));

	xe_sriov_pf_disarm_guard(xe, &xe->sriov.pf.guard_vfs_enabling, true,
				 __builtin_return_address(0));
}

/**
 * xe_sriov_pf_print_vfs_summary - Print SR-IOV PF information.
 * @xe: the &xe_device to print info from
 * @p: the &drm_printer
 *
 * Print SR-IOV PF related information into provided DRM printer:
 * the device's total VF count, the driver's supported limit, and
 * the number of VFs currently enabled by the PCI core.
 */
void xe_sriov_pf_print_vfs_summary(struct xe_device *xe, struct drm_printer *p)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);

	xe_assert(xe, IS_SRIOV_PF(xe));

	drm_printf(p, "total: %u\n", xe->sriov.pf.device_total_vfs);
	drm_printf(p, "supported: %u\n", xe->sriov.pf.driver_max_vfs);
	drm_printf(p, "enabled: %u\n", pci_num_vf(pdev));
}