Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright(c) 2024 Intel Corporation.
4 */
5
6#include "xe_pxp.h"
7
8#include <drm/drm_managed.h>
9#include <uapi/drm/xe_drm.h>
10
11#include "xe_bo.h"
12#include "xe_bo_types.h"
13#include "xe_device_types.h"
14#include "xe_exec_queue.h"
15#include "xe_force_wake.h"
16#include "xe_guc_submit.h"
17#include "xe_gsc_proxy.h"
18#include "xe_gt_types.h"
19#include "xe_huc.h"
20#include "xe_mmio.h"
21#include "xe_pm.h"
22#include "xe_pxp_submit.h"
23#include "xe_pxp_types.h"
24#include "xe_uc_fw.h"
25#include "regs/xe_irq_regs.h"
26#include "regs/xe_pxp_regs.h"
27
28/**
29 * DOC: PXP
30 *
31 * PXP (Protected Xe Path) allows execution and flip to display of protected
32 * (i.e. encrypted) objects. This feature is currently only supported in
33 * integrated parts.
34 */
35
36#define ARB_SESSION DRM_XE_PXP_HWDRM_DEFAULT_SESSION /* shorter define */
37
38/*
39 * A submission to GSC can take up to 250ms to complete, so use a 300ms
40 * timeout for activation where only one of those is involved. Termination
41 * additionally requires a submission to VCS and an interaction with KCR, so
42 * bump the timeout to 500ms for that.
43 */
44#define PXP_ACTIVATION_TIMEOUT_MS 300
45#define PXP_TERMINATION_TIMEOUT_MS 500
46
/**
 * xe_pxp_is_supported - check if PXP is supported on the device
 * @xe: the xe_device structure
 *
 * Returns: true if the device has PXP HW support and the GSC proxy component
 * driver is built in, false otherwise.
 */
bool xe_pxp_is_supported(const struct xe_device *xe)
{
	return xe->info.has_pxp && IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY);
}
51
/**
 * xe_pxp_is_enabled - check if PXP init completed successfully
 * @pxp: the xe_pxp pointer (NULL if PXP init was skipped or failed)
 *
 * Returns: true if PXP is enabled, false otherwise.
 */
bool xe_pxp_is_enabled(const struct xe_pxp *pxp)
{
	/* xe->pxp is only assigned once xe_pxp_init() has succeeded */
	return pxp;
}
56
/*
 * Check (under forcewake) whether both PXP prerequisites - HuC authentication
 * via the GSC and GSC proxy init - have completed.
 */
static bool pxp_prerequisites_done(const struct xe_pxp *pxp)
{
	struct xe_gt *gt = pxp->gt;
	bool ready;

	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);

	/*
	 * If force_wake fails we could falsely report the prerequisites as not
	 * done even if they are; the consequence of this would be that the
	 * callers won't go ahead with using PXP, but if force_wake doesn't work
	 * the GT is very likely in a bad state so not really a problem to abort
	 * PXP. Therefore, we can just log the force_wake error and not escalate
	 * it.
	 */
	XE_WARN_ON(!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL));

	/* PXP requires both HuC authentication via GSC and GSC proxy initialized */
	ready = xe_huc_is_authenticated(&gt->uc.huc, XE_HUC_AUTH_VIA_GSC) &&
		xe_gsc_proxy_init_done(&gt->uc.gsc);

	return ready;
}
80
81/**
82 * xe_pxp_get_readiness_status - check whether PXP is ready for userspace use
83 * @pxp: the xe_pxp pointer (can be NULL if PXP is disabled)
84 *
85 * Returns: 0 if PXP is not ready yet, 1 if it is ready, a negative errno value
86 * if PXP is not supported/enabled or if something went wrong in the
87 * initialization of the prerequisites. Note that the return values of this
88 * function follow the uapi (see drm_xe_query_pxp_status), so they can be used
89 * directly in the query ioctl.
90 */
int xe_pxp_get_readiness_status(struct xe_pxp *pxp)
{
	int ret = 0;

	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	/* if the GSC or HuC FW are in an error state, PXP will never work */
	if (xe_uc_fw_status_to_error(pxp->gt->uc.huc.fw.status) ||
	    xe_uc_fw_status_to_error(pxp->gt->uc.gsc.fw.status))
		return -EIO;

	/* scope-based runtime PM ref, dropped automatically on return */
	guard(xe_pm_runtime)(pxp->xe);

	/* PXP requires both HuC loaded and GSC proxy initialized */
	if (pxp_prerequisites_done(pxp))
		ret = 1;

	return ret;
}
111
112static bool pxp_session_is_in_play(struct xe_pxp *pxp, u32 id)
113{
114 struct xe_gt *gt = pxp->gt;
115
116 return xe_mmio_read32(>->mmio, KCR_SIP) & BIT(id);
117}
118
119static int pxp_wait_for_session_state(struct xe_pxp *pxp, u32 id, bool in_play)
120{
121 struct xe_gt *gt = pxp->gt;
122 u32 mask = BIT(id);
123
124 return xe_mmio_wait32(>->mmio, KCR_SIP, mask, in_play ? mask : 0,
125 250, NULL, false);
126}
127
128static void pxp_invalidate_queues(struct xe_pxp *pxp);
129
/*
 * Execute the HW side of a termination: submit the session termination to the
 * GSC, wait for the session to drop out of KCR_SIP, trigger the KCR global
 * termination and finally have the GSC invalidate its own state.
 */
static int pxp_terminate_hw(struct xe_pxp *pxp)
{
	struct xe_gt *gt = pxp->gt;
	int ret = 0;

	drm_dbg(&pxp->xe->drm, "Terminating PXP\n");

	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FW_GT))
		return -EIO;

	/* terminate the hw session */
	ret = xe_pxp_submit_session_termination(pxp, ARB_SESSION);
	if (ret)
		return ret;

	ret = pxp_wait_for_session_state(pxp, ARB_SESSION, false);
	if (ret)
		return ret;

	/* Trigger full HW cleanup */
	xe_mmio_write32(&gt->mmio, KCR_GLOBAL_TERMINATE, 1);

	/* now we can tell the GSC to clean up its own state */
	return xe_pxp_submit_session_invalidation(&pxp->gsc_res, ARB_SESSION);
}
156
/*
 * Flag that a termination is being processed and re-arm the termination
 * completion so that waiters block until pxp_terminate_complete() fires it.
 * Caller must hold pxp->mutex.
 */
static void mark_termination_in_progress(struct xe_pxp *pxp)
{
	lockdep_assert_held(&pxp->mutex);

	reinit_completion(&pxp->termination);
	pxp->status = XE_PXP_TERMINATION_IN_PROGRESS;
}
164
/*
 * Handle a termination request: bump the key instance if a session was
 * active, update the state machine (deferring to resume if suspended, or to
 * the in-flight termination if one is already running), then invalidate all
 * PXP queues and submit the HW termination.
 */
static void pxp_terminate(struct xe_pxp *pxp)
{
	int ret = 0;
	struct xe_device *xe = pxp->xe;

	/* a start might be mid-flight; wait for it before tearing down */
	if (!wait_for_completion_timeout(&pxp->activation,
					 msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS)))
		drm_err(&xe->drm, "failed to wait for PXP start before termination\n");

	mutex_lock(&pxp->mutex);

	/* invalidate objects encrypted with the now-dead key */
	if (pxp->status == XE_PXP_ACTIVE)
		pxp->key_instance++;

	/*
	 * we'll mark the status as needing termination on resume, so no need to
	 * emit a termination now.
	 */
	if (pxp->status == XE_PXP_SUSPENDED) {
		mutex_unlock(&pxp->mutex);
		return;
	}

	/*
	 * If we have a termination already in progress, we need to wait for
	 * it to complete before queueing another one. Once the first
	 * termination is completed we'll set the state back to
	 * NEEDS_TERMINATION and leave it to the pxp start code to issue it.
	 */
	if (pxp->status == XE_PXP_TERMINATION_IN_PROGRESS) {
		pxp->status = XE_PXP_NEEDS_ADDITIONAL_TERMINATION;
		mutex_unlock(&pxp->mutex);
		return;
	}

	mark_termination_in_progress(pxp);

	mutex_unlock(&pxp->mutex);

	pxp_invalidate_queues(pxp);

	ret = pxp_terminate_hw(pxp);
	if (ret) {
		drm_err(&xe->drm, "PXP termination failed: %pe\n", ERR_PTR(ret));
		mutex_lock(&pxp->mutex);
		pxp->status = XE_PXP_ERROR;
		/* unblock waiters even on failure; status carries the error */
		complete_all(&pxp->termination);
		mutex_unlock(&pxp->mutex);
	}
}
215
/* Advance the state machine once the HW signals that termination completed */
static void pxp_terminate_complete(struct xe_pxp *pxp)
{
	/*
	 * We expect PXP to be in one of 3 states when we get here:
	 * - XE_PXP_TERMINATION_IN_PROGRESS: a single termination event was
	 * requested and it is now completing, so we're ready to start.
	 * - XE_PXP_NEEDS_ADDITIONAL_TERMINATION: a second termination was
	 * requested while the first one was still being processed.
	 * - XE_PXP_SUSPENDED: PXP is now suspended, so we defer everything to
	 * when we come back on resume.
	 */
	mutex_lock(&pxp->mutex);

	switch (pxp->status) {
	case XE_PXP_TERMINATION_IN_PROGRESS:
		pxp->status = XE_PXP_READY_TO_START;
		break;
	case XE_PXP_NEEDS_ADDITIONAL_TERMINATION:
		pxp->status = XE_PXP_NEEDS_TERMINATION;
		break;
	case XE_PXP_SUSPENDED:
		/* Nothing to do */
		break;
	default:
		drm_err(&pxp->xe->drm,
			"PXP termination complete while status was %u\n",
			pxp->status);
	}

	complete_all(&pxp->termination);

	mutex_unlock(&pxp->mutex);
}
249
/*
 * Worker that acts on the PXP events latched by xe_pxp_irq_handler(). The
 * event mask is consumed under xe->irq.lock and then processed outside of it.
 */
static void pxp_irq_work(struct work_struct *work)
{
	struct xe_pxp *pxp = container_of(work, typeof(*pxp), irq.work);
	struct xe_device *xe = pxp->xe;
	u32 events = 0;

	spin_lock_irq(&xe->irq.lock);
	events = pxp->irq.events;
	pxp->irq.events = 0;
	spin_unlock_irq(&xe->irq.lock);

	if (!events)
		return;

	/*
	 * If we're processing a termination irq while suspending then don't
	 * bother, we're going to re-init everything on resume anyway.
	 */
	if ((events & PXP_TERMINATION_REQUEST) && !xe_pm_runtime_get_if_active(xe))
		return;

	if (events & PXP_TERMINATION_REQUEST) {
		/* a new request supersedes any completion we latched with it */
		events &= ~PXP_TERMINATION_COMPLETE;
		pxp_terminate(pxp);
	}

	if (events & PXP_TERMINATION_COMPLETE)
		pxp_terminate_complete(pxp);

	/* balance the PM ref taken for the termination request above */
	if (events & PXP_TERMINATION_REQUEST)
		xe_pm_runtime_put(xe);
}
282
283/**
284 * xe_pxp_irq_handler - Handles PXP interrupts.
285 * @xe: the xe_device structure
286 * @iir: interrupt vector
287 */
void xe_pxp_irq_handler(struct xe_device *xe, u16 iir)
{
	struct xe_pxp *pxp = xe->pxp;

	if (!xe_pxp_is_enabled(pxp)) {
		drm_err(&xe->drm, "PXP irq 0x%x received with PXP disabled!\n", iir);
		return;
	}

	lockdep_assert_held(&xe->irq.lock);

	if (unlikely(!iir))
		return;

	/* both KCR-initiated and FW-requested terminations need the full flow */
	if (iir & (KCR_PXP_STATE_TERMINATED_INTERRUPT |
		   KCR_APP_TERMINATED_PER_FW_REQ_INTERRUPT))
		pxp->irq.events |= PXP_TERMINATION_REQUEST;

	if (iir & KCR_PXP_STATE_RESET_COMPLETE_INTERRUPT)
		pxp->irq.events |= PXP_TERMINATION_COMPLETE;

	/* events are processed by pxp_irq_work() outside of irq context */
	if (pxp->irq.events)
		queue_work(pxp->irq.wq, &pxp->irq.work);
}
312
313static int kcr_pxp_set_status(const struct xe_pxp *pxp, bool enable)
314{
315 u32 val = enable ? REG_MASKED_FIELD_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES) :
316 REG_MASKED_FIELD_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES);
317
318 CLASS(xe_force_wake, fw_ref)(gt_to_fw(pxp->gt), XE_FW_GT);
319 if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FW_GT))
320 return -EIO;
321
322 xe_mmio_write32(&pxp->gt->mmio, KCR_INIT, val);
323
324 return 0;
325}
326
/* Allow display ME writes in KCR, enabling PXP use of the HW */
static int kcr_pxp_enable(const struct xe_pxp *pxp)
{
	return kcr_pxp_set_status(pxp, true);
}
331
/* Disallow display ME writes in KCR, reverting kcr_pxp_enable() */
static int kcr_pxp_disable(const struct xe_pxp *pxp)
{
	return kcr_pxp_set_status(pxp, false);
}
336
/* devm action: tear down the PXP resources allocated in xe_pxp_init() */
static void pxp_fini(void *arg)
{
	struct xe_pxp *pxp = arg;

	destroy_workqueue(pxp->irq.wq);
	xe_pxp_destroy_execution_resources(pxp);

	/* no need to explicitly disable KCR since we're going to do an FLR */
}
346
347/**
348 * xe_pxp_init - initialize PXP support
349 * @xe: the xe_device structure
350 *
351 * Initialize the HW state and allocate the objects required for PXP support.
352 * Note that some of the requirement for PXP support (GSC proxy init, HuC auth)
353 * are performed asynchronously as part of the GSC init. PXP can only be used
354 * after both this function and the async worker have completed.
355 *
356 * Returns 0 if PXP is not supported or if PXP initialization is successful,
357 * other errno value if there is an error during the init.
358 */
359int xe_pxp_init(struct xe_device *xe)
360{
361 struct xe_gt *gt = xe->tiles[0].media_gt;
362 struct xe_pxp *pxp;
363 int err;
364
365 if (!xe_pxp_is_supported(xe))
366 return 0;
367
368 /* we only support PXP on single tile devices with a media GT */
369 if (xe->info.tile_count > 1 || !gt)
370 return 0;
371
372 /* The GSCCS is required for submissions to the GSC FW */
373 if (!(gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0)))
374 return 0;
375
376 /* PXP requires both GSC and HuC firmwares to be available */
377 if (!xe_uc_fw_is_loadable(>->uc.gsc.fw) ||
378 !xe_uc_fw_is_loadable(>->uc.huc.fw)) {
379 drm_info(&xe->drm, "skipping PXP init due to missing FW dependencies");
380 return 0;
381 }
382
383 /*
384 * On PTL, older GSC FWs have a bug that can cause them to crash during
385 * PXP invalidation events, which leads to a complete loss of power
386 * management on the media GT. Therefore, we can't use PXP on FWs that
387 * have this bug, which was fixed in PTL GSC build 1396.
388 */
389 if (xe->info.platform == XE_PANTHERLAKE &&
390 gt->uc.gsc.fw.versions.found[XE_UC_FW_VER_RELEASE].build < 1396) {
391 drm_info(&xe->drm, "PXP requires PTL GSC build 1396 or newer\n");
392 return 0;
393 }
394
395 pxp = drmm_kzalloc(&xe->drm, sizeof(struct xe_pxp), GFP_KERNEL);
396 if (!pxp) {
397 err = -ENOMEM;
398 goto out;
399 }
400
401 INIT_LIST_HEAD(&pxp->queues.list);
402 spin_lock_init(&pxp->queues.lock);
403 INIT_WORK(&pxp->irq.work, pxp_irq_work);
404 pxp->xe = xe;
405 pxp->gt = gt;
406
407 pxp->key_instance = 1;
408 pxp->last_suspend_key_instance = 1;
409
410 /*
411 * we'll use the completions to check if there is an action pending,
412 * so we start them as completed and we reinit it when an action is
413 * triggered.
414 */
415 init_completion(&pxp->activation);
416 init_completion(&pxp->termination);
417 complete_all(&pxp->termination);
418 complete_all(&pxp->activation);
419
420 mutex_init(&pxp->mutex);
421
422 pxp->irq.wq = alloc_ordered_workqueue("pxp-wq", 0);
423 if (!pxp->irq.wq) {
424 err = -ENOMEM;
425 goto out_free;
426 }
427
428 err = kcr_pxp_enable(pxp);
429 if (err)
430 goto out_wq;
431
432 err = xe_pxp_allocate_execution_resources(pxp);
433 if (err)
434 goto out_kcr_disable;
435
436 xe->pxp = pxp;
437
438 return devm_add_action_or_reset(xe->drm.dev, pxp_fini, pxp);
439
440out_kcr_disable:
441 kcr_pxp_disable(pxp);
442out_wq:
443 destroy_workqueue(pxp->irq.wq);
444out_free:
445 drmm_kfree(&xe->drm, pxp);
446out:
447 drm_err(&xe->drm, "PXP initialization failed: %pe\n", ERR_PTR(err));
448 return err;
449}
450
451static int __pxp_start_arb_session(struct xe_pxp *pxp)
452{
453 int ret;
454
455 CLASS(xe_force_wake, fw_ref)(gt_to_fw(pxp->gt), XE_FW_GT);
456 if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FW_GT))
457 return -EIO;
458
459 if (pxp_session_is_in_play(pxp, ARB_SESSION))
460 return -EEXIST;
461
462 ret = xe_pxp_submit_session_init(&pxp->gsc_res, ARB_SESSION);
463 if (ret) {
464 drm_err(&pxp->xe->drm, "Failed to init PXP arb session: %pe\n", ERR_PTR(ret));
465 return ret;
466 }
467
468 ret = pxp_wait_for_session_state(pxp, ARB_SESSION, true);
469 if (ret) {
470 drm_err(&pxp->xe->drm, "PXP ARB session failed to go in play%pe\n", ERR_PTR(ret));
471 return ret;
472 }
473
474 drm_dbg(&pxp->xe->drm, "PXP ARB session is active\n");
475 return 0;
476}
477
478/**
479 * xe_pxp_exec_queue_set_type - Mark a queue as using PXP
480 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
481 * @q: the queue to mark as using PXP
482 * @type: the type of PXP session this queue will use
483 *
484 * Returns 0 if the selected PXP type is supported, -ENODEV otherwise.
485 */
int xe_pxp_exec_queue_set_type(struct xe_pxp *pxp, struct xe_exec_queue *q, u8 type)
{
	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	/* we only support HWDRM sessions right now */
	xe_assert(pxp->xe, type == DRM_XE_PXP_TYPE_HWDRM);

	/* the type is consumed later, when the queue is added via xe_pxp_exec_queue_add() */
	q->pxp.type = type;

	return 0;
}
498
/*
 * Add the queue to the PXP list if the session is active. Returns -EBUSY if
 * the session is not ready yet (caller should retry after a start), -EIO if
 * PXP is in an unrecoverable state.
 */
static int __exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
{
	int ret = 0;

	/*
	 * A queue can be added to the list only if the PXP is in active status,
	 * otherwise the termination might not handle it correctly.
	 */
	mutex_lock(&pxp->mutex);

	if (pxp->status == XE_PXP_ACTIVE) {
		spin_lock_irq(&pxp->queues.lock);
		list_add_tail(&q->pxp.link, &pxp->queues.list);
		spin_unlock_irq(&pxp->queues.lock);
	} else if (pxp->status == XE_PXP_ERROR || pxp->status == XE_PXP_SUSPENDED) {
		ret = -EIO;
	} else {
		ret = -EBUSY; /* try again later */
	}

	mutex_unlock(&pxp->mutex);

	return ret;
}
523
/*
 * Start the PXP default (ARB) session if it is not running already.
 *
 * The state machine is driven under pxp->mutex, but the waits and HW
 * submissions happen outside of it because the completions are signaled from
 * within the lock. If a termination is pending it is executed first and the
 * flow loops back to wait_for_idle.
 */
static int pxp_start(struct xe_pxp *pxp, u8 type)
{
	int ret = 0;
	bool restart;

	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	/* we only support HWDRM sessions right now */
	xe_assert(pxp->xe, type == DRM_XE_PXP_TYPE_HWDRM);

	/* get_readiness_status() returns 0 for in-progress and 1 for done */
	ret = xe_pxp_get_readiness_status(pxp);
	if (ret <= 0)
		return ret ?: -EBUSY;

	ret = 0;

wait_for_idle:
	/*
	 * if there is an action in progress, wait for it. We need to wait
	 * outside the lock because the completion is done from within the lock.
	 * Note that the two actions should never be pending at the same time.
	 */
	if (!wait_for_completion_timeout(&pxp->termination,
					 msecs_to_jiffies(PXP_TERMINATION_TIMEOUT_MS)))
		return -ETIMEDOUT;

	if (!wait_for_completion_timeout(&pxp->activation,
					 msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS)))
		return -ETIMEDOUT;

	restart = false;

	mutex_lock(&pxp->mutex);

	/* If PXP is not already active, turn it on */
	switch (pxp->status) {
	case XE_PXP_ERROR:
		ret = -EIO;
		goto out_unlock;
	case XE_PXP_ACTIVE:
		goto out_unlock;
	case XE_PXP_READY_TO_START:
		pxp->status = XE_PXP_START_IN_PROGRESS;
		reinit_completion(&pxp->activation);
		break;
	case XE_PXP_START_IN_PROGRESS:
		/* If a start is in progress then the completion must not be done */
		XE_WARN_ON(completion_done(&pxp->activation));
		restart = true;
		goto out_unlock;
	case XE_PXP_NEEDS_TERMINATION:
		mark_termination_in_progress(pxp);
		break;
	case XE_PXP_TERMINATION_IN_PROGRESS:
	case XE_PXP_NEEDS_ADDITIONAL_TERMINATION:
		/* If a termination is in progress then the completion must not be done */
		XE_WARN_ON(completion_done(&pxp->termination));
		restart = true;
		goto out_unlock;
	case XE_PXP_SUSPENDED:
	default:
		drm_err(&pxp->xe->drm, "unexpected state during PXP start: %u\n", pxp->status);
		ret = -EIO;
		goto out_unlock;
	}

	mutex_unlock(&pxp->mutex);

	/* the NEEDS_TERMINATION case above re-armed the completion: run the HW termination now */
	if (!completion_done(&pxp->termination)) {
		ret = pxp_terminate_hw(pxp);
		if (ret) {
			drm_err(&pxp->xe->drm, "PXP termination failed before start\n");
			mutex_lock(&pxp->mutex);
			pxp->status = XE_PXP_ERROR;
			complete_all(&pxp->termination);

			goto out_unlock;
		}

		goto wait_for_idle;
	}

	/* All the cases except for start should have exited earlier */
	XE_WARN_ON(completion_done(&pxp->activation));
	ret = __pxp_start_arb_session(pxp);

	mutex_lock(&pxp->mutex);

	complete_all(&pxp->activation);

	/*
	 * Any other process should wait until the state goes away from
	 * XE_PXP_START_IN_PROGRESS, so if the state is not that something went
	 * wrong. Mark the status as needing termination and try again.
	 */
	if (pxp->status != XE_PXP_START_IN_PROGRESS) {
		drm_err(&pxp->xe->drm, "unexpected state after PXP start: %u\n", pxp->status);
		pxp->status = XE_PXP_NEEDS_TERMINATION;
		restart = true;
		goto out_unlock;
	}

	/* If everything went ok, mark PXP as active; the caller will add the queue */
	if (!ret)
		pxp->status = XE_PXP_ACTIVE;
	else
		pxp->status = XE_PXP_ERROR;

out_unlock:
	mutex_unlock(&pxp->mutex);

	if (restart)
		goto wait_for_idle;

	return ret;
}
642
643/**
644 * xe_pxp_exec_queue_add - add a queue to the PXP list
645 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
646 * @q: the queue to add to the list
647 *
648 * If PXP is enabled and the prerequisites are done, start the PXP default
649 * session (if not already running) and add the queue to the PXP list.
650 *
651 * Returns 0 if the PXP session is running and the queue is in the list,
652 * -ENODEV if PXP is disabled, -EBUSY if the PXP prerequisites are not done,
653 * other errno value if something goes wrong during the session start.
654 */
int xe_pxp_exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
{
	int ret;

	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	/*
	 * Runtime suspend kills PXP, so we take a reference to prevent it from
	 * happening while we have active queues that use PXP
	 */
	xe_pm_runtime_get(pxp->xe);

start:
	ret = pxp_start(pxp, q->pxp.type);

	if (!ret) {
		ret = __exec_queue_add(pxp, q);
		/* -EBUSY: session state changed between start and add, retry */
		if (ret == -EBUSY)
			goto start;
	}

	/*
	 * in the successful case the PM ref is released from
	 * xe_pxp_exec_queue_remove
	 */
	if (ret)
		xe_pm_runtime_put(pxp->xe);

	return ret;
}
ALLOW_ERROR_INJECTION(xe_pxp_exec_queue_add, ERRNO);
687
/*
 * Remove @q from the PXP list and drop the PM ref taken when it was added.
 * @lock selects whether pxp->queues.lock is taken here; pxp_invalidate_queues
 * passes false because it operates on queues already moved off the shared
 * list.
 */
static void __pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q, bool lock)
{
	bool need_pm_put = false;

	if (!xe_pxp_is_enabled(pxp))
		return;

	if (lock)
		spin_lock_irq(&pxp->queues.lock);

	/* only queues still on a list hold a PM ref (safe to call twice) */
	if (!list_empty(&q->pxp.link)) {
		list_del_init(&q->pxp.link);
		need_pm_put = true;
	}

	q->pxp.type = DRM_XE_PXP_TYPE_NONE;

	if (lock)
		spin_unlock_irq(&pxp->queues.lock);

	if (need_pm_put)
		xe_pm_runtime_put(pxp->xe);
}
711
712/**
713 * xe_pxp_exec_queue_remove - remove a queue from the PXP list
714 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
715 * @q: the queue to remove from the list
716 *
717 * If PXP is enabled and the exec_queue is in the list, the queue will be
718 * removed from the list and its PM reference will be released. It is safe to
719 * call this function multiple times for the same queue.
720 */
void xe_pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q)
{
	/* lock=true: take pxp->queues.lock internally */
	__pxp_exec_queue_remove(pxp, q, true);
}
725
/*
 * Kill all queues on the PXP list. Queues are moved to a private list under
 * the lock (with a ref taken so they can't be destroyed underneath us) and
 * then killed and removed outside of it.
 */
static void pxp_invalidate_queues(struct xe_pxp *pxp)
{
	struct xe_exec_queue *tmp, *q;
	LIST_HEAD(to_clean);

	spin_lock_irq(&pxp->queues.lock);

	list_for_each_entry_safe(q, tmp, &pxp->queues.list, pxp.link) {
		/* skip queues that are already on their way to destruction */
		q = xe_exec_queue_get_unless_zero(q);
		if (!q)
			continue;

		list_move_tail(&q->pxp.link, &to_clean);
	}
	spin_unlock_irq(&pxp->queues.lock);

	list_for_each_entry_safe(q, tmp, &to_clean, pxp.link) {
		xe_exec_queue_kill(q);

		/*
		 * We hold a ref to the queue so there is no risk of racing with
		 * the calls to exec_queue_remove coming from exec_queue_destroy.
		 */
		__pxp_exec_queue_remove(pxp, q, false);

		xe_exec_queue_put(q);
	}
}
754
755/**
756 * xe_pxp_key_assign - mark a BO as using the current PXP key iteration
757 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
758 * @bo: the BO to mark
759 *
760 * Returns: -ENODEV if PXP is disabled, 0 otherwise.
761 */
int xe_pxp_key_assign(struct xe_pxp *pxp, struct xe_bo *bo)
{
	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	/* a BO must only be assigned a key once */
	xe_assert(pxp->xe, !bo->pxp_key_instance);

	/*
	 * Note that the PXP key handling is inherently racey, because the key
	 * can theoretically change at any time (although it's unlikely to do
	 * so without triggers), even right after we copy it. Taking a lock
	 * wouldn't help because the value might still change as soon as we
	 * release the lock.
	 * Userspace needs to handle the fact that their BOs can go invalid at
	 * any point.
	 */
	bo->pxp_key_instance = pxp->key_instance;

	return 0;
}
782
783/**
784 * xe_pxp_bo_key_check - check if the key used by a xe_bo is valid
785 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
786 * @bo: the BO we want to check
787 *
788 * Checks whether a BO was encrypted with the current key or an obsolete one.
789 *
790 * Returns: 0 if the key is valid, -ENODEV if PXP is disabled, -EINVAL if the
791 * BO is not using PXP, -ENOEXEC if the key is not valid.
792 */
int xe_pxp_bo_key_check(struct xe_pxp *pxp, struct xe_bo *bo)
{
	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	if (!xe_bo_is_protected(bo))
		return -EINVAL;

	/* a protected BO must have been assigned a key via xe_pxp_key_assign */
	xe_assert(pxp->xe, bo->pxp_key_instance);

	/*
	 * Note that the PXP key handling is inherently racey, because the key
	 * can theoretically change at any time (although it's unlikely to do
	 * so without triggers), even right after we check it. Taking a lock
	 * wouldn't help because the value might still change as soon as we
	 * release the lock.
	 * We mitigate the risk by checking the key at multiple points (on each
	 * submission involving the BO and right before flipping it on the
	 * display), but there is still a very small chance that we could
	 * operate on an invalid BO for a single submission or a single frame
	 * flip. This is a compromise made to protect the encrypted data (which
	 * is what the key termination is for).
	 */
	if (bo->pxp_key_instance != pxp->key_instance)
		return -ENOEXEC;

	return 0;
}
821
822/**
823 * xe_pxp_obj_key_check - check if the key used by a drm_gem_obj is valid
824 * @obj: the drm_gem_obj we want to check
825 *
826 * Checks whether a drm_gem_obj was encrypted with the current key or an
827 * obsolete one.
828 *
829 * Returns: 0 if the key is valid, -ENODEV if PXP is disabled, -EINVAL if the
830 * obj is not using PXP, -ENOEXEC if the key is not valid.
831 */
832int xe_pxp_obj_key_check(struct drm_gem_object *obj)
833{
834 struct xe_bo *bo = gem_to_xe_bo(obj);
835 struct xe_device *xe = xe_bo_device(bo);
836 struct xe_pxp *pxp = xe->pxp;
837
838 return xe_pxp_bo_key_check(pxp, bo);
839}
840
841/**
842 * xe_pxp_pm_suspend - prepare PXP for HW suspend
843 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
844 *
845 * Makes sure all PXP actions have completed and invalidates all PXP queues
846 * and objects before we go into a suspend state.
847 *
848 * Returns: 0 if successful, a negative errno value otherwise.
849 */
int xe_pxp_pm_suspend(struct xe_pxp *pxp)
{
	bool needs_queue_inval = false;
	int ret = 0;

	if (!xe_pxp_is_enabled(pxp))
		return 0;

wait_for_activation:
	/* a start might be in flight; wait for it before changing the status */
	if (!wait_for_completion_timeout(&pxp->activation,
					 msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS)))
		ret = -ETIMEDOUT;

	mutex_lock(&pxp->mutex);

	switch (pxp->status) {
	case XE_PXP_ERROR:
	case XE_PXP_READY_TO_START:
	case XE_PXP_SUSPENDED:
	case XE_PXP_TERMINATION_IN_PROGRESS:
	case XE_PXP_NEEDS_ADDITIONAL_TERMINATION:
		/*
		 * If PXP is not running there is nothing to cleanup. If there
		 * is a termination pending then no need to issue another one.
		 */
		break;
	case XE_PXP_START_IN_PROGRESS:
		mutex_unlock(&pxp->mutex);
		goto wait_for_activation;
	case XE_PXP_NEEDS_TERMINATION:
		/* If PXP was never used we can skip the cleanup */
		if (pxp->key_instance == pxp->last_suspend_key_instance)
			break;
		fallthrough;
	case XE_PXP_ACTIVE:
		/* invalidate objects encrypted with the pre-suspend key */
		pxp->key_instance++;
		needs_queue_inval = true;
		break;
	}

	/*
	 * We set this even if we were in error state, hoping the suspend clears
	 * the error. Worse case we fail again and go in error state again.
	 */
	pxp->status = XE_PXP_SUSPENDED;

	mutex_unlock(&pxp->mutex);

	if (needs_queue_inval)
		pxp_invalidate_queues(pxp);

	/*
	 * if there is a termination in progress, wait for it.
	 * We need to wait outside the lock because the completion is done from
	 * within the lock
	 */
	if (!wait_for_completion_timeout(&pxp->termination,
					 msecs_to_jiffies(PXP_TERMINATION_TIMEOUT_MS)))
		ret = -ETIMEDOUT;

	/* remember the key so a never-used PXP can skip cleanup next suspend */
	pxp->last_suspend_key_instance = pxp->key_instance;

	return ret;
}
914
915/**
916 * xe_pxp_pm_resume - re-init PXP after HW suspend
917 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
918 */
void xe_pxp_pm_resume(struct xe_pxp *pxp)
{
	int err;

	if (!xe_pxp_is_enabled(pxp))
		return;

	/* KCR was reset across suspend; re-enable it */
	err = kcr_pxp_enable(pxp);

	mutex_lock(&pxp->mutex);

	/* suspend must have left us in the SUSPENDED state */
	xe_assert(pxp->xe, pxp->status == XE_PXP_SUSPENDED);

	/* a termination will be issued lazily on the next PXP start */
	if (err)
		pxp->status = XE_PXP_ERROR;
	else
		pxp->status = XE_PXP_NEEDS_TERMINATION;

	mutex_unlock(&pxp->mutex);
}