/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_VM_H_
#define _XE_VM_H_

#include "xe_assert.h"
#include "xe_bo_types.h"
#include "xe_macros.h"
#include "xe_map.h"
#include "xe_vm_types.h"

/**
 * MAX_FAULTS_SAVED_PER_VM - Maximum number of faults each vm can store before future
 * faults are discarded to prevent memory overuse
 */
#define MAX_FAULTS_SAVED_PER_VM	50

struct drm_device;
struct drm_printer;
struct drm_file;

struct ttm_buffer_object;

struct dma_fence;

struct xe_exec_queue;
struct xe_file;
struct xe_pagefault;
struct xe_sync_entry;
struct xe_svm_range;
struct drm_exec;

struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef);

struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id);
int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node);

static inline struct xe_vm *xe_vm_get(struct xe_vm *vm)
{
	drm_gpuvm_get(&vm->gpuvm);
	return vm;
}

static inline void xe_vm_put(struct xe_vm *vm)
{
	drm_gpuvm_put(&vm->gpuvm);
}
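
/*
 * Illustrative sketch (not part of this header's API): typical reference
 * handling. xe_vm_lookup() is assumed to return a vm with a reference held,
 * and xe_vm_get()/xe_vm_put() take and drop additional references, so a
 * lookup-based user might look roughly like this (vm_id is a placeholder):
 *
 *	struct xe_vm *vm = xe_vm_lookup(xef, vm_id);
 *
 *	if (!vm)
 *		return -ENOENT;
 *	(use vm)
 *	xe_vm_put(vm);
 */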

int xe_vm_lock(struct xe_vm *vm, bool intr);

void xe_vm_unlock(struct xe_vm *vm);
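
/*
 * Illustrative sketch: xe_vm_lock() takes the vm's reservation lock, and with
 * @intr == true it is assumed to return a negative error code if interrupted,
 * so callers are expected to check the return value:
 *
 *	int err = xe_vm_lock(vm, true);
 *
 *	if (err)
 *		return err;
 *	(operate on state protected by the vm's resv)
 *	xe_vm_unlock(vm);
 */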

static inline bool xe_vm_is_closed(struct xe_vm *vm)
{
	/* Only guaranteed not to change when vm->lock is held */
	return !vm->size;
}

static inline bool xe_vm_is_banned(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_BANNED;
}

static inline bool xe_vm_is_closed_or_banned(struct xe_vm *vm)
{
	lockdep_assert_held(&vm->lock);
	return xe_vm_is_closed(vm) || xe_vm_is_banned(vm);
}

struct xe_vma *
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range);

bool xe_vma_has_default_mem_attrs(struct xe_vma *vma);

void xe_vm_find_cpu_addr_mirror_vma_range(struct xe_vm *vm,
					   u64 *start,
					   u64 *end);
/**
 * xe_vm_has_scratch() - Whether the vm is configured for scratch PTEs
 * @vm: The vm
 *
 * Return: whether the vm populates unmapped areas with scratch PTEs
 */
static inline bool xe_vm_has_scratch(const struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_SCRATCH_PAGE;
}

/**
 * gpuvm_to_vm() - Return the embedding xe_vm from a struct drm_gpuvm pointer
 * @gpuvm: The struct drm_gpuvm pointer
 *
 * Return: Pointer to the embedding struct xe_vm.
 */
static inline struct xe_vm *gpuvm_to_vm(struct drm_gpuvm *gpuvm)
{
	return container_of(gpuvm, struct xe_vm, gpuvm);
}

static inline struct xe_vm *gpuva_to_vm(struct drm_gpuva *gpuva)
{
	return gpuvm_to_vm(gpuva->vm);
}

static inline struct xe_vma *gpuva_to_vma(struct drm_gpuva *gpuva)
{
	return container_of(gpuva, struct xe_vma, gpuva);
}

static inline struct xe_vma_op *gpuva_op_to_vma_op(struct drm_gpuva_op *op)
{
	return container_of(op, struct xe_vma_op, base);
}
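
/*
 * Illustrative sketch of how the embedding helpers above compose: starting
 * from a struct drm_gpuva handed back by the drm_gpuvm core, the containing
 * xe_vma and its xe_vm are recovered with container_of() rather than extra
 * lookups, roughly:
 *
 *	struct xe_vma *vma = gpuva_to_vma(gpuva);
 *	struct xe_vm *vm = gpuva_to_vm(gpuva);
 *
 * where gpuva_to_vm(gpuva) is equivalent to gpuvm_to_vm(gpuva->vm).
 */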

/**
 * DOC: Provide accessors for vma members to facilitate easy change of
 * implementation.
 */
static inline u64 xe_vma_start(struct xe_vma *vma)
{
	return vma->gpuva.va.addr;
}

static inline u64 xe_vma_size(struct xe_vma *vma)
{
	return vma->gpuva.va.range;
}

static inline u64 xe_vma_end(struct xe_vma *vma)
{
	return xe_vma_start(vma) + xe_vma_size(vma);
}

static inline u64 xe_vma_bo_offset(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline struct xe_bo *xe_vma_bo(struct xe_vma *vma)
{
	return !vma->gpuva.gem.obj ? NULL :
		container_of(vma->gpuva.gem.obj, struct xe_bo, ttm.base);
}

static inline struct xe_vm *xe_vma_vm(struct xe_vma *vma)
{
	return container_of(vma->gpuva.vm, struct xe_vm, gpuvm);
}

static inline bool xe_vma_read_only(struct xe_vma *vma)
{
	return vma->gpuva.flags & XE_VMA_READ_ONLY;
}

static inline u64 xe_vma_userptr(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline bool xe_vma_is_null(struct xe_vma *vma)
{
	return vma->gpuva.flags & DRM_GPUVA_SPARSE;
}

static inline bool xe_vma_is_cpu_addr_mirror(struct xe_vma *vma)
{
	return vma->gpuva.flags & XE_VMA_SYSTEM_ALLOCATOR;
}

static inline bool xe_vma_has_no_bo(struct xe_vma *vma)
{
	return !xe_vma_bo(vma);
}

static inline bool xe_vma_is_userptr(struct xe_vma *vma)
{
	return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma) &&
		!xe_vma_is_cpu_addr_mirror(vma);
}
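
/*
 * Illustrative sketch: the predicates above are assumed to split VMAs into
 * four mutually exclusive backing types, so a walker might dispatch on them
 * as follows (the handle_* helpers are placeholders):
 *
 *	if (xe_vma_is_null(vma))
 *		handle_sparse(vma);
 *	else if (xe_vma_is_cpu_addr_mirror(vma))
 *		handle_cpu_addr_mirror(vma);
 *	else if (xe_vma_is_userptr(vma))
 *		handle_userptr(vma);
 *	else
 *		handle_bo_backed(vma, xe_vma_bo(vma));
 */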

struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr);

int xe_vma_need_vram_for_atomic(struct xe_device *xe, struct xe_vma *vma, bool is_atomic);

int xe_vm_alloc_madvise_vma(struct xe_vm *vm, uint64_t addr, uint64_t size);

int xe_vm_alloc_cpu_addr_mirror_vma(struct xe_vm *vm, uint64_t addr, uint64_t size);

/**
 * to_userptr_vma() - Return a pointer to an embedding userptr vma
 * @vma: Pointer to the embedded struct xe_vma
 *
 * Return: Pointer to the embedding userptr vma
 */
static inline struct xe_userptr_vma *to_userptr_vma(struct xe_vma *vma)
{
	xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_userptr(vma));
	return container_of(vma, struct xe_userptr_vma, vma);
}

u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);

int xe_vm_create_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file);
int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file);
int xe_vm_query_vmas_attrs_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int xe_vm_get_property_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file);

void xe_vm_close_and_put(struct xe_vm *vm);

static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_FAULT_MODE;
}

static inline bool xe_vm_in_lr_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_LR_MODE;
}

static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm)
{
	return xe_vm_in_lr_mode(vm) && !xe_vm_in_fault_mode(vm);
}

static inline bool xe_vm_allow_vm_eviction(struct xe_vm *vm)
{
	return !xe_vm_in_lr_mode(vm) ||
		(xe_vm_in_fault_mode(vm) &&
		 !(vm->flags & XE_VM_FLAG_NO_VM_OVERCOMMIT));
}
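
/*
 * Illustrative note on the mode predicates above: long-running (LR) vms are
 * assumed to come in two flavours, fault mode (recoverable page faults) and
 * preempt-fence mode (LR without fault mode), so mode-specific code might
 * branch as follows (the helpers called are placeholders):
 *
 *	if (xe_vm_in_preempt_fence_mode(vm))
 *		manage_preempt_fences(vm);
 *	else if (xe_vm_in_fault_mode(vm))
 *		rely_on_recoverable_faults(vm);
 */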

int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);

int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma,
				u8 tile_mask);
struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
				     struct xe_vma *vma,
				     struct xe_svm_range *range,
				     u8 tile_mask);
struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
				     struct xe_svm_range *range);

int xe_vm_invalidate_vma(struct xe_vma *vma);

int xe_vm_invalidate_vma_submit(struct xe_vma *vma, struct xe_tlb_inval_batch *batch);

int xe_vm_validate_protected(struct xe_vm *vm);

static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
{
	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
	queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
}

/**
 * xe_vm_reactivate_rebind() - Reactivate the rebind functionality on compute
 * vms.
 * @vm: The vm.
 *
 * If the rebind functionality on a compute vm was disabled because there was
 * nothing to execute, reactivate it and run the rebind worker.
 * This function should be called after submitting a batch to a compute vm.
 */
static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
{
	if (xe_vm_in_preempt_fence_mode(vm) && vm->preempt.rebind_deactivated) {
		vm->preempt.rebind_deactivated = false;
		xe_vm_queue_rebind_worker(vm);
	}
}
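
/*
 * Illustrative sketch: per the comment above, a submission path for a
 * preempt-fence mode vm might reactivate rebinding right after queuing a
 * batch (submit_batch() is a placeholder for the actual submission call):
 *
 *	err = submit_batch(q, batch);
 *	if (!err)
 *		xe_vm_reactivate_rebind(vm);
 */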

int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma);

int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
			  unsigned int num_fences);

struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo,
				       struct xe_exec_queue *q, u64 addr,
				       enum xe_cache_level cache_lvl);

void xe_vm_resume_rebind_worker(struct xe_vm *vm);

/**
 * xe_vm_resv() - Returns the vm's reservation object
 * @vm: The vm
 *
 * Return: Pointer to the vm's reservation object.
 */
static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
{
	return drm_gpuvm_resv(&vm->gpuvm);
}

void xe_vm_kill(struct xe_vm *vm, bool unlocked);

void xe_vm_add_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
void xe_vm_remove_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);

/**
 * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
 * @vm: The vm
 */
#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))
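
/*
 * Illustrative sketch: functions that require the vm's reservation object to
 * be locked can document and check that requirement with xe_vm_assert_held(),
 * while xe_vm_resv() exposes the underlying dma_resv for direct dma-resv
 * operations. A minimal example, assuming the caller already holds the lock:
 *
 *	static void example_touch_vm_state(struct xe_vm *vm)
 *	{
 *		xe_vm_assert_held(vm);
 *		(modify state protected by xe_vm_resv(vm))
 *	}
 */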

int xe_vm_drm_exec_lock(struct xe_vm *vm, struct drm_exec *exec);

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
#define vm_dbg drm_dbg
#else
__printf(2, 3)
static inline void vm_dbg(const struct drm_device *dev,
			  const char *format, ...)
{ /* noop */ }
#endif
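
/*
 * Illustrative sketch: vm_dbg() takes the same arguments as drm_dbg(), so a
 * caller might trace a bind range like this (the exact device pointer
 * expression is an assumption for this example):
 *
 *	vm_dbg(&vm->xe->drm, "BIND: addr=%#llx, range=%#llx",
 *	       xe_vma_start(vma), xe_vma_size(vma));
 */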

struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);
void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);

void xe_vm_add_fault_entry_pf(struct xe_vm *vm, struct xe_pagefault *pf);

/**
 * xe_vm_set_validating() - Register this task as currently making bos resident
 * @vm: Pointer to the vm or NULL.
 * @allow_res_evict: Allow eviction of buffer objects bound to @vm when
 * validating.
 *
 * Register this task as currently making bos resident for the vm. Intended
 * to avoid eviction by the same task of shared bos bound to the vm.
 * Call with the vm's resv lock held.
 */
static inline void xe_vm_set_validating(struct xe_vm *vm, bool allow_res_evict)
{
	if (vm && !allow_res_evict) {
		xe_vm_assert_held(vm);
		/* Pairs with READ_ONCE in xe_vm_is_validating() */
		WRITE_ONCE(vm->validation.validating, current);
	}
}

/**
 * xe_vm_clear_validating() - Unregister this task as currently making bos resident
 * @vm: Pointer to the vm or NULL
 * @allow_res_evict: Eviction from @vm was allowed. Must be set to the same
 * value as for xe_vm_set_validating().
 *
 * Unregister this task as currently making bos resident for the vm. Intended
 * to avoid eviction by the same task of shared bos bound to the vm.
 * Call with the vm's resv lock held.
 */
static inline void xe_vm_clear_validating(struct xe_vm *vm, bool allow_res_evict)
{
	if (vm && !allow_res_evict) {
		/* Pairs with READ_ONCE in xe_vm_is_validating() */
		WRITE_ONCE(vm->validation.validating, NULL);
	}
}

/**
 * xe_vm_is_validating() - Whether bos bound to the vm are currently being made resident
 * by the current task.
 * @vm: Pointer to the vm.
 *
 * If this function returns %true, we should be in a vm resv locked region, since
 * the current process is the same task that called xe_vm_set_validating().
 * The function asserts that that's indeed the case.
 *
 * Return: %true if the task is currently making bos resident, %false otherwise.
 */
static inline bool xe_vm_is_validating(struct xe_vm *vm)
{
	/* Pairs with WRITE_ONCE in xe_vm_set_validating() */
	if (READ_ONCE(vm->validation.validating) == current) {
		xe_vm_assert_held(vm);
		return true;
	}
	return false;
}
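
/*
 * Illustrative sketch: xe_vm_set_validating() and xe_vm_clear_validating()
 * are assumed to bracket a validation section under the vm's resv lock, with
 * the same allow_res_evict value passed to both calls
 * (make_bos_resident() is a placeholder for the actual validation work):
 *
 *	xe_vm_set_validating(vm, allow_res_evict);
 *	err = make_bos_resident(vm);
 *	xe_vm_clear_validating(vm, allow_res_evict);
 */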

/**
 * xe_vm_set_validation_exec() - Accessor to set the drm_exec object
 * @vm: The vm we want to register a drm_exec object with.
 * @exec: The exec object we want to register.
 *
 * Set the drm_exec object used to lock the vm's resv.
 */
static inline void xe_vm_set_validation_exec(struct xe_vm *vm, struct drm_exec *exec)
{
	xe_vm_assert_held(vm);
	xe_assert(vm->xe, !!exec ^ !!vm->validation._exec);
	vm->validation._exec = exec;
}

/**
 * xe_vm_validation_exec() - Accessor to read the drm_exec object
 * @vm: The vm whose registered drm_exec object we want to read.
 *
 * Return: The drm_exec object used to lock the vm's resv. The value
 * is a valid pointer, %NULL, or one of the special values defined in
 * xe_validation.h.
 */
static inline struct drm_exec *xe_vm_validation_exec(struct xe_vm *vm)
{
	xe_vm_assert_held(vm);
	return vm->validation._exec;
}

/**
 * xe_vm_has_valid_gpu_mapping() - Advisory helper to check if VMA or SVM range has
 * a valid GPU mapping
 * @tile: The tile which the GPU mapping belongs to
 * @tile_present: Tile present mask
 * @tile_invalidated: Tile invalidated mask
 *
 * The READ_ONCEs pair with WRITE_ONCEs in either the TLB invalidation paths
 * (xe_vm.c, xe_svm.c) or the binding paths (xe_pt.c). These are not reliable
 * without the notifier lock in userptr or SVM cases, and not reliable without
 * the BO dma-resv lock in the BO case. As such, they should only be used in
 * opportunistic cases (e.g., skipping a page fault fix or not skipping a TLB
 * invalidation) where it is harmless.
 *
 * Return: True if there are valid GPU pages, False otherwise
 */
#define xe_vm_has_valid_gpu_mapping(tile, tile_present, tile_invalidated)	\
	((READ_ONCE(tile_present) & ~READ_ONCE(tile_invalidated)) & BIT((tile)->id))
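
/*
 * Illustrative sketch: as described above, the check is only advisory, so a
 * fault handler might use it to opportunistically skip redundant work, e.g.
 * returning early when the pages are already mapped for the faulting tile
 * (using vma->tile_present / vma->tile_invalidated as the backing fields is
 * an assumption for this example):
 *
 *	if (xe_vm_has_valid_gpu_mapping(tile, vma->tile_present,
 *					vma->tile_invalidated))
 *		return 0;
 */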

void xe_vma_mem_attr_copy(struct xe_vma_mem_attr *to, struct xe_vma_mem_attr *from);
#endif