/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_BO_H_
#define _XE_BO_H_

#include <drm/ttm/ttm_tt.h>

#include "xe_bo_types.h"
#include "xe_ggtt.h"
#include "xe_macros.h"
#include "xe_validation.h"
#include "xe_vm_types.h"
#include "xe_vm.h"
#include "xe_vram_types.h"

#define XE_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */

#define XE_BO_FLAG_USER BIT(0)
/* The bits below need to be contiguous, or things break */
#define XE_BO_FLAG_SYSTEM BIT(1)
#define XE_BO_FLAG_VRAM0 BIT(2)
#define XE_BO_FLAG_VRAM1 BIT(3)
#define XE_BO_FLAG_VRAM_MASK (XE_BO_FLAG_VRAM0 | XE_BO_FLAG_VRAM1)
/* -- */
#define XE_BO_FLAG_STOLEN BIT(4)
#define XE_BO_FLAG_VRAM(vram) (XE_BO_FLAG_VRAM0 << ((vram)->id))
#define XE_BO_FLAG_VRAM_IF_DGFX(tile) (IS_DGFX(tile_to_xe(tile)) ? \
				       XE_BO_FLAG_VRAM((tile)->mem.vram) : \
				       XE_BO_FLAG_SYSTEM)
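/*
 * Illustration (editorial sketch, not part of the header proper): because
 * the VRAM placement bits above are contiguous, XE_BO_FLAG_VRAM(vram) can
 * derive a placement flag from a VRAM region id by shifting. For a
 * hypothetical region with id == 1:
 *
 *	XE_BO_FLAG_VRAM0 << 1 == BIT(2) << 1 == BIT(3) == XE_BO_FLAG_VRAM1
 *
 * If the bits were not contiguous, the shift would land on an unrelated
 * flag, which is exactly what the "need to be contiguous" comment above
 * guards against.
 */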
#define XE_BO_FLAG_GGTT BIT(5)
#define XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE BIT(6)
#define XE_BO_FLAG_PINNED BIT(7)
#define XE_BO_FLAG_NO_RESV_EVICT BIT(8)
#define XE_BO_FLAG_DEFER_BACKING BIT(9)
#define XE_BO_FLAG_FORCE_WC BIT(10)
#define XE_BO_FLAG_FIXED_PLACEMENT BIT(11)
#define XE_BO_FLAG_PAGETABLE BIT(12)
#define XE_BO_FLAG_NEEDS_CPU_ACCESS BIT(13)
#define XE_BO_FLAG_NEEDS_UC BIT(14)
#define XE_BO_FLAG_NEEDS_64K BIT(15)
#define XE_BO_FLAG_NEEDS_2M BIT(16)
#define XE_BO_FLAG_GGTT_INVALIDATE BIT(17)
#define XE_BO_FLAG_PINNED_NORESTORE BIT(18)
#define XE_BO_FLAG_PINNED_LATE_RESTORE BIT(19)
#define XE_BO_FLAG_GGTT0 BIT(20)
#define XE_BO_FLAG_GGTT1 BIT(21)
#define XE_BO_FLAG_GGTT2 BIT(22)
#define XE_BO_FLAG_GGTT3 BIT(23)
#define XE_BO_FLAG_CPU_ADDR_MIRROR BIT(24)
#define XE_BO_FLAG_FORCE_USER_VRAM BIT(25)
#define XE_BO_FLAG_NO_COMPRESSION BIT(26)

/* this one is triggered internally only */
#define XE_BO_FLAG_INTERNAL_TEST BIT(30)
#define XE_BO_FLAG_INTERNAL_64K BIT(31)

#define XE_BO_FLAG_GGTT_ALL (XE_BO_FLAG_GGTT0 | \
			     XE_BO_FLAG_GGTT1 | \
			     XE_BO_FLAG_GGTT2 | \
			     XE_BO_FLAG_GGTT3)

#define XE_BO_FLAG_GGTTx(tile) \
	(XE_BO_FLAG_GGTT0 << (tile)->id)

#define XE_PTE_SHIFT 12
#define XE_PAGE_SIZE (1 << XE_PTE_SHIFT)
#define XE_PTE_MASK (XE_PAGE_SIZE - 1)
#define XE_PDE_SHIFT (XE_PTE_SHIFT - 3)
#define XE_PDES (1 << XE_PDE_SHIFT)
#define XE_PDE_MASK (XE_PDES - 1)

#define XE_64K_PTE_SHIFT 16
#define XE_64K_PAGE_SIZE (1 << XE_64K_PTE_SHIFT)
#define XE_64K_PTE_MASK (XE_64K_PAGE_SIZE - 1)
#define XE_64K_PDE_MASK (XE_PDE_MASK >> 4)
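/*
 * Worked example (derived from the definitions above, for illustration):
 * XE_PTE_SHIFT = 12, so XE_PAGE_SIZE = 1 << 12 = 4096 bytes and
 * XE_PTE_MASK = 0xfff. XE_PDE_SHIFT = 12 - 3 = 9, giving XE_PDES = 512
 * entries per directory level (a 4K page holds 512 8-byte entries).
 * For the 64K variants, XE_64K_PTE_SHIFT = 16 yields a 65536-byte page,
 * and XE_64K_PDE_MASK = 0x1ff >> 4 = 0x1f, i.e. 32 64K entries span the
 * same 2M range as 512 4K entries.
 */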

#define XE_PL_SYSTEM TTM_PL_SYSTEM
#define XE_PL_TT TTM_PL_TT
#define XE_PL_VRAM0 TTM_PL_VRAM
#define XE_PL_VRAM1 (XE_PL_VRAM0 + 1)
#define XE_PL_STOLEN (TTM_NUM_MEM_TYPES - 1)

#define XE_BO_PROPS_INVALID (-1)

#define XE_PCI_BARRIER_MMAP_OFFSET (0x50 << XE_PTE_SHIFT)

/**
 * enum xe_madv_purgeable_state - Buffer object purgeable state enumeration
 *
 * This enum defines the possible purgeable states for a buffer object,
 * allowing userspace to provide memory usage hints to the kernel for
 * better memory management under pressure.
 *
 * @XE_MADV_PURGEABLE_WILLNEED: The buffer object is needed and should not be purged.
 * This is the default state.
 * @XE_MADV_PURGEABLE_DONTNEED: The buffer object is not currently needed and can be
 * purged by the kernel under memory pressure.
 * @XE_MADV_PURGEABLE_PURGED: The buffer object has been purged by the kernel.
 *
 * Accessing a purged buffer will result in an error. Per i915 semantics,
 * once purged, a BO remains permanently invalid and must be destroyed and recreated.
 */
enum xe_madv_purgeable_state {
	XE_MADV_PURGEABLE_WILLNEED,
	XE_MADV_PURGEABLE_DONTNEED,
	XE_MADV_PURGEABLE_PURGED,
};

struct sg_table;

struct xe_bo *xe_bo_alloc(void);
void xe_bo_free(struct xe_bo *bo);

struct xe_bo *xe_bo_init_locked(struct xe_device *xe, struct xe_bo *bo,
				struct xe_tile *tile, struct dma_resv *resv,
				struct ttm_lru_bulk_move *bulk, size_t size,
				u16 cpu_caching, enum ttm_bo_type type,
				u32 flags, struct drm_exec *exec);
struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
				  struct xe_vm *vm, size_t size,
				  enum ttm_bo_type type, u32 flags,
				  struct drm_exec *exec);
struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_vm *vm, size_t size,
				u16 cpu_caching, u32 flags, struct drm_exec *exec);
struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
				   struct xe_vm *vm, size_t size,
				   enum ttm_bo_type type, u32 flags,
				   struct drm_exec *exec);
struct xe_bo *xe_bo_create_pin_map_novm(struct xe_device *xe, struct xe_tile *tile,
					size_t size, enum ttm_bo_type type, u32 flags,
					bool intr);
struct xe_bo *xe_bo_create_pin_range_novm(struct xe_device *xe, struct xe_tile *tile,
					  size_t size, u64 start, u64 end,
					  enum ttm_bo_type type, u32 flags);
struct xe_bo *
xe_bo_create_pin_map_at_novm(struct xe_device *xe, struct xe_tile *tile,
			     size_t size, u64 offset, enum ttm_bo_type type,
			     u32 flags, u64 alignment, bool intr);
struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
					   size_t size, u32 flags);
void xe_managed_bo_unpin_map_no_vm(struct xe_bo *bo);
struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
					     const void *data, size_t size, u32 flags);
int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src);

int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
			      u32 bo_flags, enum ttm_bo_type type);

static inline struct xe_bo *ttm_to_xe_bo(const struct ttm_buffer_object *bo)
{
	return container_of(bo, struct xe_bo, ttm);
}

static inline struct xe_bo *gem_to_xe_bo(const struct drm_gem_object *obj)
{
	return container_of(obj, struct xe_bo, ttm.base);
}

#define xe_bo_device(bo) ttm_to_xe_device((bo)->ttm.bdev)

static inline struct xe_bo *xe_bo_get(struct xe_bo *bo)
{
	if (bo)
		drm_gem_object_get(&bo->ttm.base);

	return bo;
}

void xe_bo_put(struct xe_bo *bo);

/**
 * xe_bo_get_unless_zero() - Conditionally obtain a GEM object refcount on an
 * xe bo
 * @bo: The bo for which we want to obtain a refcount.
 *
 * There is a short window between where the bo's GEM object refcount reaches
 * zero and where we put the final ttm_bo reference. Code in the eviction- and
 * shrinking path should therefore attempt to grab a gem object reference before
 * trying to use members outside of the base class ttm object. This function is
 * intended for that purpose. On successful return, this function must be paired
 * with an xe_bo_put().
 *
 * Return: @bo on success, NULL on failure.
 */
static inline __must_check struct xe_bo *xe_bo_get_unless_zero(struct xe_bo *bo)
{
	if (!bo || !kref_get_unless_zero(&bo->ttm.base.refcount))
		return NULL;

	return bo;
}
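/*
 * Usage sketch (hypothetical shrinker-side caller, for illustration of the
 * pattern described in the kernel-doc above):
 *
 *	struct xe_bo *bo = xe_bo_get_unless_zero(candidate);
 *
 *	if (!bo)
 *		return;	 // final GEM ref gone; only the ttm_bo side remains
 *	... safely access xe_bo members outside the base ttm object ...
 *	xe_bo_put(bo);
 */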

static inline void __xe_bo_unset_bulk_move(struct xe_bo *bo)
{
	if (bo)
		ttm_bo_set_bulk_move(&bo->ttm, NULL);
}

static inline void xe_bo_assert_held(struct xe_bo *bo)
{
	if (bo)
		dma_resv_assert_held((bo)->ttm.base.resv);
}

int xe_bo_lock(struct xe_bo *bo, bool intr);

void xe_bo_unlock(struct xe_bo *bo);

static inline void xe_bo_unlock_vm_held(struct xe_bo *bo)
{
	if (bo) {
		XE_WARN_ON(bo->vm && bo->ttm.base.resv != xe_vm_resv(bo->vm));
		if (bo->vm)
			xe_vm_assert_held(bo->vm);
		else
			dma_resv_unlock(bo->ttm.base.resv);
	}
}

int xe_bo_pin_external(struct xe_bo *bo, bool in_place, struct drm_exec *exec);
int xe_bo_pin(struct xe_bo *bo, struct drm_exec *exec);
void xe_bo_unpin_external(struct xe_bo *bo);
void xe_bo_unpin(struct xe_bo *bo);
int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict,
		   struct drm_exec *exec);

static inline bool xe_bo_is_pinned(struct xe_bo *bo)
{
	return bo->ttm.pin_count;
}

static inline bool xe_bo_is_protected(const struct xe_bo *bo)
{
	return bo->pxp_key_instance;
}

/**
 * xe_bo_is_purged() - Check if buffer object has been purged
 * @bo: The buffer object to check
 *
 * Checks if the buffer object's backing store has been discarded by the
 * kernel due to memory pressure after being marked as purgeable (DONTNEED).
 * Once purged, the BO cannot be restored and any attempt to use it will fail.
 *
 * Context: Caller must hold the BO's dma-resv lock
 * Return: true if the BO has been purged, false otherwise
 */
static inline bool xe_bo_is_purged(struct xe_bo *bo)
{
	xe_bo_assert_held(bo);
	return bo->madv_purgeable == XE_MADV_PURGEABLE_PURGED;
}

/**
 * xe_bo_madv_is_dontneed() - Check if BO is marked as DONTNEED
 * @bo: The buffer object to check
 *
 * Checks if userspace has marked this BO as DONTNEED (i.e., its contents
 * are not currently needed and can be discarded under memory pressure).
 * This is used internally to decide whether a BO is eligible for purging.
 *
 * Context: Caller must hold the BO's dma-resv lock
 * Return: true if the BO is marked DONTNEED, false otherwise
 */
static inline bool xe_bo_madv_is_dontneed(struct xe_bo *bo)
{
	xe_bo_assert_held(bo);
	return bo->madv_purgeable == XE_MADV_PURGEABLE_DONTNEED;
}

void xe_bo_set_purgeable_state(struct xe_bo *bo, enum xe_madv_purgeable_state new_state);
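/*
 * Usage sketch (hypothetical caller, for illustration): the helpers above
 * assert the BO's dma-resv lock, so state transitions are expected to
 * happen under it, and a BO that has reached XE_MADV_PURGEABLE_PURGED
 * cannot be revived.
 *
 *	xe_bo_lock(bo, false);
 *	if (!xe_bo_is_purged(bo))
 *		xe_bo_set_purgeable_state(bo, XE_MADV_PURGEABLE_DONTNEED);
 *	xe_bo_unlock(bo);
 */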

static inline void xe_bo_unpin_map_no_vm(struct xe_bo *bo)
{
	if (likely(bo)) {
		xe_bo_lock(bo, false);
		xe_bo_unpin(bo);
		xe_bo_unlock(bo);

		xe_bo_put(bo);
	}
}

bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo);
dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size);
dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size);

static inline dma_addr_t
xe_bo_main_addr(struct xe_bo *bo, size_t page_size)
{
	return xe_bo_addr(bo, 0, page_size);
}

/**
 * xe_bo_size() - Xe BO size
 * @bo: The bo object.
 *
 * Simple helper to return Xe BO's size.
 *
 * Return: Xe BO's size
 */
static inline size_t xe_bo_size(struct xe_bo *bo)
{
	return bo->ttm.base.size;
}

static inline u32
__xe_bo_ggtt_addr(struct xe_bo *bo, u8 tile_id)
{
	struct xe_ggtt_node *ggtt_node = bo->ggtt_node[tile_id];
	u64 offset;

	if (XE_WARN_ON(!ggtt_node))
		return 0;

	offset = xe_ggtt_node_addr(ggtt_node);
	XE_WARN_ON(offset + xe_bo_size(bo) > (1ull << 32));
	return offset;
}

static inline u32
xe_bo_ggtt_addr(struct xe_bo *bo)
{
	xe_assert(xe_bo_device(bo), bo->tile);

	return __xe_bo_ggtt_addr(bo, bo->tile->id);
}

int xe_bo_vmap(struct xe_bo *bo);
void xe_bo_vunmap(struct xe_bo *bo);
int xe_bo_read(struct xe_bo *bo, u64 offset, void *dst, int size);

bool mem_type_is_vram(u32 mem_type);
bool xe_bo_is_vram(struct xe_bo *bo);
bool xe_bo_is_visible_vram(struct xe_bo *bo);
bool xe_bo_is_stolen(struct xe_bo *bo);
bool xe_bo_is_stolen_devmem(struct xe_bo *bo);
bool xe_bo_is_vm_bound(struct xe_bo *bo);
bool xe_bo_has_single_placement(struct xe_bo *bo);
uint64_t vram_region_gpu_offset(struct ttm_resource *res);

bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type);

int xe_bo_migrate(struct xe_bo *bo, u32 mem_type, struct ttm_operation_ctx *ctx,
		  struct drm_exec *exec);
int xe_bo_evict(struct xe_bo *bo, struct drm_exec *exec);

int xe_bo_evict_pinned(struct xe_bo *bo);
int xe_bo_notifier_prepare_pinned(struct xe_bo *bo);
int xe_bo_notifier_unprepare_pinned(struct xe_bo *bo);
int xe_bo_restore_pinned(struct xe_bo *bo);

int xe_bo_dma_unmap_pinned(struct xe_bo *bo);

extern const struct ttm_device_funcs xe_ttm_funcs;
extern const char *const xe_mem_type_to_name[];

int xe_gem_create_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file);
void xe_bo_runtime_pm_release_mmap_offset(struct xe_bo *bo);

int xe_bo_dumb_create(struct drm_file *file_priv,
		      struct drm_device *dev,
		      struct drm_mode_create_dumb *args);

bool xe_bo_needs_ccs_pages(struct xe_bo *bo);

int xe_bo_decompress(struct xe_bo *bo);

static inline size_t xe_bo_ccs_pages_start(struct xe_bo *bo)
{
	return PAGE_ALIGN(xe_bo_size(bo));
}

/**
 * xe_bo_has_valid_ccs_bb - Check if the CCS's BBs were set up for the BO.
 * @bo: the &xe_bo to check
 *
 * The CCS's BBs should only be set up by the VF driver, but it is safe
 * to call this function from a non-VF driver as well.
 *
 * Return: true iff the CCS's BBs are set up, false otherwise.
 */
static inline bool xe_bo_has_valid_ccs_bb(struct xe_bo *bo)
{
	return bo->bb_ccs[XE_SRIOV_VF_CCS_READ_CTX] &&
	       bo->bb_ccs[XE_SRIOV_VF_CCS_WRITE_CTX];
}

static inline bool xe_bo_has_pages(struct xe_bo *bo)
{
	if ((bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) ||
	    xe_bo_is_vram(bo))
		return true;

	return false;
}

void __xe_bo_release_dummy(struct kref *kref);

/**
 * xe_bo_put_deferred() - Put a buffer object with delayed final freeing
 * @bo: The bo to put.
 * @deferred: List to which to add the buffer object if we cannot put, or
 * NULL if the function is to put unconditionally.
 *
 * Since the final freeing of an object includes both sleeping and (!)
 * memory allocation in the dma_resv individualization, it's not ok
 * to put an object from atomic context nor from within a held lock
 * tainted by reclaim. In such situations we want to defer the final
 * freeing until we've exited the restricting context, or in the worst
 * case to a workqueue.
 * This function either puts the object if possible without the refcount
 * reaching zero, or adds it to the @deferred list if that was not possible.
 * The caller needs to follow up with a call to xe_bo_put_commit() to actually
 * put the bo iff this function returns true. It's safe to always
 * follow up with a call to xe_bo_put_commit().
 * TODO: It's TTM that is the villain here. Perhaps TTM should add an
 * interface like this.
 *
 * Return: true if @bo was the first object put on the @deferred list,
 * false otherwise.
 */
static inline bool
xe_bo_put_deferred(struct xe_bo *bo, struct llist_head *deferred)
{
	if (!deferred) {
		xe_bo_put(bo);
		return false;
	}

	if (!kref_put(&bo->ttm.base.refcount, __xe_bo_release_dummy))
		return false;

	return llist_add(&bo->freed, deferred);
}

void xe_bo_put_commit(struct llist_head *deferred);
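
/*
 * Usage sketch (hypothetical caller in an atomic or reclaim-tainted
 * context, for illustration of the deferred-put pattern above):
 *
 *	LLIST_HEAD(deferred);
 *
 *	if (xe_bo_put_deferred(bo, &deferred)) {
 *		... leave the restricted context ...
 *		xe_bo_put_commit(&deferred);
 *	}
 *
 * As the kernel-doc notes, it is also safe to call xe_bo_put_commit()
 * unconditionally once outside the restricted context.
 */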

/**
 * xe_bo_put_async() - Put BO async
 * @bo: The bo to put.
 *
 * Put a BO asynchronously; the final put is deferred to a worker so that the
 * put can be requested from IRQ context.
 */
static inline void
xe_bo_put_async(struct xe_bo *bo)
{
	struct xe_bo_dev *bo_device = &xe_bo_device(bo)->bo_device;

	if (xe_bo_put_deferred(bo, &bo_device->async_list))
		schedule_work(&bo_device->async_free);
}

void xe_bo_dev_init(struct xe_bo_dev *bo_device);

void xe_bo_dev_fini(struct xe_bo_dev *bo_device);

struct sg_table *xe_bo_sg(struct xe_bo *bo);

/**
 * xe_sg_segment_size() - Provides upper limit for sg segment size.
 * @dev: device pointer
 *
 * Return: the maximum segment size for the 'struct scatterlist'
 * elements.
 */
static inline unsigned int xe_sg_segment_size(struct device *dev)
{
	struct scatterlist __maybe_unused sg;
	size_t max = BIT_ULL(sizeof(sg.length) * 8) - 1;

	max = min_t(size_t, max, dma_max_mapping_size(dev));

	/*
	 * The iommu_dma_map_sg() function ensures iova allocation doesn't
	 * cross dma segment boundary. It does so by padding some sg elements.
	 * This can cause overflow, ending up with sg->length being set to 0.
	 * Avoid this by ensuring maximum segment size is half of 'max'
	 * rounded down to PAGE_SIZE.
	 */
	return round_down(max / 2, PAGE_SIZE);
}
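/*
 * Worked example (illustrative, assuming a 64-bit system where
 * dma_max_mapping_size() reports no limit, i.e. SIZE_MAX): sg.length is an
 * unsigned int, so max starts at 2^32 - 1 and is not reduced further; the
 * function then returns round_down(0xffffffff / 2, PAGE_SIZE), i.e. just
 * under 2 GiB, leaving headroom for the IOMMU boundary padding described
 * in the comment above.
 */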

/**
 * struct xe_bo_shrink_flags - flags governing the shrink behaviour.
 * @purge: Only purging allowed. Don't shrink if bo not purgeable.
 * @writeback: Attempt to immediately move content to swap.
 */
struct xe_bo_shrink_flags {
	u32 purge : 1;
	u32 writeback : 1;
};

long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
		  const struct xe_bo_shrink_flags flags,
		  unsigned long *scanned);
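
/*
 * Usage sketch (hypothetical shrinker scan path, for illustration):
 *
 *	struct xe_bo_shrink_flags flags = { .writeback = true };
 *	unsigned long scanned = 0;
 *	long freed = xe_bo_shrink(&ctx, &bo->ttm, flags, &scanned);
 *
 * Per the flag documentation above, setting .purge instead restricts the
 * call to purgeable BOs, while .writeback asks for shrunken content to be
 * moved to swap immediately rather than merely unmapped.
 */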

/**
 * xe_bo_is_mem_type - Whether the bo currently resides in the given
 * TTM memory type
 * @bo: The bo to check.
 * @mem_type: The TTM memory type.
 *
 * Return: true iff the bo resides in @mem_type, false otherwise.
 */
static inline bool xe_bo_is_mem_type(struct xe_bo *bo, u32 mem_type)
{
	xe_bo_assert_held(bo);
	return bo->ttm.resource->mem_type == mem_type;
}
#endif