/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2024 Intel Corporation
 */

#ifndef _XE_SVM_H_
#define _XE_SVM_H_

#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)

#include <drm/drm_pagemap.h>
#include <drm/drm_gpusvm.h>
#include <drm/drm_pagemap_util.h>

#define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER
#define XE_INTERCONNECT_P2P (XE_INTERCONNECT_VRAM + 1)

struct drm_device;
struct drm_file;

struct xe_bo;
struct xe_gt;
struct xe_device;
struct xe_vram_region;
struct xe_tile;
struct xe_vm;
struct xe_vma;

/** struct xe_svm_range - SVM range */
struct xe_svm_range {
	/** @base: base drm_gpusvm_range */
	struct drm_gpusvm_range base;
	/**
	 * @garbage_collector_link: Link into VM's garbage collect SVM range
	 * list. Protected by VM's garbage collect lock.
	 */
	struct list_head garbage_collector_link;
	/**
	 * @tile_present: Tile mask of tiles for which a binding is present
	 * for this range. Protected by GPU SVM notifier lock.
	 */
	u8 tile_present;
	/**
	 * @tile_invalidated: Tile mask of tiles for which the binding has
	 * been invalidated for this range. Protected by GPU SVM notifier
	 * lock.
	 */
	u8 tile_invalidated;
};
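
/*
 * Example (editor's illustration, not part of the upstream header): both
 * masks are protected by the GPU SVM notifier lock, so a caller testing
 * whether a range still has a valid binding on one tile would do roughly
 * the following; the tile->id bit index is an assumption for illustration:
 *
 *	drm_gpusvm_notifier_lock(range->base.gpusvm);
 *	bound = (range->tile_present & ~range->tile_invalidated) &
 *		BIT(tile->id);
 *	drm_gpusvm_notifier_unlock(range->base.gpusvm);
 */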

/**
 * struct xe_pagemap - Manages xe device_private memory for SVM.
 * @pagemap: The struct dev_pagemap providing the struct pages.
 * @dpagemap: The drm_pagemap managing allocation and migration.
 * @destroy_work: Handles asynchronous destruction and caching.
 * @peer: Used for pagemap owner computation.
 * @hpa_base: The host physical address base for the managed memory.
 * @vr: Backpointer to the struct xe_vram_region.
 */
struct xe_pagemap {
	struct dev_pagemap pagemap;
	struct drm_pagemap dpagemap;
	struct work_struct destroy_work;
	struct drm_pagemap_peer peer;
	resource_size_t hpa_base;
	struct xe_vram_region *vr;
};
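
/*
 * Example (editor's sketch): because @pagemap and @dpagemap are embedded,
 * callbacks handed either member can recover the owning xe_pagemap with
 * container_of(), e.g.:
 *
 *	struct xe_pagemap *xpagemap =
 *		container_of(dpagemap, struct xe_pagemap, dpagemap);
 */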

/**
 * xe_svm_range_pages_valid() - SVM range pages valid
 * @range: SVM range
 *
 * Return: True if SVM range pages are valid, False otherwise
 */
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return drm_gpusvm_range_pages_valid(range->base.gpusvm, &range->base);
}

int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr);

int xe_svm_init(struct xe_vm *vm);

void xe_svm_fini(struct xe_vm *vm);

void xe_svm_close(struct xe_vm *vm);

int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic);

bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);

int xe_svm_bo_evict(struct xe_bo *bo);

void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);

int xe_svm_alloc_vram(struct xe_svm_range *range, const struct drm_gpusvm_ctx *ctx,
		      struct drm_pagemap *dpagemap);

struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx);

int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
			   struct drm_gpusvm_ctx *ctx);

bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
					const struct drm_pagemap *dpagemap);

void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range);

bool xe_svm_range_validate(struct xe_vm *vm,
			   struct xe_svm_range *range,
			   u8 tile_mask, const struct drm_pagemap *dpagemap);

u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma);

void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end);

u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end);

struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile);

void *xe_svm_private_page_owner(struct xe_vm *vm, bool force_smem);

/**
 * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
 * @range: SVM range
 *
 * Return: True if SVM range has a DMA mapping, False otherwise
 */
static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
{
	lockdep_assert_held(&range->base.gpusvm->notifier_lock);
	return range->base.pages.flags.has_dma_mapping;
}
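
/*
 * Example (editor's sketch): the lockdep assertion above means callers
 * must hold the notifier lock when querying the mapping state, e.g.:
 *
 *	xe_svm_notifier_lock(vm);
 *	mapped = xe_svm_range_has_dma_mapping(range);
 *	xe_svm_notifier_unlock(vm);
 */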

/**
 * to_xe_range - Convert a drm_gpusvm_range pointer to a xe_svm_range
 * @r: Pointer to the drm_gpusvm_range structure
 *
 * This function takes a pointer to a drm_gpusvm_range structure and
 * converts it to a pointer to the containing xe_svm_range structure.
 *
 * Return: Pointer to the xe_svm_range structure
 */
static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
{
	return container_of(r, struct xe_svm_range, base);
}
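
/*
 * Example (editor's sketch): a drm_gpusvm operation that is handed the
 * embedded &struct drm_gpusvm_range can recover the driver range:
 *
 *	struct xe_svm_range *range = to_xe_range(r);
 *	xe_svm_range_debug(range, "example");
 */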

/**
 * xe_svm_range_start() - SVM range start address
 * @range: SVM range
 *
 * Return: start address of range.
 */
static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
{
	return drm_gpusvm_range_start(&range->base);
}

/**
 * xe_svm_range_end() - SVM range end address
 * @range: SVM range
 *
 * Return: end address of range.
 */
static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
{
	return drm_gpusvm_range_end(&range->base);
}

/**
 * xe_svm_range_size() - SVM range size
 * @range: SVM range
 *
 * Return: Size of range.
 */
static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
{
	return drm_gpusvm_range_size(&range->base);
}

void xe_svm_flush(struct xe_vm *vm);

int xe_pagemap_shrinker_create(struct xe_device *xe);

int xe_pagemap_cache_create(struct xe_tile *tile);

struct drm_pagemap *xe_drm_pagemap_from_fd(int fd, u32 region_instance);

#else
#include <linux/interval_tree.h>
#include "xe_vm.h"

struct drm_pagemap_addr;
struct drm_gpusvm_ctx;
struct drm_gpusvm_range;
struct xe_bo;
struct xe_device;
struct xe_vm;
struct xe_vma;
struct xe_tile;
struct xe_vram_region;

#define XE_INTERCONNECT_VRAM 1
#define XE_INTERCONNECT_P2P (XE_INTERCONNECT_VRAM + 1)

struct xe_svm_range {
	struct {
		struct interval_tree_node itree;
		struct {
			const struct drm_pagemap_addr *dma_addr;
		} pages;
	} base;
	u32 tile_present;
	u32 tile_invalidated;
};

static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return false;
}

static inline
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
	return 0;
}

static inline
int xe_svm_init(struct xe_vm *vm)
{
#if IS_ENABLED(CONFIG_DRM_GPUSVM)
	return drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)", &vm->xe->drm,
			       NULL, 0, 0, 0, NULL, NULL, 0);
#else
	return 0;
#endif
}

static inline
void xe_svm_fini(struct xe_vm *vm)
{
#if IS_ENABLED(CONFIG_DRM_GPUSVM)
	xe_assert(vm->xe, xe_vm_is_closed(vm));
	drm_gpusvm_fini(&vm->svm.gpusvm);
#endif
}

static inline
void xe_svm_close(struct xe_vm *vm)
{
}

static inline
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic)
{
	return 0;
}

static inline
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
{
	return false;
}

static inline
int xe_svm_bo_evict(struct xe_bo *bo)
{
	return 0;
}

static inline
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
}

static inline int
xe_svm_alloc_vram(struct xe_svm_range *range, const struct drm_gpusvm_ctx *ctx,
		  struct drm_pagemap *dpagemap)
{
	return -EOPNOTSUPP;
}

static inline
struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
{
	return ERR_PTR(-EINVAL);
}

static inline
int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
			   struct drm_gpusvm_ctx *ctx)
{
	return -EINVAL;
}

static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
{
	return NULL;
}

static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
{
	return 0;
}

static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
{
	return 0;
}

static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
{
	return 0;
}

static inline
bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
					const struct drm_pagemap *dpagemap)
{
	return false;
}

static inline
void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
{
}

static inline
bool xe_svm_range_validate(struct xe_vm *vm,
			   struct xe_svm_range *range,
			   u8 tile_mask, const struct drm_pagemap *dpagemap)
{
	return false;
}

static inline
u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma)
{
	return ULONG_MAX;
}

static inline
void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end)
{
}

static inline
u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
{
	return 0;
}

static inline
struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
{
	return NULL;
}

static inline void *xe_svm_private_page_owner(struct xe_vm *vm, bool force_smem)
{
	return NULL;
}

static inline void xe_svm_flush(struct xe_vm *vm)
{
}

static inline int xe_pagemap_shrinker_create(struct xe_device *xe)
{
	return 0;
}

static inline int xe_pagemap_cache_create(struct xe_tile *tile)
{
	return 0;
}

static inline struct drm_pagemap *xe_drm_pagemap_from_fd(int fd, u32 region_instance)
{
	return ERR_PTR(-ENOENT);
}

#define xe_svm_range_has_dma_mapping(...) false
#endif /* CONFIG_DRM_XE_GPUSVM */

#if IS_ENABLED(CONFIG_DRM_GPUSVM) /* Need to support userptr without XE_GPUSVM */
#define xe_svm_assert_in_notifier(vm__) \
	lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)

#define xe_svm_assert_held_read(vm__) \
	lockdep_assert_held_read(&(vm__)->svm.gpusvm.notifier_lock)

#define xe_svm_notifier_lock(vm__) \
	drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)

#define xe_svm_notifier_lock_interruptible(vm__) \
	down_read_interruptible(&(vm__)->svm.gpusvm.notifier_lock)

#define xe_svm_notifier_unlock(vm__) \
	drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)

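/*
 * Example (editor's sketch): typical use of the interruptible variant,
 * which forwards the down_read_interruptible() result and so returns a
 * negative error code (-EINTR) when interrupted by a signal:
 *
 *	err = xe_svm_notifier_lock_interruptible(vm);
 *	if (err)
 *		return err;
 *	// ... inspect ranges under the notifier lock ...
 *	xe_svm_notifier_unlock(vm);
 */
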
#else
#define xe_svm_assert_in_notifier(...) do {} while (0)

static inline void xe_svm_assert_held_read(struct xe_vm *vm)
{
}

static inline void xe_svm_notifier_lock(struct xe_vm *vm)
{
}

static inline int xe_svm_notifier_lock_interruptible(struct xe_vm *vm)
{
	return 0;
}

static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
{
}
#endif /* CONFIG_DRM_GPUSVM */

#endif /* _XE_SVM_H_ */