Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2020 Intel Corporation
4 */
5
6#include "xe_migrate.h"
7
8#include <linux/bitfield.h>
9#include <linux/sizes.h>
10
11#include <drm/drm_managed.h>
12#include <drm/drm_pagemap.h>
13#include <drm/ttm/ttm_tt.h>
14#include <uapi/drm/xe_drm.h>
15
16#include <generated/xe_wa_oob.h>
17
18#include "instructions/xe_gpu_commands.h"
19#include "instructions/xe_mi_commands.h"
20#include "regs/xe_gtt_defs.h"
21#include "tests/xe_test.h"
22#include "xe_assert.h"
23#include "xe_bb.h"
24#include "xe_bo.h"
25#include "xe_exec_queue.h"
26#include "xe_ggtt.h"
27#include "xe_gt.h"
28#include "xe_gt_printk.h"
29#include "xe_hw_engine.h"
30#include "xe_lrc.h"
31#include "xe_map.h"
32#include "xe_mem_pool.h"
33#include "xe_mocs.h"
34#include "xe_printk.h"
35#include "xe_pt.h"
36#include "xe_res_cursor.h"
37#include "xe_sa.h"
38#include "xe_sched_job.h"
39#include "xe_sriov_vf_ccs.h"
40#include "xe_svm.h"
41#include "xe_sync.h"
42#include "xe_trace_bo.h"
43#include "xe_validation.h"
44#include "xe_vm.h"
45#include "xe_vram.h"
46
47/**
48 * struct xe_migrate - migrate context.
49 */
50struct xe_migrate {
51 /** @q: Default exec queue used for migration */
52 struct xe_exec_queue *q;
53 /** @tile: Backpointer to the tile this struct xe_migrate belongs to. */
54 struct xe_tile *tile;
55 /** @job_mutex: Timeline mutex for @q. */
56 struct mutex job_mutex;
57 /** @pt_bo: Page-table buffer object. */
58 struct xe_bo *pt_bo;
59 /** @batch_base_ofs: VM offset of the migration batch buffer */
60 u64 batch_base_ofs;
61 /** @usm_batch_base_ofs: VM offset of the usm batch buffer */
62 u64 usm_batch_base_ofs;
63 /** @cleared_mem_ofs: VM offset of @cleared_bo. */
64 u64 cleared_mem_ofs;
65 /** @large_page_copy_ofs: VM offset of 2M pages used for large copies */
66 u64 large_page_copy_ofs;
67 /**
68 * @large_page_copy_pdes: BO offset at which the 2M-page PDEs used for
69 * large copies are written out
70 */
71 u64 large_page_copy_pdes;
72 /**
73 * @fence: dma-fence representing the last migration job batch.
74 * Protected by @job_mutex.
75 */
76 struct dma_fence *fence;
77 /**
78 * @vm_update_sa: For integrated, used to suballocate page-tables
79 * out of the pt_bo.
80 */
81 struct drm_suballoc_manager vm_update_sa;
82 /** @min_chunk_size: For dgfx, the minimum chunk size */
83 u64 min_chunk_size;
84};
85
86#define MAX_PREEMPTDISABLE_TRANSFER SZ_8M /* Around 1ms. */
87#define MAX_CCS_LIMITED_TRANSFER SZ_4M /* XE_PAGE_SIZE * (FIELD_MAX(XE2_CCS_SIZE_MASK) + 1) */
88#define NUM_KERNEL_PDE 15
89#define NUM_PT_SLOTS 32
90#define LEVEL0_PAGE_TABLE_ENCODE_SIZE SZ_2M
91#define MAX_NUM_PTE 512
92#define IDENTITY_OFFSET 256ULL
93
94/*
95 * Although MI_STORE_DATA_IMM's "length" field is 10-bits, 0x3FE is the largest
96 * legal value accepted. Since that instruction field is always stored in
97 * (val-2) format, this translates to 0x400 dwords for the true maximum length
98 * of the instruction. Subtracting the instruction header (1 dword) and
99 * address (2 dwords), that leaves 0x3FD dwords (0x1FE qwords) for PTE values.
100 */
101#define MAX_PTE_PER_SDI 0x1FEU
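/*
 * Illustrative arithmetic (editor's example, not from the original source):
 * emitting 600 PTEs therefore needs two MI_STORE_DATA_IMM packets, 0x1FE
 * qwords in the first and 0x5A (90) in the second, with each packet adding
 * another 3 header/address dwords.
 */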
102
103static void xe_migrate_fini(void *arg)
104{
105 struct xe_migrate *m = arg;
106
107 xe_vm_lock(m->q->vm, false);
108 xe_bo_unpin(m->pt_bo);
109 xe_vm_unlock(m->q->vm);
110
111 dma_fence_put(m->fence);
112 xe_bo_put(m->pt_bo);
113 drm_suballoc_manager_fini(&m->vm_update_sa);
114 mutex_destroy(&m->job_mutex);
115 xe_vm_close_and_put(m->q->vm);
116 xe_exec_queue_put(m->q);
117}
118
119static u64 xe_migrate_vm_addr(u64 slot, u32 level)
120{
121 XE_WARN_ON(slot >= NUM_PT_SLOTS);
122
123 /* First slot is reserved for mapping of PT bo and bb, start from 1 */
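 /*
 * Illustrative example (assuming 4 KiB GPU pages): slot 0 at level 0
 * resolves to 1ULL << xe_pt_shift(1), i.e. the 2 MiB of VA covered by
 * one level-0 page table.
 */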
124 return (slot + 1ULL) << xe_pt_shift(level + 1);
125}
126
127static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr, bool is_comp_pte)
128{
129 /*
130 * Subtract the DPA base to get the correct offset into the identity
131 * table for the migrate mapping
132 */
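 /*
 * Illustratively (editor's sketch): addr == dpa_base maps to
 * IDENTITY_OFFSET << xe_pt_shift(2), i.e. the 256 GiB identity-map
 * window set up in xe_migrate_prepare_vm().
 */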
133 u64 identity_offset = IDENTITY_OFFSET;
134
135 if (GRAPHICS_VER(xe) >= 20 && is_comp_pte)
136 identity_offset += DIV_ROUND_UP_ULL(xe_vram_region_actual_physical_size
137 (xe->mem.vram), SZ_1G);
138
139 addr -= xe_vram_region_dpa_base(xe->mem.vram);
140 return addr + (identity_offset << xe_pt_shift(2));
141}
142
143static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm, struct xe_bo *bo,
144 u64 map_ofs, u64 vram_offset, u16 pat_index, u64 pt_2m_ofs)
145{
146 struct xe_vram_region *vram = xe->mem.vram;
147 resource_size_t dpa_base = xe_vram_region_dpa_base(vram);
148 u64 pos, ofs, flags;
149 u64 entry;
150 /* XXX: Unclear if this should be usable_size? */
151 u64 vram_limit = xe_vram_region_actual_physical_size(vram) + dpa_base;
152 u32 level = 2;
153
154 ofs = map_ofs + XE_PAGE_SIZE * level + vram_offset * 8;
155 flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
156 true, 0);
157
158 xe_assert(xe, IS_ALIGNED(xe_vram_region_usable_size(vram), SZ_2M));
159
160 /*
161 * Use 1GB pages when possible; the last chunk always uses 2M
162 * pages, as mixing reserved memory (stolen, WOPCM) with a single
163 * mapping is not allowed on certain platforms.
164 */
165 for (pos = dpa_base; pos < vram_limit;
166 pos += SZ_1G, ofs += 8) {
167 if (pos + SZ_1G >= vram_limit) {
168 entry = vm->pt_ops->pde_encode_bo(bo, pt_2m_ofs);
169 xe_map_wr(xe, &bo->vmap, ofs, u64, entry);
170
171 flags = vm->pt_ops->pte_encode_addr(xe, 0,
172 pat_index,
173 level - 1,
174 true, 0);
175
176 for (ofs = pt_2m_ofs; pos < vram_limit;
177 pos += SZ_2M, ofs += 8)
178 xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
179 break; /* Ensure pos == vram_limit assert correct */
180 }
181
182 xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
183 }
184
185 xe_assert(xe, pos == vram_limit);
186}
187
188static int xe_migrate_pt_bo_alloc(struct xe_tile *tile, struct xe_migrate *m,
189 struct xe_vm *vm, struct drm_exec *exec)
190{
191 struct xe_bo *bo, *batch = tile->mem.kernel_bb_pool->bo;
192 u32 num_entries = NUM_PT_SLOTS;
193
194 /* Can't bump NUM_PT_SLOTS too high */
195 BUILD_BUG_ON(NUM_PT_SLOTS > SZ_2M/XE_PAGE_SIZE);
196 /* Must be a multiple of 64K to support all platforms */
197 BUILD_BUG_ON(NUM_PT_SLOTS * XE_PAGE_SIZE % SZ_64K);
198 /* And one slot reserved for the 4KiB page table updates */
199 BUILD_BUG_ON(!(NUM_KERNEL_PDE & 1));
200
201 /* Need to be sure everything fits in the first PT, or create more */
202 xe_tile_assert(tile, m->batch_base_ofs + xe_bo_size(batch) < SZ_2M);
203
204 bo = xe_bo_create_pin_map(vm->xe, tile, vm,
205 num_entries * XE_PAGE_SIZE,
206 ttm_bo_type_kernel,
207 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
208 XE_BO_FLAG_PAGETABLE, exec);
209 if (IS_ERR(bo))
210 return PTR_ERR(bo);
211
212 m->pt_bo = bo;
213 return 0;
214}
215
216static void xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
217 struct xe_vm *vm, u32 *ofs)
218{
219 struct xe_device *xe = tile_to_xe(tile);
220 u16 pat_index = xe->pat.idx[XE_CACHE_WB];
221 u8 id = tile->id;
222 u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level;
223#define VRAM_IDENTITY_MAP_COUNT 2
224 u32 num_setup = num_level + VRAM_IDENTITY_MAP_COUNT;
225#undef VRAM_IDENTITY_MAP_COUNT
226 u32 map_ofs, level, i;
227 struct xe_bo *bo = m->pt_bo, *batch = tile->mem.kernel_bb_pool->bo;
228 u64 entry, pt29_ofs;
229
230 /* PT30 & PT31 reserved for 2M identity map */
231 pt29_ofs = xe_bo_size(bo) - 3 * XE_PAGE_SIZE;
232 entry = vm->pt_ops->pde_encode_bo(bo, pt29_ofs);
233 xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
234
235 map_ofs = (num_entries - num_setup) * XE_PAGE_SIZE;
236
237 /* Map the entire BO in our level 0 pt */
238 for (i = 0, level = 0; i < num_entries; level++) {
239 entry = vm->pt_ops->pte_encode_bo(bo, i * XE_PAGE_SIZE,
240 pat_index, 0);
241
242 xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, entry);
243
244 if (vm->flags & XE_VM_FLAG_64K)
245 i += 16;
246 else
247 i += 1;
248 }
249
250 if (!IS_DGFX(xe)) {
251 /* Write out batch too */
252 m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
253 for (i = 0; i < xe_bo_size(batch);
254 i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
255 XE_PAGE_SIZE) {
256 entry = vm->pt_ops->pte_encode_bo(batch, i,
257 pat_index, 0);
258
259 xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
260 entry);
261 level++;
262 }
263 if (xe->info.has_usm) {
264 xe_tile_assert(tile, xe_bo_size(batch) == SZ_1M);
265
266 batch = tile->primary_gt->usm.bb_pool->bo;
267 m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M;
268 xe_tile_assert(tile, xe_bo_size(batch) == SZ_512K);
269
270 for (i = 0; i < xe_bo_size(batch);
271 i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
272 XE_PAGE_SIZE) {
273 entry = vm->pt_ops->pte_encode_bo(batch, i,
274 pat_index, 0);
275
276 xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
277 entry);
278 level++;
279 }
280 }
281 } else {
282 u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
283
284 m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false);
285
286 if (xe->info.has_usm) {
287 batch = tile->primary_gt->usm.bb_pool->bo;
288 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
289 m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false);
290 }
291 }
292
293 for (level = 1; level < num_level; level++) {
294 u32 flags = 0;
295
296 if (vm->flags & XE_VM_FLAG_64K && level == 1)
297 flags = XE_PDE_64K;
298
299 entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (u64)(level - 1) *
300 XE_PAGE_SIZE);
301 xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
302 entry | flags);
303 }
304
305 /* Write PDE's that point to our BO. */
306 for (i = 0; i < map_ofs / XE_PAGE_SIZE; i++) {
307 entry = vm->pt_ops->pde_encode_bo(bo, (u64)i * XE_PAGE_SIZE);
308
309 xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
310 (i + 1) * 8, u64, entry);
311 }
312
313 /* Reserve 2M PDEs */
314 level = 1;
315 m->large_page_copy_ofs = NUM_PT_SLOTS << xe_pt_shift(level);
316 m->large_page_copy_pdes = map_ofs + XE_PAGE_SIZE * level +
317 NUM_PT_SLOTS * 8;
318
319 /* Set up a 1GiB NULL mapping at 255GiB offset. */
320 level = 2;
321 xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level + 255 * 8, u64,
322 vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, IS_DGFX(xe), 0)
323 | XE_PTE_NULL);
324 m->cleared_mem_ofs = (255ULL << xe_pt_shift(level));
325
326 /* Identity map the entire vram at 256GiB offset */
327 if (IS_DGFX(xe)) {
328 u64 pt30_ofs = xe_bo_size(bo) - 2 * XE_PAGE_SIZE;
329 resource_size_t actual_phy_size = xe_vram_region_actual_physical_size(xe->mem.vram);
330
331 xe_migrate_program_identity(xe, vm, bo, map_ofs, IDENTITY_OFFSET,
332 pat_index, pt30_ofs);
333 xe_assert(xe, actual_phy_size <= (MAX_NUM_PTE - IDENTITY_OFFSET) * SZ_1G);
334
335 /*
336 * Identity map the entire vram for compressed pat_index for xe2+
337 * if flat ccs is enabled.
338 */
339 if (GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe)) {
340 u16 comp_pat_index = xe->pat.idx[XE_CACHE_NONE_COMPRESSION];
341 u64 vram_offset = IDENTITY_OFFSET +
342 DIV_ROUND_UP_ULL(actual_phy_size, SZ_1G);
343 u64 pt31_ofs = xe_bo_size(bo) - XE_PAGE_SIZE;
344
345 xe_assert(xe, actual_phy_size <= (MAX_NUM_PTE - IDENTITY_OFFSET -
346 IDENTITY_OFFSET / 2) * SZ_1G);
347 xe_migrate_program_identity(xe, vm, bo, map_ofs, vram_offset,
348 comp_pat_index, pt31_ofs);
349 }
350 }
351
352 if (ofs)
353 *ofs = map_ofs;
354}
355
356static void xe_migrate_suballoc_manager_init(struct xe_migrate *m, u32 map_ofs)
357{
358 /*
359 * Example layout created above, with root level = 3:
360 * [PT0...PT7]: kernel PT's for copy/clear; 64KiB or 4KiB PTE's
361 * [PT8]: Kernel PT for VM_BIND, 4 KiB PTE's
362 * [PT9...PT26]: Userspace PT's for VM_BIND, 4 KiB PTE's
363 * [PT27 = PDE 0] [PT28 = PDE 1] [PT29 = PDE 2] [PT30 & PT31 = 2M vram identity map]
364 *
365 * This makes the lowest part of the VM point to the pagetables.
366 * Hence the lowest 2M in the VM points to itself; with a few writes
367 * and flushes, other parts of the VM can be used for copying and
368 * clearing.
369 *
370 * For performance, the kernel reserves PDE's, so about 20 are left
371 * for async VM updates.
372 *
373 * To make it easier to work with, each scratch PT is put in slot (1 + PT #)
374 * everywhere; this allows lockless updates to scratch pages by using
375 * the different addresses in the VM.
376 */
377#define NUM_VMUSA_UNIT_PER_PAGE 32
378#define VM_SA_UPDATE_UNIT_SIZE (XE_PAGE_SIZE / NUM_VMUSA_UNIT_PER_PAGE)
379#define NUM_VMUSA_WRITES_PER_UNIT (VM_SA_UPDATE_UNIT_SIZE / sizeof(u64))
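 /*
 * Illustrative arithmetic (assuming XE_PAGE_SIZE == SZ_4K): 32
 * suballocation units per page of 128 bytes each, i.e. 16 qword PTE
 * writes per unit.
 */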
380 drm_suballoc_manager_init(&m->vm_update_sa,
381 (size_t)(map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
382 NUM_VMUSA_UNIT_PER_PAGE, 0);
383}
384
385/*
386 * Including the reserved copy engine is required to avoid deadlocks due to
387 * migrate jobs servicing the faults getting stuck behind the job that faulted.
388 */
389static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt)
390{
391 u32 logical_mask = 0;
392 struct xe_hw_engine *hwe;
393 enum xe_hw_engine_id id;
394
395 for_each_hw_engine(hwe, gt, id) {
396 if (hwe->class != XE_ENGINE_CLASS_COPY)
397 continue;
398
399 if (xe_gt_is_usm_hwe(gt, hwe))
400 logical_mask |= BIT(hwe->logical_instance);
401 }
402
403 return logical_mask;
404}
405
406static bool xe_migrate_needs_ccs_emit(struct xe_device *xe)
407{
408 return xe_device_has_flat_ccs(xe) && !(GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe));
409}
410
411/**
412 * xe_migrate_alloc() - Allocate a migrate struct for a given &xe_tile
413 * @tile: &xe_tile
414 *
415 * Allocates a &xe_migrate for a given tile.
416 *
417 * Return: &xe_migrate on success, or NULL when out of memory.
418 */
419struct xe_migrate *xe_migrate_alloc(struct xe_tile *tile)
420{
421 struct xe_migrate *m = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*m), GFP_KERNEL);
422
423 if (m)
424 m->tile = tile;
425 return m;
426}
427
428static int xe_migrate_lock_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, struct xe_vm *vm)
429{
430 struct xe_device *xe = tile_to_xe(tile);
431 struct xe_validation_ctx ctx;
432 struct drm_exec exec;
433 u32 map_ofs;
434 int err = 0;
435
436 xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
437 err = xe_vm_drm_exec_lock(vm, &exec);
438 if (err)
439 return err;
440
441 drm_exec_retry_on_contention(&exec);
442
443 err = xe_migrate_pt_bo_alloc(tile, m, vm, &exec);
444 if (err)
445 return err;
446
447 xe_migrate_prepare_vm(tile, m, vm, &map_ofs);
448 xe_migrate_suballoc_manager_init(m, map_ofs);
449 drm_exec_retry_on_contention(&exec);
450 xe_validation_retry_on_oom(&ctx, &err);
451 }
452
453 return err;
454}
455
456/**
457 * xe_migrate_init() - Initialize a migrate context
458 * @m: The migration context
459 *
460 * Return: 0 if successful, negative error code on failure
461 */
462int xe_migrate_init(struct xe_migrate *m)
463{
464 struct xe_tile *tile = m->tile;
465 struct xe_gt *primary_gt = tile->primary_gt;
466 struct xe_device *xe = tile_to_xe(tile);
467 struct xe_vm *vm;
468 int err;
469
470 /* Special layout, prepared below.. */
471 vm = xe_vm_create(xe, XE_VM_FLAG_MIGRATION |
472 XE_VM_FLAG_SET_TILE_ID(tile), NULL);
473 if (IS_ERR(vm))
474 return PTR_ERR(vm);
475
476 err = xe_migrate_lock_prepare_vm(tile, m, vm);
477 if (err)
478 goto err_out;
479
480 if (xe->info.has_usm) {
481 struct xe_hw_engine *hwe = xe_gt_hw_engine(primary_gt,
482 XE_ENGINE_CLASS_COPY,
483 primary_gt->usm.reserved_bcs_instance,
484 false);
485 u32 logical_mask = xe_migrate_usm_logical_mask(primary_gt);
486
487 if (!hwe || !logical_mask) {
488 err = -EINVAL;
489 goto err_out;
490 }
491
492 /*
493 * XXX: Currently only reserving 1 (likely slow) BCS instance on
494 * PVC, may want to revisit if performance is needed.
495 */
496 m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
497 EXEC_QUEUE_FLAG_KERNEL |
498 EXEC_QUEUE_FLAG_PERMANENT |
499 EXEC_QUEUE_FLAG_HIGH_PRIORITY |
500 EXEC_QUEUE_FLAG_MIGRATE |
501 EXEC_QUEUE_FLAG_LOW_LATENCY, 0);
502 } else {
503 m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
504 XE_ENGINE_CLASS_COPY,
505 EXEC_QUEUE_FLAG_KERNEL |
506 EXEC_QUEUE_FLAG_PERMANENT |
507 EXEC_QUEUE_FLAG_MIGRATE, 0);
508 }
509 if (IS_ERR(m->q)) {
510 err = PTR_ERR(m->q);
511 goto err_out;
512 }
513
514 mutex_init(&m->job_mutex);
515 fs_reclaim_acquire(GFP_KERNEL);
516 might_lock(&m->job_mutex);
517 fs_reclaim_release(GFP_KERNEL);
518
519 err = devm_add_action_or_reset(xe->drm.dev, xe_migrate_fini, m);
520 if (err)
521 return err;
522
523 if (IS_DGFX(xe)) {
524 if (xe_migrate_needs_ccs_emit(xe))
525 /* min chunk size corresponds to 4K of CCS Metadata */
526 m->min_chunk_size = SZ_4K * SZ_64K /
527 xe_device_ccs_bytes(xe, SZ_64K);
528 else
529 /* Somewhat arbitrary to avoid a huge amount of blits */
530 m->min_chunk_size = SZ_64K;
531 m->min_chunk_size = roundup_pow_of_two(m->min_chunk_size);
532 drm_dbg(&xe->drm, "Migrate min chunk size is 0x%08llx\n",
533 (unsigned long long)m->min_chunk_size);
534 }
535
536 return err;
537
538err_out:
539 xe_vm_close_and_put(vm);
540 return err;
541
542}
543
544static u64 max_mem_transfer_per_pass(struct xe_device *xe)
545{
546 if (!IS_DGFX(xe) && xe_device_has_flat_ccs(xe))
547 return MAX_CCS_LIMITED_TRANSFER;
548
549 return MAX_PREEMPTDISABLE_TRANSFER;
550}
551
552static u64 xe_migrate_res_sizes(struct xe_migrate *m, struct xe_res_cursor *cur)
553{
554 struct xe_device *xe = tile_to_xe(m->tile);
555 u64 size = min_t(u64, max_mem_transfer_per_pass(xe), cur->remaining);
556
557 if (mem_type_is_vram(cur->mem_type)) {
558 /*
559 * For VRAM we want to blit in chunks with sizes aligned to
560 * min_chunk_size so that the offset to the CCS metadata is
561 * page-aligned. If it's the last chunk it may be smaller.
562 *
563 * Another constraint is that we need to limit the blit to
564 * the VRAM block size, unless size is smaller than
565 * min_chunk_size.
566 */
567 u64 chunk = max_t(u64, cur->size, m->min_chunk_size);
568
569 size = min_t(u64, size, chunk);
570 if (size > m->min_chunk_size)
571 size = round_down(size, m->min_chunk_size);
572 }
573
574 return size;
575}
576
577static bool xe_migrate_allow_identity(u64 size, const struct xe_res_cursor *cur)
578{
579 /* If the chunk is not fragmented, allow identity map. */
580 return cur->size >= size;
581}
582
583#define PTE_UPDATE_FLAG_IS_VRAM BIT(0)
584#define PTE_UPDATE_FLAG_IS_COMP_PTE BIT(1)
585
586static u32 pte_update_size(struct xe_migrate *m,
587 u32 flags,
588 struct ttm_resource *res,
589 struct xe_res_cursor *cur,
590 u64 *L0, u64 *L0_ofs, u32 *L0_pt,
591 u32 cmd_size, u32 pt_ofs, u32 avail_pts)
592{
593 u32 cmds = 0;
594 bool is_vram = PTE_UPDATE_FLAG_IS_VRAM & flags;
595 bool is_comp_pte = PTE_UPDATE_FLAG_IS_COMP_PTE & flags;
596
597 *L0_pt = pt_ofs;
598 if (is_vram && xe_migrate_allow_identity(*L0, cur)) {
599 /* Offset into identity map. */
600 *L0_ofs = xe_migrate_vram_ofs(tile_to_xe(m->tile),
601 cur->start + vram_region_gpu_offset(res),
602 is_comp_pte);
603 cmds += cmd_size;
604 } else {
605 /* Clip L0 to available size */
606 u64 size = min(*L0, (u64)avail_pts * SZ_2M);
607 u32 num_4k_pages = (size + XE_PAGE_SIZE - 1) >> XE_PTE_SHIFT;
608
609 *L0 = size;
610 *L0_ofs = xe_migrate_vm_addr(pt_ofs, 0);
611
612 /* MI_STORE_DATA_IMM */
613 cmds += 3 * DIV_ROUND_UP(num_4k_pages, MAX_PTE_PER_SDI);
614
615 /* PTE qwords (2 dwords each) */
616 cmds += num_4k_pages * 2;
617
618 /* Each chunk has a single blit command */
619 cmds += cmd_size;
620 }
621
622 return cmds;
623}
624
625static void emit_pte(struct xe_migrate *m,
626 struct xe_bb *bb, u32 at_pt,
627 bool is_vram, bool is_comp_pte,
628 struct xe_res_cursor *cur,
629 u32 size, struct ttm_resource *res)
630{
631 struct xe_device *xe = tile_to_xe(m->tile);
632 struct xe_vm *vm = m->q->vm;
633 u16 pat_index;
634 u32 ptes;
635 u64 ofs = (u64)at_pt * XE_PAGE_SIZE;
636 u64 cur_ofs;
637
638 /* Indirect access needs the compression-enabled, uncached PAT index */
639 if (GRAPHICS_VERx100(xe) >= 2000)
640 pat_index = is_comp_pte ? xe->pat.idx[XE_CACHE_NONE_COMPRESSION] :
641 xe->pat.idx[XE_CACHE_WB];
642 else
643 pat_index = xe->pat.idx[XE_CACHE_WB];
644
645 ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);
646
647 while (ptes) {
648 u32 chunk = min(MAX_PTE_PER_SDI, ptes);
649
650 bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
651 bb->cs[bb->len++] = ofs;
652 bb->cs[bb->len++] = 0;
653
654 cur_ofs = ofs;
655 ofs += chunk * 8;
656 ptes -= chunk;
657
658 while (chunk--) {
659 u64 addr, flags = 0;
660 bool devmem = false;
661
662 addr = xe_res_dma(cur) & PAGE_MASK;
663 if (is_vram) {
664 if (vm->flags & XE_VM_FLAG_64K) {
665 u64 va = cur_ofs * XE_PAGE_SIZE / 8;
666
667 xe_assert(xe, (va & (SZ_64K - 1)) ==
668 (addr & (SZ_64K - 1)));
669
670 flags |= XE_PTE_PS64;
671 }
672
673 addr += vram_region_gpu_offset(res);
674 devmem = true;
675 }
676
677 addr = vm->pt_ops->pte_encode_addr(m->tile->xe,
678 addr, pat_index,
679 0, devmem, flags);
680 bb->cs[bb->len++] = lower_32_bits(addr);
681 bb->cs[bb->len++] = upper_32_bits(addr);
682
683 xe_res_next(cur, min_t(u32, size, PAGE_SIZE));
684 cur_ofs += 8;
685 }
686 }
687}
688
689#define EMIT_COPY_CCS_DW 5
690static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb,
691 u64 dst_ofs, bool dst_is_indirect,
692 u64 src_ofs, bool src_is_indirect,
693 u32 size)
694{
695 struct xe_device *xe = gt_to_xe(gt);
696 u32 *cs = bb->cs + bb->len;
697 u32 num_ccs_blks;
698 u32 num_pages;
699 u32 ccs_copy_size;
700 u32 mocs;
701
702 if (GRAPHICS_VERx100(xe) >= 2000) {
703 num_pages = DIV_ROUND_UP(size, XE_PAGE_SIZE);
704 xe_gt_assert(gt, FIELD_FIT(XE2_CCS_SIZE_MASK, num_pages - 1));
705
706 ccs_copy_size = REG_FIELD_PREP(XE2_CCS_SIZE_MASK, num_pages - 1);
707 mocs = FIELD_PREP(XE2_XY_CTRL_SURF_MOCS_INDEX_MASK, gt->mocs.uc_index);
708
709 } else {
710 num_ccs_blks = DIV_ROUND_UP(xe_device_ccs_bytes(gt_to_xe(gt), size),
711 NUM_CCS_BYTES_PER_BLOCK);
712 xe_gt_assert(gt, FIELD_FIT(CCS_SIZE_MASK, num_ccs_blks - 1));
713
714 ccs_copy_size = REG_FIELD_PREP(CCS_SIZE_MASK, num_ccs_blks - 1);
715 mocs = FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, gt->mocs.uc_index);
716 }
717
718 *cs++ = XY_CTRL_SURF_COPY_BLT |
719 (src_is_indirect ? 0x0 : 0x1) << SRC_ACCESS_TYPE_SHIFT |
720 (dst_is_indirect ? 0x0 : 0x1) << DST_ACCESS_TYPE_SHIFT |
721 ccs_copy_size;
722 *cs++ = lower_32_bits(src_ofs);
723 *cs++ = upper_32_bits(src_ofs) | mocs;
724 *cs++ = lower_32_bits(dst_ofs);
725 *cs++ = upper_32_bits(dst_ofs) | mocs;
726
727 bb->len = cs - bb->cs;
728}
729
730#define EMIT_COPY_DW 10
731static void emit_xy_fast_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
732 u64 dst_ofs, unsigned int size,
733 unsigned int pitch)
734{
735 struct xe_device *xe = gt_to_xe(gt);
736 u32 mocs = 0;
737 u32 tile_y = 0;
738
739 xe_gt_assert(gt, !(pitch & 3));
740 xe_gt_assert(gt, size / pitch <= S16_MAX);
741 xe_gt_assert(gt, pitch / 4 <= S16_MAX);
742 xe_gt_assert(gt, pitch <= U16_MAX);
743
744 if (GRAPHICS_VER(xe) >= 20)
745 mocs = FIELD_PREP(XE2_XY_FAST_COPY_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index);
746
747 if (GRAPHICS_VERx100(xe) >= 1250)
748 tile_y = XY_FAST_COPY_BLT_D1_SRC_TILE4 | XY_FAST_COPY_BLT_D1_DST_TILE4;
749
750 bb->cs[bb->len++] = XY_FAST_COPY_BLT_CMD | (10 - 2);
751 bb->cs[bb->len++] = XY_FAST_COPY_BLT_DEPTH_32 | pitch | tile_y | mocs;
752 bb->cs[bb->len++] = 0;
753 bb->cs[bb->len++] = (size / pitch) << 16 | pitch / 4;
754 bb->cs[bb->len++] = lower_32_bits(dst_ofs);
755 bb->cs[bb->len++] = upper_32_bits(dst_ofs);
756 bb->cs[bb->len++] = 0;
757 bb->cs[bb->len++] = pitch | mocs;
758 bb->cs[bb->len++] = lower_32_bits(src_ofs);
759 bb->cs[bb->len++] = upper_32_bits(src_ofs);
760}
761
762#define PAGE_COPY_MODE_PS SZ_256 /* hw uses 256 bytes as the page-size */
763static void emit_mem_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
764 u64 dst_ofs, unsigned int size, unsigned int pitch)
765{
766 u32 mode, copy_type, width;
767
768 xe_gt_assert(gt, IS_ALIGNED(size, pitch));
769 xe_gt_assert(gt, pitch <= U16_MAX);
770 xe_gt_assert(gt, pitch);
771 xe_gt_assert(gt, size);
772
773 if (IS_ALIGNED(size, PAGE_COPY_MODE_PS) &&
774 IS_ALIGNED(lower_32_bits(src_ofs), PAGE_COPY_MODE_PS) &&
775 IS_ALIGNED(lower_32_bits(dst_ofs), PAGE_COPY_MODE_PS)) {
776 mode = MEM_COPY_PAGE_COPY_MODE;
777 copy_type = 0; /* linear copy */
778 width = size / PAGE_COPY_MODE_PS;
779 } else if (pitch > 1) {
780 xe_gt_assert(gt, size / pitch <= U16_MAX);
781 mode = 0; /* BYTE_COPY */
782 copy_type = MEM_COPY_MATRIX_COPY;
783 width = pitch;
784 } else {
785 mode = 0; /* BYTE_COPY */
786 copy_type = 0; /* linear copy */
787 width = size;
788 }
789
790 xe_gt_assert(gt, width <= U16_MAX);
791
792 bb->cs[bb->len++] = MEM_COPY_CMD | mode | copy_type;
793 bb->cs[bb->len++] = width - 1;
794 bb->cs[bb->len++] = size / pitch - 1; /* ignored by hw for page-copy/linear above */
795 bb->cs[bb->len++] = pitch - 1;
796 bb->cs[bb->len++] = pitch - 1;
797 bb->cs[bb->len++] = lower_32_bits(src_ofs);
798 bb->cs[bb->len++] = upper_32_bits(src_ofs);
799 bb->cs[bb->len++] = lower_32_bits(dst_ofs);
800 bb->cs[bb->len++] = upper_32_bits(dst_ofs);
801 bb->cs[bb->len++] = FIELD_PREP(MEM_COPY_SRC_MOCS_INDEX_MASK, gt->mocs.uc_index) |
802 FIELD_PREP(MEM_COPY_DST_MOCS_INDEX_MASK, gt->mocs.uc_index);
803}
804
805static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
806 u64 src_ofs, u64 dst_ofs, unsigned int size,
807 unsigned int pitch)
808{
809 struct xe_device *xe = gt_to_xe(gt);
810
811 if (xe->info.has_mem_copy_instr)
812 emit_mem_copy(gt, bb, src_ofs, dst_ofs, size, pitch);
813 else
814 emit_xy_fast_copy(gt, bb, src_ofs, dst_ofs, size, pitch);
815}
816
817static u64 xe_migrate_batch_base(struct xe_migrate *m, bool usm)
818{
819 return usm ? m->usm_batch_base_ofs : m->batch_base_ofs;
820}
821
822static u32 xe_migrate_ccs_copy(struct xe_migrate *m,
823 struct xe_bb *bb,
824 u64 src_ofs, bool src_is_indirect,
825 u64 dst_ofs, bool dst_is_indirect, u32 dst_size,
826 u64 ccs_ofs, bool copy_ccs)
827{
828 struct xe_gt *gt = m->tile->primary_gt;
829 u32 flush_flags = 0;
830
831 if (!copy_ccs && dst_is_indirect) {
832 /*
833 * If the src is already in vram, then it should already
834 * have been cleared by us, or has been populated by the
835 * user. Make sure we copy the CCS aux state as-is.
836 *
837 * Otherwise if the bo doesn't have any CCS metadata attached,
838 * we still need to clear it for security reasons.
839 */
840 u64 ccs_src_ofs = src_is_indirect ? src_ofs : m->cleared_mem_ofs;
841
842 emit_copy_ccs(gt, bb,
843 dst_ofs, true,
844 ccs_src_ofs, src_is_indirect, dst_size);
845
846 flush_flags = MI_FLUSH_DW_CCS;
847 } else if (copy_ccs) {
848 if (!src_is_indirect)
849 src_ofs = ccs_ofs;
850 else if (!dst_is_indirect)
851 dst_ofs = ccs_ofs;
852
853 xe_gt_assert(gt, src_is_indirect || dst_is_indirect);
854
855 emit_copy_ccs(gt, bb, dst_ofs, dst_is_indirect, src_ofs,
856 src_is_indirect, dst_size);
857 if (dst_is_indirect)
858 flush_flags = MI_FLUSH_DW_CCS;
859 }
860
861 return flush_flags;
862}
863
864static struct dma_fence *__xe_migrate_copy(struct xe_migrate *m,
865 struct xe_bo *src_bo,
866 struct xe_bo *dst_bo,
867 struct ttm_resource *src,
868 struct ttm_resource *dst,
869 bool copy_only_ccs,
870 bool is_vram_resolve)
871{
872 struct xe_gt *gt = m->tile->primary_gt;
873 struct xe_device *xe = gt_to_xe(gt);
874 struct dma_fence *fence = NULL;
875 u64 size = xe_bo_size(src_bo);
876 struct xe_res_cursor src_it, dst_it, ccs_it;
877 u64 src_L0_ofs, dst_L0_ofs;
878 u32 src_L0_pt, dst_L0_pt;
879 u64 src_L0, dst_L0;
880 int pass = 0;
881 int err;
882 bool src_is_pltt = src->mem_type == XE_PL_TT;
883 bool dst_is_pltt = dst->mem_type == XE_PL_TT;
884 bool src_is_vram = mem_type_is_vram(src->mem_type);
885 bool dst_is_vram = mem_type_is_vram(dst->mem_type);
886 bool type_device = src_bo->ttm.type == ttm_bo_type_device;
887 bool needs_ccs_emit = type_device && xe_migrate_needs_ccs_emit(xe);
888 bool copy_ccs = xe_device_has_flat_ccs(xe) &&
889 xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo);
890 bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram);
891
892 /*
893 * For a decompression operation, always use the compression PAT index.
894 * Otherwise, only use the compression PAT index for device memory
895 * when copying from VRAM to system memory.
896 */
897 bool use_comp_pat = is_vram_resolve || (type_device &&
898 xe_device_has_flat_ccs(xe) &&
899 GRAPHICS_VER(xe) >= 20 && src_is_vram && !dst_is_vram);
900
901 /* Copying CCS between two different BOs is not supported yet. */
902 if (XE_WARN_ON(copy_ccs && src_bo != dst_bo))
903 return ERR_PTR(-EINVAL);
904
905 if (src_bo != dst_bo && XE_WARN_ON(xe_bo_size(src_bo) != xe_bo_size(dst_bo)))
906 return ERR_PTR(-EINVAL);
907
908 if (!src_is_vram)
909 xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
910 else
911 xe_res_first(src, 0, size, &src_it);
912 if (!dst_is_vram)
913 xe_res_first_sg(xe_bo_sg(dst_bo), 0, size, &dst_it);
914 else
915 xe_res_first(dst, 0, size, &dst_it);
916
917 if (copy_system_ccs)
918 xe_res_first_sg(xe_bo_sg(src_bo), xe_bo_ccs_pages_start(src_bo),
919 PAGE_ALIGN(xe_device_ccs_bytes(xe, size)),
920 &ccs_it);
921
922 while (size) {
923 u32 batch_size = 1; /* MI_BATCH_BUFFER_END */
924 struct xe_sched_job *job;
925 struct xe_bb *bb;
926 u32 flush_flags = 0;
927 u32 update_idx;
928 u64 ccs_ofs, ccs_size;
929 u32 ccs_pt;
930 u32 pte_flags;
931
932 bool usm = xe->info.has_usm;
933 u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
934
935 src_L0 = xe_migrate_res_sizes(m, &src_it);
936 dst_L0 = xe_migrate_res_sizes(m, &dst_it);
937
938 drm_dbg(&xe->drm, "Pass %u, sizes: %llu & %llu\n",
939 pass++, src_L0, dst_L0);
940
941 src_L0 = min(src_L0, dst_L0);
942
943 pte_flags = src_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
944 pte_flags |= use_comp_pat ? PTE_UPDATE_FLAG_IS_COMP_PTE : 0;
945 batch_size += pte_update_size(m, pte_flags, src, &src_it, &src_L0,
946 &src_L0_ofs, &src_L0_pt, 0, 0,
947 avail_pts);
948 if (copy_only_ccs) {
949 dst_L0_ofs = src_L0_ofs;
950 } else {
951 pte_flags = dst_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
952 batch_size += pte_update_size(m, pte_flags, dst,
953 &dst_it, &src_L0,
954 &dst_L0_ofs, &dst_L0_pt,
955 0, avail_pts, avail_pts);
956 }
957
958 if (copy_system_ccs) {
959 xe_assert(xe, type_device);
960 ccs_size = xe_device_ccs_bytes(xe, src_L0);
961 batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size,
962 &ccs_ofs, &ccs_pt, 0,
963 2 * avail_pts,
964 avail_pts);
965 xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
966 }
967
968 /* Add copy commands size here */
969 batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
970 ((needs_ccs_emit ? EMIT_COPY_CCS_DW : 0));
971
972 bb = xe_bb_new(gt, batch_size, usm);
973 if (IS_ERR(bb)) {
974 err = PTR_ERR(bb);
975 goto err_sync;
976 }
977
978 if (src_is_vram && xe_migrate_allow_identity(src_L0, &src_it))
979 xe_res_next(&src_it, src_L0);
980 else
981 emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs || use_comp_pat,
982 &src_it, src_L0, src);
983
984 if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it))
985 xe_res_next(&dst_it, src_L0);
986 else if (!copy_only_ccs)
987 emit_pte(m, bb, dst_L0_pt, dst_is_vram, copy_system_ccs,
988 &dst_it, src_L0, dst);
989
990 if (copy_system_ccs)
991 emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);
992
993 bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
994 update_idx = bb->len;
995
996 if (!copy_only_ccs)
997 emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);
998
999 if (needs_ccs_emit)
1000 flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
1001 IS_DGFX(xe) ? src_is_vram : src_is_pltt,
1002 dst_L0_ofs,
1003 IS_DGFX(xe) ? dst_is_vram : dst_is_pltt,
1004 src_L0, ccs_ofs, copy_ccs);
1005
1006 job = xe_bb_create_migration_job(m->q, bb,
1007 xe_migrate_batch_base(m, usm),
1008 update_idx);
1009 if (IS_ERR(job)) {
1010 err = PTR_ERR(job);
1011 goto err;
1012 }
1013
1014 xe_sched_job_add_migrate_flush(job, flush_flags | MI_INVALIDATE_TLB);
1015 if (!fence) {
1016 err = xe_sched_job_add_deps(job, src_bo->ttm.base.resv,
1017 DMA_RESV_USAGE_BOOKKEEP);
1018 if (!err && src_bo->ttm.base.resv != dst_bo->ttm.base.resv)
1019 err = xe_sched_job_add_deps(job, dst_bo->ttm.base.resv,
1020 DMA_RESV_USAGE_BOOKKEEP);
1021 if (err)
1022 goto err_job;
1023 }
1024
1025 mutex_lock(&m->job_mutex);
1026 xe_sched_job_arm(job);
1027 dma_fence_put(fence);
1028 fence = dma_fence_get(&job->drm.s_fence->finished);
1029 xe_sched_job_push(job);
1030
1031 dma_fence_put(m->fence);
1032 m->fence = dma_fence_get(fence);
1033
1034 mutex_unlock(&m->job_mutex);
1035
1036 xe_bb_free(bb, fence);
1037 size -= src_L0;
1038 continue;
1039
1040err_job:
1041 xe_sched_job_put(job);
1042err:
1043 xe_bb_free(bb, NULL);
1044
1045err_sync:
1046 /* Sync partial copy if any. FIXME: under job_mutex? */
1047 if (fence) {
1048 dma_fence_wait(fence, false);
1049 dma_fence_put(fence);
1050 }
1051
1052 return ERR_PTR(err);
1053 }
1054
1055 return fence;
1056}
1057
1058/**
1059 * xe_migrate_copy() - Copy content of TTM resources.
1060 * @m: The migration context.
1061 * @src_bo: The buffer object @src is currently bound to.
1062 * @dst_bo: If copying between resources created for the same bo, set this to
1063 * the same value as @src_bo. If copying between buffer objects, set it to
1064 * the buffer object @dst is currently bound to.
1065 * @src: The source TTM resource.
1066 * @dst: The dst TTM resource.
1067 * @copy_only_ccs: If true copy only CCS metadata
1068 *
1069 * Copies the contents of @src to @dst: On flat CCS devices,
1070 * the CCS metadata is copied as well if needed, or if not present,
1071 * the CCS metadata of @dst is cleared for security reasons.
1072 *
1073 * Return: Pointer to a dma_fence representing the last copy batch, or
1074 * an error pointer on failure. If there is a failure, any copy operation
1075 * started by the function call has been synced.
1076 */
1077struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
1078 struct xe_bo *src_bo,
1079 struct xe_bo *dst_bo,
1080 struct ttm_resource *src,
1081 struct ttm_resource *dst,
1082 bool copy_only_ccs)
1083{
1084 return __xe_migrate_copy(m, src_bo, dst_bo, src, dst, copy_only_ccs, false);
1085}
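
/*
 * Illustrative caller sketch (hypothetical, not part of this file; tile, bo
 * and new_mem are assumed to exist in the caller): copy a BO between its
 * current resource and a freshly allocated one, then wait for the blit.
 *
 *	fence = xe_migrate_copy(tile->migrate, bo, bo, bo->ttm.resource,
 *				new_mem, false);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	dma_fence_wait(fence, false);
 *	dma_fence_put(fence);
 */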
1086
1087/**
1088 * xe_migrate_resolve() - Resolve and decompress a buffer object if required.
1089 * @m: The migrate context
1090 * @bo: The buffer object to resolve
1091 * @res: The TTM resource @bo is currently bound to, used as both source and destination
1092 *
1093 * Wrapper around __xe_migrate_copy() with is_vram_resolve set to true
1094 * to trigger decompression if needed.
1095 *
1096 * Return: A dma_fence that signals on completion, or an ERR_PTR on failure.
1097 */
1098struct dma_fence *xe_migrate_resolve(struct xe_migrate *m,
1099 struct xe_bo *bo,
1100 struct ttm_resource *res)
1101{
1102 return __xe_migrate_copy(m, bo, bo, res, res, false, true);
1103}
1104
1105/**
1106 * xe_migrate_lrc() - Get the LRC from migrate context.
1107 * @migrate: Migrate context.
1108 *
1109 * Return: Pointer to the LRC of the default migration exec queue
1110 */
1111struct xe_lrc *xe_migrate_lrc(struct xe_migrate *migrate)
1112{
1113 return migrate->q->lrc[0];
1114}
1115
1116static u64 migrate_vm_ppgtt_addr_tlb_inval(void)
1117{
1118 /*
1119 * The migrate VM is self-referential so it can modify its own PTEs (see
1120 * pte_update_size() or emit_pte() functions). We reserve NUM_KERNEL_PDE
1121 * entries for kernel operations (copies, clears, CCS migrate), and
1122 * suballocate the rest to user operations (binds/unbinds). With
1123 * NUM_KERNEL_PDE = 15, NUM_KERNEL_PDE - 1 is already used for PTE updates,
1124 * so assign NUM_KERNEL_PDE - 2 for TLB invalidation.
1125 */
1126 return (NUM_KERNEL_PDE - 2) * XE_PAGE_SIZE;
1127}
1128
1129static int emit_flush_invalidate(u32 *dw, int i, u32 flags)
1130{
1131 u64 addr = migrate_vm_ppgtt_addr_tlb_inval();
1132
1133 dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW |
1134 MI_FLUSH_IMM_DW | flags;
1135 dw[i++] = lower_32_bits(addr);
1136 dw[i++] = upper_32_bits(addr);
1137 dw[i++] = MI_NOOP;
1138 dw[i++] = MI_NOOP;
1139
1140 return i;
1141}
1142
1143/**
1144 * xe_migrate_ccs_rw_copy() - Create the CCS read/write copy batch buffer.
1145 * @tile: Tile whose migration context is to be used.
1146 * @q: Exec queue to be used along with the migration context.
1147 * @src_bo: The buffer object whose CCS metadata is to be copied.
1148 * @read_write: Whether to create BB commands for the CCS read or write context.
1149 *
1150 * Creates batch buffer instructions to copy CCS metadata from CCS pool to
1151 * memory and vice versa.
1152 *
1153 * This function should only be called for IGPU.
1154 *
1155 * Return: 0 if successful, negative error code on failure.
1156 */
1157int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q,
1158 struct xe_bo *src_bo,
1159 enum xe_sriov_vf_ccs_rw_ctxs read_write)
1160
1161{
1162 bool src_is_pltt = read_write == XE_SRIOV_VF_CCS_READ_CTX;
1163 bool dst_is_pltt = read_write == XE_SRIOV_VF_CCS_WRITE_CTX;
1164 struct ttm_resource *src = src_bo->ttm.resource;
1165 struct xe_migrate *m = tile->migrate;
1166 struct xe_gt *gt = tile->primary_gt;
1167 u32 batch_size, batch_size_allocated;
1168 struct xe_device *xe = gt_to_xe(gt);
1169 struct xe_res_cursor src_it, ccs_it;
1170 struct xe_mem_pool *bb_pool;
1171 struct xe_sriov_vf_ccs_ctx *ctx;
1172 u64 size = xe_bo_size(src_bo);
1173 struct xe_mem_pool_node *bb;
1174 u64 src_L0, src_L0_ofs;
1175 struct xe_bb xe_bb_tmp;
1176 u32 src_L0_pt;
1177 int err;
1178
1179 ctx = &xe->sriov.vf.ccs.contexts[read_write];
1180
1181 xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
1182
1183 xe_res_first_sg(xe_bo_sg(src_bo), xe_bo_ccs_pages_start(src_bo),
1184 PAGE_ALIGN(xe_device_ccs_bytes(xe, size)),
1185 &ccs_it);
1186
1187 /* Calculate Batch buffer size */
1188 batch_size = 0;
1189 while (size) {
1190 batch_size += 10; /* two flush/invalidate packets: flush + addr + 2 NOPs each */
1191 u64 ccs_ofs, ccs_size;
1192 u32 ccs_pt;
1193
1194 u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
1195
1196 src_L0 = min_t(u64, max_mem_transfer_per_pass(xe), size);
1197
1198 batch_size += pte_update_size(m, false, src, &src_it, &src_L0,
1199 &src_L0_ofs, &src_L0_pt, 0, 0,
1200 avail_pts);
1201
1202 ccs_size = xe_device_ccs_bytes(xe, src_L0);
1203 batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size, &ccs_ofs,
1204 &ccs_pt, 0, avail_pts, avail_pts);
1205 xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
1206
1207 /* Add copy commands size here */
1208 batch_size += EMIT_COPY_CCS_DW;
1209
1210 size -= src_L0;
1211 }
1212
1213 bb = xe_mem_pool_alloc_node();
1214 if (IS_ERR(bb))
1215 return PTR_ERR(bb);
1216
1217 bb_pool = ctx->mem.ccs_bb_pool;
1218 scoped_guard(mutex, xe_mem_pool_bo_swap_guard(bb_pool)) {
1219 xe_mem_pool_swap_shadow_locked(bb_pool);
1220
1221 err = xe_mem_pool_insert_node(bb_pool, bb, batch_size * sizeof(u32));
1222 if (err) {
1223 xe_gt_err(gt, "BB allocation failed.\n");
1224 kfree(bb);
1225 return err;
1226 }
1227
1228 batch_size_allocated = batch_size;
1229 size = xe_bo_size(src_bo);
1230 batch_size = 0;
1231
1232 xe_bb_tmp = (struct xe_bb){ .cs = xe_mem_pool_node_cpu_addr(bb), .len = 0 };
1233 /*
1234 * Emit PTE and copy commands here.
1235 * The CCS copy command only supports a limited size. If the size to be
1236 * copied exceeds that limit, the copy is divided into chunks, so the
1237 * sizes are calculated here again before the copy commands are emitted.
1238 */
1239
1240 while (size) {
1241 batch_size += 10; /* two flush/invalidate packets: flush + addr + 2 NOPs each */
1242 u32 flush_flags = 0;
1243 u64 ccs_ofs, ccs_size;
1244 u32 ccs_pt;
1245
1246 u32 avail_pts = max_mem_transfer_per_pass(xe) /
1247 LEVEL0_PAGE_TABLE_ENCODE_SIZE;
1248
1249 src_L0 = xe_migrate_res_sizes(m, &src_it);
1250
1251 batch_size += pte_update_size(m, false, src, &src_it, &src_L0,
1252 &src_L0_ofs, &src_L0_pt, 0, 0,
1253 avail_pts);
1254
1255 ccs_size = xe_device_ccs_bytes(xe, src_L0);
1256 batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size, &ccs_ofs,
1257 &ccs_pt, 0, avail_pts, avail_pts);
1258 xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
1259 batch_size += EMIT_COPY_CCS_DW;
1260
1261 emit_pte(m, &xe_bb_tmp, src_L0_pt, false, true, &src_it, src_L0, src);
1262
1263 emit_pte(m, &xe_bb_tmp, ccs_pt, false, false, &ccs_it, ccs_size, src);
1264
1265 xe_bb_tmp.len = emit_flush_invalidate(xe_bb_tmp.cs, xe_bb_tmp.len,
1266 flush_flags);
1267 flush_flags = xe_migrate_ccs_copy(m, &xe_bb_tmp, src_L0_ofs, src_is_pltt,
1268 src_L0_ofs, dst_is_pltt,
1269 src_L0, ccs_ofs, true);
1270 xe_bb_tmp.len = emit_flush_invalidate(xe_bb_tmp.cs, xe_bb_tmp.len,
1271 flush_flags);
1272
1273 size -= src_L0;
1274 }
1275
1276 xe_assert(xe, (batch_size_allocated == xe_bb_tmp.len));
1277 xe_assert(xe, bb->sa_node.size == xe_bb_tmp.len * sizeof(u32));
1278 src_bo->bb_ccs[read_write] = bb;
1279
1280 xe_sriov_vf_ccs_rw_update_bb_addr(ctx);
1281 xe_mem_pool_sync_shadow_locked(bb);
1282 }
1283
1284 return 0;
1285}
1286
1287/**
1288 * xe_migrate_ccs_rw_copy_clear() - Clear the CCS read/write batch buffer
1289 * content.
1290 * @src_bo: The buffer object whose CCS batch buffer is to be cleared.
1291 * @read_write: Which CCS context (read or write) the batch buffer belongs to.
1292 *
1293 * Directly clearing the BB lacks atomicity and can lead to undefined
1294 * behavior if the vCPU is halted mid-operation during the clearing
1295 * process. To avoid this issue, we use a shadow buffer object approach.
1296 *
1297 * First swap the SA BO address with the shadow BO, perform the clearing
1298 * operation on the BB, update the shadow BO in the ring buffer, then
1299 * sync the shadow and the actual buffer to maintain consistency.
1300 *
1301 * Returns: None.
1302 */
1303void xe_migrate_ccs_rw_copy_clear(struct xe_bo *src_bo,
1304 enum xe_sriov_vf_ccs_rw_ctxs read_write)
1305{
1306 struct xe_mem_pool_node *bb = src_bo->bb_ccs[read_write];
1307 struct xe_device *xe = xe_bo_device(src_bo);
1308 struct xe_mem_pool *bb_pool;
1309 struct xe_sriov_vf_ccs_ctx *ctx;
1310 u32 *cs;
1311
1312 xe_assert(xe, IS_SRIOV_VF(xe));
1313
1314 ctx = &xe->sriov.vf.ccs.contexts[read_write];
1315 bb_pool = ctx->mem.ccs_bb_pool;
1316
1317 scoped_guard(mutex, xe_mem_pool_bo_swap_guard(bb_pool)) {
1318 xe_mem_pool_swap_shadow_locked(bb_pool);
1319
1320 cs = xe_mem_pool_node_cpu_addr(bb);
1321 memset(cs, MI_NOOP, bb->sa_node.size);
1322 xe_sriov_vf_ccs_rw_update_bb_addr(ctx);
1323
1324 xe_mem_pool_sync_shadow_locked(bb);
1325 xe_mem_pool_free_node(bb);
1326 src_bo->bb_ccs[read_write] = NULL;
1327 }
1328}
1329
1330/**
1331 * xe_migrate_exec_queue() - Get the execution queue from migrate context.
1332 * @migrate: Migrate context.
1333 *
1334 * Return: Pointer to the migration exec queue
1335 */
1336struct xe_exec_queue *xe_migrate_exec_queue(struct xe_migrate *migrate)
1337{
1338 return migrate->q;
1339}
1340
1341/**
1342 * xe_migrate_vram_copy_chunk() - Copy a chunk of a VRAM buffer object.
1343 * @vram_bo: The VRAM buffer object.
1344 * @vram_offset: The VRAM offset.
1345 * @sysmem_bo: The sysmem buffer object.
1346 * @sysmem_offset: The sysmem offset.
1347 * @size: The size of VRAM chunk to copy.
1348 * @dir: The direction of the copy operation.
1349 *
1350 * Copies a portion of a buffer object between VRAM and system memory.
1351 * On Xe2 platforms that support flat CCS, VRAM data is decompressed when
1352 * copying to system memory.
1353 *
1354 * Return: Pointer to a dma_fence representing the last copy batch, or
1355 * an error pointer on failure. If there is a failure, any copy operation
1356 * started by the function call has been synced.
1357 */
1358struct dma_fence *xe_migrate_vram_copy_chunk(struct xe_bo *vram_bo, u64 vram_offset,
1359 struct xe_bo *sysmem_bo, u64 sysmem_offset,
1360 u64 size, enum xe_migrate_copy_dir dir)
1361{
1362 struct xe_device *xe = xe_bo_device(vram_bo);
1363 struct xe_tile *tile = vram_bo->tile;
1364 struct xe_gt *gt = tile->primary_gt;
1365 struct xe_migrate *m = tile->migrate;
1366 struct dma_fence *fence = NULL;
1367 struct ttm_resource *vram = vram_bo->ttm.resource;
1368 struct ttm_resource *sysmem = sysmem_bo->ttm.resource;
1369 struct xe_res_cursor vram_it, sysmem_it;
1370 u64 vram_L0_ofs, sysmem_L0_ofs;
1371 u32 vram_L0_pt, sysmem_L0_pt;
1372 u64 vram_L0, sysmem_L0;
1373 bool to_sysmem = (dir == XE_MIGRATE_COPY_TO_SRAM);
1374 bool use_comp_pat = to_sysmem &&
1375 GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe);
1376 int pass = 0;
1377 int err;
1378
1379 xe_assert(xe, IS_ALIGNED(vram_offset | sysmem_offset | size, PAGE_SIZE));
1380 xe_assert(xe, xe_bo_is_vram(vram_bo));
1381 xe_assert(xe, !xe_bo_is_vram(sysmem_bo));
1382 xe_assert(xe, !range_overflows(vram_offset, size, (u64)vram_bo->ttm.base.size));
1383 xe_assert(xe, !range_overflows(sysmem_offset, size, (u64)sysmem_bo->ttm.base.size));
1384
1385 xe_res_first(vram, vram_offset, size, &vram_it);
1386 xe_res_first_sg(xe_bo_sg(sysmem_bo), sysmem_offset, size, &sysmem_it);
1387
1388 while (size) {
1389 u32 pte_flags = PTE_UPDATE_FLAG_IS_VRAM;
1390 u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
1391 struct xe_sched_job *job;
1392 struct xe_bb *bb;
1393 u32 update_idx;
1394 bool usm = xe->info.has_usm;
1395 u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
1396
1397 sysmem_L0 = xe_migrate_res_sizes(m, &sysmem_it);
1398 vram_L0 = min(xe_migrate_res_sizes(m, &vram_it), sysmem_L0);
1399
1400 xe_dbg(xe, "Pass %u, size: %llu\n", pass++, vram_L0);
1401
1402 pte_flags |= use_comp_pat ? PTE_UPDATE_FLAG_IS_COMP_PTE : 0;
1403 batch_size += pte_update_size(m, pte_flags, vram, &vram_it, &vram_L0,
1404 &vram_L0_ofs, &vram_L0_pt, 0, 0, avail_pts);
1405
1406 batch_size += pte_update_size(m, 0, sysmem, &sysmem_it, &vram_L0, &sysmem_L0_ofs,
1407 &sysmem_L0_pt, 0, avail_pts, avail_pts);
1408 batch_size += EMIT_COPY_DW;
1409
1410 bb = xe_bb_new(gt, batch_size, usm);
1411 if (IS_ERR(bb)) {
1412 err = PTR_ERR(bb);
1413 return ERR_PTR(err);
1414 }
1415
1416 if (xe_migrate_allow_identity(vram_L0, &vram_it))
1417 xe_res_next(&vram_it, vram_L0);
1418 else
1419 emit_pte(m, bb, vram_L0_pt, true, use_comp_pat, &vram_it, vram_L0, vram);
1420
1421 emit_pte(m, bb, sysmem_L0_pt, false, false, &sysmem_it, vram_L0, sysmem);
1422
1423 bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1424 update_idx = bb->len;
1425
1426 if (to_sysmem)
1427 emit_copy(gt, bb, vram_L0_ofs, sysmem_L0_ofs, vram_L0, XE_PAGE_SIZE);
1428 else
1429 emit_copy(gt, bb, sysmem_L0_ofs, vram_L0_ofs, vram_L0, XE_PAGE_SIZE);
1430
1431 job = xe_bb_create_migration_job(m->q, bb, xe_migrate_batch_base(m, usm),
1432 update_idx);
1433 if (IS_ERR(job)) {
1434 xe_bb_free(bb, NULL);
1435 err = PTR_ERR(job);
1436 return ERR_PTR(err);
1437 }
1438
1439 xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
1440
1441 xe_assert(xe, dma_resv_test_signaled(vram_bo->ttm.base.resv,
1442 DMA_RESV_USAGE_BOOKKEEP));
1443 xe_assert(xe, dma_resv_test_signaled(sysmem_bo->ttm.base.resv,
1444 DMA_RESV_USAGE_BOOKKEEP));
1445
1446 scoped_guard(mutex, &m->job_mutex) {
1447 xe_sched_job_arm(job);
1448 dma_fence_put(fence);
1449 fence = dma_fence_get(&job->drm.s_fence->finished);
1450 xe_sched_job_push(job);
1451
1452 dma_fence_put(m->fence);
1453 m->fence = dma_fence_get(fence);
1454 }
1455
1456 xe_bb_free(bb, fence);
1457 size -= vram_L0;
1458 }
1459
1460 return fence;
1461}
1462
1463static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
1464 u32 size, u32 pitch)
1465{
1466 struct xe_device *xe = gt_to_xe(gt);
1467 u32 *cs = bb->cs + bb->len;
1468 u32 len = PVC_MEM_SET_CMD_LEN_DW;
1469
1470 *cs++ = PVC_MEM_SET_CMD | PVC_MEM_SET_MATRIX | (len - 2);
1471 *cs++ = pitch - 1;
1472 *cs++ = (size / pitch) - 1;
1473 *cs++ = pitch - 1;
1474 *cs++ = lower_32_bits(src_ofs);
1475 *cs++ = upper_32_bits(src_ofs);
1476 if (GRAPHICS_VERx100(xe) >= 2000)
1477 *cs++ = FIELD_PREP(XE2_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index);
1478 else
1479 *cs++ = FIELD_PREP(PVC_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index);
1480
1481 xe_gt_assert(gt, cs - bb->cs == len + bb->len);
1482
1483 bb->len += len;
1484}
1485
1486static void emit_clear_main_copy(struct xe_gt *gt, struct xe_bb *bb,
1487 u64 src_ofs, u32 size, u32 pitch, bool is_vram)
1488{
1489 struct xe_device *xe = gt_to_xe(gt);
1490 u32 *cs = bb->cs + bb->len;
1491 u32 len = XY_FAST_COLOR_BLT_DW;
1492
1493 if (GRAPHICS_VERx100(xe) < 1250)
1494 len = 11;
1495
1496 *cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 |
1497 (len - 2);
1498 if (GRAPHICS_VERx100(xe) >= 2000)
1499 *cs++ = FIELD_PREP(XE2_XY_FAST_COLOR_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index) |
1500 (pitch - 1);
1501 else
1502 *cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, gt->mocs.uc_index) |
1503 (pitch - 1);
1504 *cs++ = 0;
1505 *cs++ = (size / pitch) << 16 | pitch / 4;
1506 *cs++ = lower_32_bits(src_ofs);
1507 *cs++ = upper_32_bits(src_ofs);
1508 *cs++ = (is_vram ? 0x0 : 0x1) << XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT;
1509 *cs++ = 0;
1510 *cs++ = 0;
1511 *cs++ = 0;
1512 *cs++ = 0;
1513
1514 if (len > 11) {
1515 *cs++ = 0;
1516 *cs++ = 0;
1517 *cs++ = 0;
1518 *cs++ = 0;
1519 *cs++ = 0;
1520 }
1521
1522 xe_gt_assert(gt, cs - bb->cs == len + bb->len);
1523
1524 bb->len += len;
1525}
1526
1527static bool has_service_copy_support(struct xe_gt *gt)
1528{
1529 /*
1530 * What we care about is whether the architecture was designed with
1531 * service copy functionality (specifically the new MEM_SET / MEM_COPY
1532 * instructions) so check the architectural engine list rather than the
1533 * actual list since these instructions are usable on BCS0 even if
1534 * all of the actual service copy engines (BCS1-BCS8) have been fused
1535 * off.
1536 */
1537 return gt->info.engine_mask & GENMASK(XE_HW_ENGINE_BCS8,
1538 XE_HW_ENGINE_BCS1);
1539}
1540
1541static u32 emit_clear_cmd_len(struct xe_gt *gt)
1542{
1543 if (has_service_copy_support(gt))
1544 return PVC_MEM_SET_CMD_LEN_DW;
1545 else
1546 return XY_FAST_COLOR_BLT_DW;
1547}
1548
1549static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
1550 u32 size, u32 pitch, bool is_vram)
1551{
1552 if (has_service_copy_support(gt))
1553 emit_clear_link_copy(gt, bb, src_ofs, size, pitch);
1554 else
1555 emit_clear_main_copy(gt, bb, src_ofs, size, pitch,
1556 is_vram);
1557}
1558
1559/**
1560 * xe_migrate_clear() - Clear content of a TTM resource.
1561 * @m: The migration context.
1562 * @bo: The buffer object @dst is currently bound to.
1563 * @dst: The dst TTM resource to be cleared.
1564 * @clear_flags: flags to specify which data to clear: CCS, BO, or both.
1565 *
1566 * Clear the contents of @dst to zero when XE_MIGRATE_CLEAR_FLAG_BO_DATA is set.
1567 * On flat CCS devices, the CCS metadata is cleared to zero with XE_MIGRATE_CLEAR_FLAG_CCS_DATA.
1568 * Set XE_MIGRATE_CLEAR_FLAG_FULL to clear bo as well as CCS metadata.
1569 * TODO: Eliminate the @bo argument.
1570 *
1571 * Return: Pointer to a dma_fence representing the last clear batch, or
1572 * an error pointer on failure. If there is a failure, any clear operation
1573 * started by the function call has been synced.
1574 */
1575struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
1576 struct xe_bo *bo,
1577 struct ttm_resource *dst,
1578 u32 clear_flags)
1579{
1580 bool clear_vram = mem_type_is_vram(dst->mem_type);
1581 bool clear_bo_data = XE_MIGRATE_CLEAR_FLAG_BO_DATA & clear_flags;
1582 bool clear_ccs = XE_MIGRATE_CLEAR_FLAG_CCS_DATA & clear_flags;
1583 struct xe_gt *gt = m->tile->primary_gt;
1584 struct xe_device *xe = gt_to_xe(gt);
1585 bool clear_only_system_ccs = false;
1586 struct dma_fence *fence = NULL;
1587 u64 size = xe_bo_size(bo);
1588 struct xe_res_cursor src_it;
1589 struct ttm_resource *src = dst;
1590 int err;
1591
1592 if (WARN_ON(!clear_bo_data && !clear_ccs))
1593 return NULL;
1594
1595 if (!clear_bo_data && clear_ccs && !IS_DGFX(xe))
1596 clear_only_system_ccs = true;
1597
1598 if (!clear_vram)
1599 xe_res_first_sg(xe_bo_sg(bo), 0, xe_bo_size(bo), &src_it);
1600 else
1601 xe_res_first(src, 0, xe_bo_size(bo), &src_it);
1602
1603 while (size) {
1604 u64 clear_L0_ofs;
1605 u32 clear_L0_pt;
1606 u32 flush_flags = 0;
1607 u64 clear_L0;
1608 struct xe_sched_job *job;
1609 struct xe_bb *bb;
1610 u32 batch_size, update_idx;
1611 u32 pte_flags;
1612
1613 bool usm = xe->info.has_usm;
1614 u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
1615
1616 clear_L0 = xe_migrate_res_sizes(m, &src_it);
1617
1618 /* Calculate final sizes and batch size.. */
1619 pte_flags = clear_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
1620 batch_size = 1 +
1621 pte_update_size(m, pte_flags, src, &src_it,
1622 &clear_L0, &clear_L0_ofs, &clear_L0_pt,
1623 clear_bo_data ? emit_clear_cmd_len(gt) : 0, 0,
1624 avail_pts);
1625
1626 if (xe_migrate_needs_ccs_emit(xe))
1627 batch_size += EMIT_COPY_CCS_DW;
1628
1629 /* Clear commands */
1630
1631 if (WARN_ON_ONCE(!clear_L0))
1632 break;
1633
1634 bb = xe_bb_new(gt, batch_size, usm);
1635 if (IS_ERR(bb)) {
1636 err = PTR_ERR(bb);
1637 goto err_sync;
1638 }
1639
1640 size -= clear_L0;
1641 /* Preemption is enabled again by the ring ops. */
1642 if (clear_vram && xe_migrate_allow_identity(clear_L0, &src_it)) {
1643 xe_res_next(&src_it, clear_L0);
1644 } else {
1645 emit_pte(m, bb, clear_L0_pt, clear_vram,
1646 clear_only_system_ccs, &src_it, clear_L0, dst);
1647 flush_flags |= MI_INVALIDATE_TLB;
1648 }
1649
1650 bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1651 update_idx = bb->len;
1652
1653 if (clear_bo_data)
1654 emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE, clear_vram);
1655
1656 if (xe_migrate_needs_ccs_emit(xe)) {
1657 emit_copy_ccs(gt, bb, clear_L0_ofs, true,
1658 m->cleared_mem_ofs, false, clear_L0);
1659 flush_flags |= MI_FLUSH_DW_CCS;
1660 }
1661
1662 job = xe_bb_create_migration_job(m->q, bb,
1663 xe_migrate_batch_base(m, usm),
1664 update_idx);
1665 if (IS_ERR(job)) {
1666 err = PTR_ERR(job);
1667 goto err;
1668 }
1669
1670 xe_sched_job_add_migrate_flush(job, flush_flags);
1671 if (!fence) {
1672 /*
1673 * There can't be anything userspace related at this
1674 * point, so we just need to respect any potential move
1675 * fences, which are always tracked as
1676 * DMA_RESV_USAGE_KERNEL.
1677 */
1678 err = xe_sched_job_add_deps(job, bo->ttm.base.resv,
1679 DMA_RESV_USAGE_KERNEL);
1680 if (err)
1681 goto err_job;
1682 }
1683
1684 mutex_lock(&m->job_mutex);
1685 xe_sched_job_arm(job);
1686 dma_fence_put(fence);
1687 fence = dma_fence_get(&job->drm.s_fence->finished);
1688 xe_sched_job_push(job);
1689
1690 dma_fence_put(m->fence);
1691 m->fence = dma_fence_get(fence);
1692
1693 mutex_unlock(&m->job_mutex);
1694
1695 xe_bb_free(bb, fence);
1696 continue;
1697
1698err_job:
1699 xe_sched_job_put(job);
1700err:
1701 xe_bb_free(bb, NULL);
1702err_sync:
1703 /* Sync partial copies if any. FIXME: job_mutex? */
1704 if (fence) {
1705 dma_fence_wait(fence, false);
1706 dma_fence_put(fence);
1707 }
1708
1709 return ERR_PTR(err);
1710 }
1711
1712 if (clear_ccs)
1713 bo->ccs_cleared = true;
1714
1715 return fence;
1716}
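
/*
 * Illustrative caller sketch (hypothetical, not part of this file; tile and
 * bo are assumed to exist in the caller): clear both the BO contents and its
 * CCS metadata after allocation, then wait for the clear to finish.
 *
 *	fence = xe_migrate_clear(tile->migrate, bo, bo->ttm.resource,
 *				 XE_MIGRATE_CLEAR_FLAG_FULL);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	dma_fence_wait(fence, false);
 *	dma_fence_put(fence);
 */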
1717
1718static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
1719 const struct xe_vm_pgtable_update_op *pt_op,
1720 const struct xe_vm_pgtable_update *update,
1721 struct xe_migrate_pt_update *pt_update)
1722{
1723 const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1724 u32 chunk;
1725 u32 ofs = update->ofs, size = update->qwords;
1726
1727 /*
1728 * If we have 512 entries (max), we would populate it ourselves,
1729 * and update the PDE above it to the new pointer.
1730 * The only time this can happen is if we have to update the top
1731 * PDE. This requires a BO that is almost vm->size big.
1732 *
1733 * This shouldn't be possible in practice; it might change when 16K
1734 * pages are used. Hence the assert.
1735 */
1736 xe_tile_assert(tile, update->qwords < MAX_NUM_PTE);
1737 if (!ppgtt_ofs)
1738 ppgtt_ofs = xe_migrate_vram_ofs(tile_to_xe(tile),
1739 xe_bo_addr(update->pt_bo, 0,
1740 XE_PAGE_SIZE), false);
1741
1742 do {
1743 u64 addr = ppgtt_ofs + ofs * 8;
1744
1745 chunk = min(size, MAX_PTE_PER_SDI);
1746
1747 /* Ensure populatefn can do memset64 by aligning bb->cs */
1748 if (!(bb->len & 1))
1749 bb->cs[bb->len++] = MI_NOOP;
1750
1751 bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
1752 bb->cs[bb->len++] = lower_32_bits(addr);
1753 bb->cs[bb->len++] = upper_32_bits(addr);
1754 if (pt_op->bind)
1755 ops->populate(pt_update, tile, NULL, bb->cs + bb->len,
1756 ofs, chunk, update);
1757 else
1758 ops->clear(pt_update, tile, NULL, bb->cs + bb->len,
1759 ofs, chunk, update);
1760
1761 bb->len += chunk * 2;
1762 ofs += chunk;
1763 size -= chunk;
1764 } while (size);
1765}
1766
1767struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m)
1768{
1769 return xe_vm_get(m->q->vm);
1770}
1771
1772#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
1773struct migrate_test_params {
1774 struct xe_test_priv base;
1775 bool force_gpu;
1776};
1777
1778#define to_migrate_test_params(_priv) \
1779 container_of(_priv, struct migrate_test_params, base)
1780#endif
1781
1782static struct dma_fence *
1783xe_migrate_update_pgtables_cpu(struct xe_migrate *m,
1784 struct xe_migrate_pt_update *pt_update)
1785{
1786 XE_TEST_DECLARE(struct migrate_test_params *test =
1787 to_migrate_test_params
1788 (xe_cur_kunit_priv(XE_TEST_LIVE_MIGRATE));)
1789 const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1790 struct xe_vm *vm = pt_update->vops->vm;
1791 struct xe_vm_pgtable_update_ops *pt_update_ops =
1792 &pt_update->vops->pt_update_ops[pt_update->tile_id];
1793 int err;
1794 u32 i, j;
1795
1796 if (XE_TEST_ONLY(test && test->force_gpu))
1797 return ERR_PTR(-ETIME);
1798
1799 if (ops->pre_commit) {
1800 pt_update->job = NULL;
1801 err = ops->pre_commit(pt_update);
1802 if (err)
1803 return ERR_PTR(err);
1804 }
1805
1806 for (i = 0; i < pt_update_ops->num_ops; ++i) {
1807 const struct xe_vm_pgtable_update_op *pt_op =
1808 &pt_update_ops->ops[i];
1809
1810 for (j = 0; j < pt_op->num_entries; j++) {
1811 const struct xe_vm_pgtable_update *update =
1812 &pt_op->entries[j];
1813
1814 if (pt_op->bind)
1815 ops->populate(pt_update, m->tile,
1816 &update->pt_bo->vmap, NULL,
1817 update->ofs, update->qwords,
1818 update);
1819 else
1820 ops->clear(pt_update, m->tile,
1821 &update->pt_bo->vmap, NULL,
1822 update->ofs, update->qwords, update);
1823 }
1824 }
1825
1826 trace_xe_vm_cpu_bind(vm);
1827 xe_device_wmb(vm->xe);
1828
1829 return dma_fence_get_stub();
1830}
1831
1832static struct dma_fence *
1833__xe_migrate_update_pgtables(struct xe_migrate *m,
1834 struct xe_migrate_pt_update *pt_update,
1835 struct xe_vm_pgtable_update_ops *pt_update_ops)
1836{
1837 const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1838 struct xe_tile *tile = m->tile;
1839 struct xe_gt *gt = tile->primary_gt;
1840 struct xe_device *xe = tile_to_xe(tile);
1841 struct xe_sched_job *job;
1842 struct dma_fence *fence;
1843 struct drm_suballoc *sa_bo = NULL;
1844 struct xe_bb *bb;
1845 u32 i, j, batch_size = 0, ppgtt_ofs, update_idx, page_ofs = 0;
1846 u32 num_updates = 0, current_update = 0;
1847 u64 addr;
1848 int err = 0;
1849 bool is_migrate = pt_update_ops->q == m->q;
1850 bool usm = is_migrate && xe->info.has_usm;
1851
1852 for (i = 0; i < pt_update_ops->num_ops; ++i) {
1853 struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
1854 struct xe_vm_pgtable_update *updates = pt_op->entries;
1855
1856 num_updates += pt_op->num_entries;
1857 for (j = 0; j < pt_op->num_entries; ++j) {
1858 u32 num_cmds = DIV_ROUND_UP(updates[j].qwords,
1859 MAX_PTE_PER_SDI);
1860
1861 /* align noop + MI_STORE_DATA_IMM cmd prefix */
1862 batch_size += 4 * num_cmds + updates[j].qwords * 2;
1863 }
1864 }
1865
1866 /* fixed + PTE entries */
1867 if (IS_DGFX(xe))
1868 batch_size += 2;
1869 else
1870 batch_size += 6 * (num_updates / MAX_PTE_PER_SDI + 1) +
1871 num_updates * 2;
1872
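	/*
	 * Worked sizing example (illustrative): a single bind op with one
	 * 600-qword entry needs DIV_ROUND_UP(600, 510) = 2 SDI commands,
	 * i.e. 4 * 2 + 600 * 2 = 1208 dwords above. On integrated, mapping
	 * that one page-table BO adds 6 * (1 / 510 + 1) + 1 * 2 = 8 more
	 * dwords, giving batch_size = 1216.
	 */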
1873 bb = xe_bb_new(gt, batch_size, usm);
1874 if (IS_ERR(bb))
1875 return ERR_CAST(bb);
1876
1877	/* For sysmem PTEs, we need to map them in our hole... */
1878 if (!IS_DGFX(xe)) {
1879 u16 pat_index = xe->pat.idx[XE_CACHE_WB];
1880 u32 ptes, ofs;
1881
1882 ppgtt_ofs = NUM_KERNEL_PDE - 1;
1883 if (!is_migrate) {
1884 u32 num_units = DIV_ROUND_UP(num_updates,
1885 NUM_VMUSA_WRITES_PER_UNIT);
1886
1887 if (num_units > m->vm_update_sa.size) {
1888 err = -ENOBUFS;
1889 goto err_bb;
1890 }
1891 sa_bo = drm_suballoc_new(&m->vm_update_sa, num_units,
1892 GFP_KERNEL, true, 0);
1893 if (IS_ERR(sa_bo)) {
1894 err = PTR_ERR(sa_bo);
1895 goto err_bb;
1896 }
1897
1898 ppgtt_ofs = NUM_KERNEL_PDE +
1899 (drm_suballoc_soffset(sa_bo) /
1900 NUM_VMUSA_UNIT_PER_PAGE);
1901 page_ofs = (drm_suballoc_soffset(sa_bo) %
1902 NUM_VMUSA_UNIT_PER_PAGE) *
1903 VM_SA_UPDATE_UNIT_SIZE;
1904 }
1905
1906		/* Map our PTs to the GTT */
1907 i = 0;
1908 j = 0;
1909 ptes = num_updates;
1910 ofs = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
1911 while (ptes) {
1912 u32 chunk = min(MAX_PTE_PER_SDI, ptes);
1913 u32 idx = 0;
1914
1915 bb->cs[bb->len++] = MI_STORE_DATA_IMM |
1916 MI_SDI_NUM_QW(chunk);
1917 bb->cs[bb->len++] = ofs;
1918 bb->cs[bb->len++] = 0; /* upper_32_bits */
1919
1920 for (; i < pt_update_ops->num_ops; ++i) {
1921 struct xe_vm_pgtable_update_op *pt_op =
1922 &pt_update_ops->ops[i];
1923 struct xe_vm_pgtable_update *updates = pt_op->entries;
1924
1925 for (; j < pt_op->num_entries; ++j, ++current_update, ++idx) {
1926 struct xe_vm *vm = pt_update->vops->vm;
1927 struct xe_bo *pt_bo = updates[j].pt_bo;
1928
1929 if (idx == chunk)
1930 goto next_cmd;
1931
1932 xe_tile_assert(tile, xe_bo_size(pt_bo) == SZ_4K);
1933
1934 /* Map a PT at most once */
1935 if (pt_bo->update_index < 0)
1936 pt_bo->update_index = current_update;
1937
1938 addr = vm->pt_ops->pte_encode_bo(pt_bo, 0,
1939 pat_index, 0);
1940 bb->cs[bb->len++] = lower_32_bits(addr);
1941 bb->cs[bb->len++] = upper_32_bits(addr);
1942 }
1943
1944 j = 0;
1945 }
1946
1947next_cmd:
1948 ptes -= chunk;
1949 ofs += chunk * sizeof(u64);
1950 }
1951
1952 bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1953 update_idx = bb->len;
1954
1955 addr = xe_migrate_vm_addr(ppgtt_ofs, 0) +
1956 (page_ofs / sizeof(u64)) * XE_PAGE_SIZE;
1957 for (i = 0; i < pt_update_ops->num_ops; ++i) {
1958 struct xe_vm_pgtable_update_op *pt_op =
1959 &pt_update_ops->ops[i];
1960 struct xe_vm_pgtable_update *updates = pt_op->entries;
1961
1962 for (j = 0; j < pt_op->num_entries; ++j) {
1963 struct xe_bo *pt_bo = updates[j].pt_bo;
1964
1965 write_pgtable(tile, bb, addr +
1966 pt_bo->update_index * XE_PAGE_SIZE,
1967 pt_op, &updates[j], pt_update);
1968 }
1969 }
1970 } else {
1971 /* phys pages, no preamble required */
1972 bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1973 update_idx = bb->len;
1974
1975 for (i = 0; i < pt_update_ops->num_ops; ++i) {
1976 struct xe_vm_pgtable_update_op *pt_op =
1977 &pt_update_ops->ops[i];
1978 struct xe_vm_pgtable_update *updates = pt_op->entries;
1979
1980 for (j = 0; j < pt_op->num_entries; ++j)
1981 write_pgtable(tile, bb, 0, pt_op, &updates[j],
1982 pt_update);
1983 }
1984 }
1985
1986 job = xe_bb_create_migration_job(pt_update_ops->q, bb,
1987 xe_migrate_batch_base(m, usm),
1988 update_idx);
1989 if (IS_ERR(job)) {
1990 err = PTR_ERR(job);
1991 goto err_sa;
1992 }
1993
1994 xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
1995
1996 if (ops->pre_commit) {
1997 pt_update->job = job;
1998 err = ops->pre_commit(pt_update);
1999 if (err)
2000 goto err_job;
2001 }
2002 if (is_migrate)
2003 mutex_lock(&m->job_mutex);
2004
2005 xe_sched_job_arm(job);
2006 fence = dma_fence_get(&job->drm.s_fence->finished);
2007 xe_sched_job_push(job);
2008
2009 if (is_migrate)
2010 mutex_unlock(&m->job_mutex);
2011
2012 xe_bb_free(bb, fence);
2013 drm_suballoc_free(sa_bo, fence);
2014
2015 return fence;
2016
2017err_job:
2018 xe_sched_job_put(job);
2019err_sa:
2020 drm_suballoc_free(sa_bo, NULL);
2021err_bb:
2022 xe_bb_free(bb, NULL);
2023 return ERR_PTR(err);
2024}
2025
2026/**
2027 * xe_migrate_update_pgtables() - Pipelined page-table update
2028 * @m: The migrate context.
2029 * @pt_update: PT update arguments
2030 *
2031 * Perform a pipelined page-table update. The update descriptors are typically
2032 * built under the same lock critical section as a call to this function. If
2033 * using the default engine for the updates, they will be performed in the
2034 * order they grab the job_mutex. If different engines are used, external
2035 * synchronization is needed for overlapping updates to maintain page-table
2036 * consistency. Note that the meaning of "overlapping" is that the updates
2037 * touch the same page-table, which might be a higher-level page-directory.
2038 * If no pipelining is needed, then updates may be performed by the cpu.
2039 * If no pipelining is needed, then updates may be performed by the CPU.
2040 * Return: A dma_fence that, when signaled, indicates the update completion.
2041 */
2042struct dma_fence *
2043xe_migrate_update_pgtables(struct xe_migrate *m,
2044 struct xe_migrate_pt_update *pt_update)
2045
2046{
2047 struct xe_vm_pgtable_update_ops *pt_update_ops =
2048 &pt_update->vops->pt_update_ops[pt_update->tile_id];
2049 struct dma_fence *fence;
2050
2051 fence = xe_migrate_update_pgtables_cpu(m, pt_update);
2052
2053	/* -ETIME indicates a GPU job is needed; anything else is a real error */
2054 if (!IS_ERR(fence) || PTR_ERR(fence) != -ETIME)
2055 return fence;
2056
2057 return __xe_migrate_update_pgtables(m, pt_update, pt_update_ops);
2058}
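
/*
 * Illustrative caller sketch (an assumption about typical use, not lifted
 * from a real caller): the returned fence is either the already-signaled
 * stub fence from the CPU path or the GPU job's finished fence.
 *
 *	fence = xe_migrate_update_pgtables(m, &pt_update);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	... publish the new bindings / install the fence as needed ...
 *	dma_fence_put(fence);
 */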
2059
2060/**
2061 * xe_migrate_wait() - Complete all operations using the xe_migrate context
2062 * @m: Migrate context to wait for.
2063 *
2064 * Waits until the GPU no longer uses the migrate context's default engine
2065 * or its page-table objects. FIXME: What about separate page-table update
2066 * engines?
2067 */
2068void xe_migrate_wait(struct xe_migrate *m)
2069{
2070 if (m->fence)
2071 dma_fence_wait(m->fence, false);
2072}
2073
2074static u32 pte_update_cmd_size(u64 size)
2075{
2076 u32 num_dword;
2077 u64 entries = DIV_U64_ROUND_UP(size, XE_PAGE_SIZE);
2078
2079 XE_WARN_ON(size > MAX_PREEMPTDISABLE_TRANSFER);
2080
2081 /*
2082	 * The MI_STORE_DATA_IMM command is used to update the page table. Each
2083	 * instruction can update at most MAX_PTE_PER_SDI PTE entries. To
2084	 * update n (n <= MAX_PTE_PER_SDI) PTE entries, we need:
2085	 *
2086	 * - 1 dword for the MI_STORE_DATA_IMM command header (opcode etc)
2087	 * - 2 dwords for the page table's physical location
2088	 * - 2*n dwords for the PTE values (each PTE entry is 2 dwords)
2089 */
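	/*
	 * Worked example (illustrative, assuming XE_PAGE_SIZE of 4 KiB): an
	 * 8 MiB transfer needs 2048 PTEs, i.e. DIV_U64_ROUND_UP(2048, 510) = 5
	 * MI_STORE_DATA_IMM commands, so
	 * num_dword = 5 * (1 + 2) + 2048 * 2 = 4111.
	 */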
2090 num_dword = (1 + 2) * DIV_U64_ROUND_UP(entries, MAX_PTE_PER_SDI);
2091 num_dword += entries * 2;
2092
2093 return num_dword;
2094}
2095
2096static void build_pt_update_batch_sram(struct xe_migrate *m,
2097 struct xe_bb *bb, u32 pt_offset,
2098 struct drm_pagemap_addr *sram_addr,
2099 u32 size, int level)
2100{
2101 u16 pat_index = tile_to_xe(m->tile)->pat.idx[XE_CACHE_WB];
2102 u64 gpu_page_size = 0x1ull << xe_pt_shift(level);
2103 u32 ptes;
2104 int i = 0;
2105
2106 xe_tile_assert(m->tile, PAGE_ALIGNED(size));
2107
2108 ptes = DIV_ROUND_UP(size, gpu_page_size);
2109 while (ptes) {
2110 u32 chunk = min(MAX_PTE_PER_SDI, ptes);
2111
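		/*
		 * For level 0, round the chunk down to a whole number of CPU
		 * pages worth of GPU PTEs so a CPU page's PTEs never straddle
		 * two commands (e.g. with 64 KiB CPU pages and 4 KiB GPU
		 * pages, 510 is rounded down to 496).
		 */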
2112 if (!level)
2113 chunk = ALIGN_DOWN(chunk, PAGE_SIZE / XE_PAGE_SIZE);
2114
2115 bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
2116 bb->cs[bb->len++] = pt_offset;
2117 bb->cs[bb->len++] = 0;
2118
2119 pt_offset += chunk * 8;
2120 ptes -= chunk;
2121
2122 while (chunk--) {
2123 u64 addr = sram_addr[i].addr;
2124 u64 pte;
2125
2126 xe_tile_assert(m->tile, sram_addr[i].proto ==
2127 DRM_INTERCONNECT_SYSTEM ||
2128 sram_addr[i].proto == XE_INTERCONNECT_P2P);
2129 xe_tile_assert(m->tile, addr);
2130 xe_tile_assert(m->tile, PAGE_ALIGNED(addr));
2131
2132again:
2133 pte = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe,
2134 addr, pat_index,
2135 level, false, 0);
2136 bb->cs[bb->len++] = lower_32_bits(pte);
2137 bb->cs[bb->len++] = upper_32_bits(pte);
2138
2139 if (gpu_page_size < PAGE_SIZE) {
2140 addr += XE_PAGE_SIZE;
2141 if (!PAGE_ALIGNED(addr)) {
2142 chunk--;
2143 goto again;
2144 }
2145 i++;
2146 } else {
2147 i += gpu_page_size / PAGE_SIZE;
2148 }
2149 }
2150 }
2151}
2152
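/*
 * xe_migrate_vram_use_pde() - Decide whether a copy can be mapped with 2M
 * PDEs: true only if every 2M-aligned slot of @sram_addr describes a
 * contiguous allocation of the level-1 page size (e.g. order 9 with 4 KiB
 * CPU pages, since PAGE_SIZE << 9 == SZ_2M). This description assumes the
 * usual 2M level-1 GPU page size.
 */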
2153static bool xe_migrate_vram_use_pde(struct drm_pagemap_addr *sram_addr,
2154 unsigned long size)
2155{
2156 u32 large_size = (0x1 << xe_pt_shift(1));
2157 unsigned long i, incr = large_size / PAGE_SIZE;
2158
2159 for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE); i += incr)
2160 if (PAGE_SIZE << sram_addr[i].order != large_size)
2161 return false;
2162
2163 return true;
2164}
2165
2166#define XE_CACHELINE_BYTES 64ull
2167#define XE_CACHELINE_MASK (XE_CACHELINE_BYTES - 1)
2168
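/*
 * Pick the largest pitch that evenly divides the length: a page-aligned
 * length uses a PAGE_SIZE pitch, a 4352-byte copy (for example) falls back
 * to 256, and lengths that are not even dword-aligned use a byte pitch,
 * which the assert below only allows on platforms with the memory-copy
 * instruction.
 */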
2169static u32 xe_migrate_copy_pitch(struct xe_device *xe, u32 len)
2170{
2171 u32 pitch;
2172
2173 if (IS_ALIGNED(len, PAGE_SIZE))
2174 pitch = PAGE_SIZE;
2175 else if (IS_ALIGNED(len, SZ_4K))
2176 pitch = SZ_4K;
2177 else if (IS_ALIGNED(len, SZ_256))
2178 pitch = SZ_256;
2179 else if (IS_ALIGNED(len, 4))
2180 pitch = 4;
2181 else
2182 pitch = 1;
2183
2184 xe_assert(xe, pitch > 1 || xe->info.has_mem_copy_instr);
2185 return pitch;
2186}
2187
2188static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
2189 unsigned long len,
2190 unsigned long sram_offset,
2191 struct drm_pagemap_addr *sram_addr,
2192 u64 vram_addr,
2193 struct dma_fence *deps,
2194 const enum xe_migrate_copy_dir dir)
2195{
2196 struct xe_gt *gt = m->tile->primary_gt;
2197 struct xe_device *xe = gt_to_xe(gt);
2198 bool use_usm_batch = xe->info.has_usm;
2199 struct dma_fence *fence = NULL;
2200 u32 batch_size = 1;
2201 u64 src_L0_ofs, dst_L0_ofs;
2202 struct xe_sched_job *job;
2203 struct xe_bb *bb;
2204 u32 update_idx, pt_slot = 0;
2205 unsigned long npages = DIV_ROUND_UP(len + sram_offset, PAGE_SIZE);
2206 unsigned int pitch = xe_migrate_copy_pitch(xe, len);
2207 int err;
2208 unsigned long i, j;
2209 bool use_pde = xe_migrate_vram_use_pde(sram_addr, len + sram_offset);
2210
2211 if (!xe->info.has_mem_copy_instr &&
2212 drm_WARN_ON(&xe->drm,
2213 (!IS_ALIGNED(len, pitch)) || (sram_offset | vram_addr) & XE_CACHELINE_MASK))
2214 return ERR_PTR(-EOPNOTSUPP);
2215
2216 xe_assert(xe, npages * PAGE_SIZE <= MAX_PREEMPTDISABLE_TRANSFER);
2217
2218 batch_size += pte_update_cmd_size(npages << PAGE_SHIFT);
2219 batch_size += EMIT_COPY_DW;
2220
2221 bb = xe_bb_new(gt, batch_size, use_usm_batch);
2222 if (IS_ERR(bb)) {
2223 err = PTR_ERR(bb);
2224 return ERR_PTR(err);
2225 }
2226
2227 /*
2228 * If the order of a struct drm_pagemap_addr entry is greater than 0,
2229	 * the entry is populated by the GPU pagemap, but subsequent entries within
2230 * the range of that order are not populated.
2231 * build_pt_update_batch_sram() expects a fully populated array of
2232 * struct drm_pagemap_addr. Ensure this is the case even with higher
2233 * orders.
2234 */
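	/*
	 * For example (illustrative, assuming 4 KiB CPU pages): an order-4
	 * entry describes 64 KiB, so only sram_addr[i] carries an address and
	 * the loop below derives sram_addr[i + 1..15].addr by adding
	 * successive PAGE_SIZE offsets.
	 */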
2235 for (i = 0; !use_pde && i < npages;) {
2236 unsigned int order = sram_addr[i].order;
2237
2238 for (j = 1; j < NR_PAGES(order) && i + j < npages; j++)
2239 if (!sram_addr[i + j].addr)
2240 sram_addr[i + j].addr = sram_addr[i].addr + j * PAGE_SIZE;
2241
2242 i += NR_PAGES(order);
2243 }
2244
2245 if (use_pde)
2246 build_pt_update_batch_sram(m, bb, m->large_page_copy_pdes,
2247 sram_addr, npages << PAGE_SHIFT, 1);
2248 else
2249 build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE,
2250 sram_addr, npages << PAGE_SHIFT, 0);
2251
2252 if (dir == XE_MIGRATE_COPY_TO_VRAM) {
2253 if (use_pde)
2254 src_L0_ofs = m->large_page_copy_ofs + sram_offset;
2255 else
2256 src_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
2257 dst_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
2258
2259 } else {
2260 src_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
2261 if (use_pde)
2262 dst_L0_ofs = m->large_page_copy_ofs + sram_offset;
2263 else
2264 dst_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
2265 }
2266
2267 bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
2268 update_idx = bb->len;
2269
2270 emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, len, pitch);
2271
2272 job = xe_bb_create_migration_job(m->q, bb,
2273 xe_migrate_batch_base(m, use_usm_batch),
2274 update_idx);
2275 if (IS_ERR(job)) {
2276 err = PTR_ERR(job);
2277 goto err;
2278 }
2279
2280 xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
2281
2282 if (deps && !dma_fence_is_signaled(deps)) {
2283 dma_fence_get(deps);
2284 err = drm_sched_job_add_dependency(&job->drm, deps);
2285 if (err)
2286 dma_fence_wait(deps, false);
2287 err = 0;
2288 }
2289
2290 mutex_lock(&m->job_mutex);
2291 xe_sched_job_arm(job);
2292 fence = dma_fence_get(&job->drm.s_fence->finished);
2293 xe_sched_job_push(job);
2294
2295 dma_fence_put(m->fence);
2296 m->fence = dma_fence_get(fence);
2297 mutex_unlock(&m->job_mutex);
2298
2299 xe_bb_free(bb, fence);
2300
2301 return fence;
2302
2303err:
2304 xe_bb_free(bb, NULL);
2305
2306 return ERR_PTR(err);
2307}
2308
2309/**
2310 * xe_migrate_to_vram() - Migrate to VRAM
2311 * @m: The migration context.
2312 * @npages: Number of pages to migrate.
2313 * @src_addr: Array of DMA information (source of migrate)
2314 * @dst_addr: Device physical address of VRAM (destination of migrate)
2315 * @deps: struct dma_fence representing the dependencies that need
2316 * to be signaled before migration.
2317 *
2318 * Copy from an array of DMA addresses to a VRAM device physical address
2319 *
2320 * Return: dma fence for migrate to signal completion on success, ERR_PTR on
2321 * failure
2322 */
2323struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
2324 unsigned long npages,
2325 struct drm_pagemap_addr *src_addr,
2326 u64 dst_addr,
2327 struct dma_fence *deps)
2328{
2329 return xe_migrate_vram(m, npages * PAGE_SIZE, 0, src_addr, dst_addr,
2330 deps, XE_MIGRATE_COPY_TO_VRAM);
2331}
2332
2333/**
2334 * xe_migrate_from_vram() - Migrate from VRAM
2335 * @m: The migration context.
2336 * @npages: Number of pages to migrate.
2337 * @src_addr: Device physical address of VRAM (source of migrate)
2338 * @dst_addr: Array of DMA information (destination of migrate)
2339 * @deps: struct dma_fence representing the dependencies that need
2340 * to be signaled before migration.
2341 *
2342 * Copy from a VRAM device physical address to an array of DMA addresses
2343 *
2344 * Return: dma fence for migrate to signal completion on success, ERR_PTR on
2345 * failure
2346 */
2347struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
2348 unsigned long npages,
2349 u64 src_addr,
2350 struct drm_pagemap_addr *dst_addr,
2351 struct dma_fence *deps)
2352{
2353 return xe_migrate_vram(m, npages * PAGE_SIZE, 0, dst_addr, src_addr,
2354 deps, XE_MIGRATE_COPY_TO_SRAM);
2355}
2356
2357static void xe_migrate_dma_unmap(struct xe_device *xe,
2358 struct drm_pagemap_addr *pagemap_addr,
2359 int len, int write)
2360{
2361 unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
2362
2363 for (i = 0; i < npages; ++i) {
2364 if (!pagemap_addr[i].addr)
2365 break;
2366
2367 dma_unmap_page(xe->drm.dev, pagemap_addr[i].addr, PAGE_SIZE,
2368 write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
2369 }
2370 kfree(pagemap_addr);
2371}
2372
2373static struct drm_pagemap_addr *xe_migrate_dma_map(struct xe_device *xe,
2374 void *buf, int len,
2375 int write)
2376{
2377 struct drm_pagemap_addr *pagemap_addr;
2378 unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
2379
2380 pagemap_addr = kzalloc_objs(*pagemap_addr, npages);
2381 if (!pagemap_addr)
2382 return ERR_PTR(-ENOMEM);
2383
2384 for (i = 0; i < npages; ++i) {
2385 dma_addr_t addr;
2386 struct page *page;
2387 enum dma_data_direction dir = write ? DMA_TO_DEVICE :
2388 DMA_FROM_DEVICE;
2389
2390 if (is_vmalloc_addr(buf))
2391 page = vmalloc_to_page(buf);
2392 else
2393 page = virt_to_page(buf);
2394
2395 addr = dma_map_page(xe->drm.dev, page, 0, PAGE_SIZE, dir);
2396 if (dma_mapping_error(xe->drm.dev, addr))
2397 goto err_fault;
2398
2399 pagemap_addr[i] =
2400 drm_pagemap_addr_encode(addr,
2401 DRM_INTERCONNECT_SYSTEM,
2402 0, dir);
2403 buf += PAGE_SIZE;
2404 }
2405
2406 return pagemap_addr;
2407
2408err_fault:
2409 xe_migrate_dma_unmap(xe, pagemap_addr, len, write);
2410 return ERR_PTR(-EFAULT);
2411}
2412
2413/**
2414 * xe_migrate_access_memory - Access memory of a BO via GPU
2415 *
2416 * @m: The migration context.
2417 * @bo: buffer object
2418 * @offset: access offset into buffer object
2419 * @buf: pointer to caller memory to read into or write from
2420 * @len: length of access
2421 * @write: write access
2422 *
2423 * Access memory of a BO via the GPU, either reading into or writing from a
2424 * passed-in pointer. The pointer is DMA-mapped for GPU access and GPU commands
2425 * are issued to copy between it and the BO in the requested direction.
2426 *
2427 * Returns:
2428 * 0 if successful, negative error code on failure.
2429 */
2430int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
2431 unsigned long offset, void *buf, int len,
2432 int write)
2433{
2434 struct xe_tile *tile = m->tile;
2435 struct xe_device *xe = tile_to_xe(tile);
2436 struct xe_res_cursor cursor;
2437 struct dma_fence *fence = NULL;
2438 struct drm_pagemap_addr *pagemap_addr;
2439 unsigned long page_offset = (unsigned long)buf & ~PAGE_MASK;
2440 int bytes_left = len, current_page = 0;
2441 void *orig_buf = buf;
2442
2443 xe_bo_assert_held(bo);
2444
2445 /* Use bounce buffer for small access and unaligned access */
2446 if (!xe->info.has_mem_copy_instr &&
2447 (!IS_ALIGNED(len, 4) ||
2448 !IS_ALIGNED(page_offset, XE_CACHELINE_BYTES) ||
2449 !IS_ALIGNED(offset, XE_CACHELINE_BYTES))) {
2450 int buf_offset = 0;
2451 void *bounce;
2452 int err;
2453
2454 BUILD_BUG_ON(!is_power_of_2(XE_CACHELINE_BYTES));
2455 bounce = kmalloc(XE_CACHELINE_BYTES, GFP_KERNEL);
2456 if (!bounce)
2457 return -ENOMEM;
2458
2459 /*
2460		 * Less than ideal for large unaligned accesses, but this should be
2461		 * fairly rare; we can fix it up if this becomes common.
2462 */
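		/*
		 * Worked example (illustrative): a 7-byte access at offset 61
		 * is split into a 3-byte chunk covering bytes 61-63 of the
		 * first cacheline and a 4-byte chunk at offset 64. Each chunk
		 * first reads the whole cacheline into the bounce buffer and,
		 * for writes, writes the merged cacheline back.
		 */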
2463 do {
2464 int copy_bytes = min_t(int, bytes_left,
2465 XE_CACHELINE_BYTES -
2466 (offset & XE_CACHELINE_MASK));
2467 int ptr_offset = offset & XE_CACHELINE_MASK;
2468
2469 err = xe_migrate_access_memory(m, bo,
2470 offset &
2471 ~XE_CACHELINE_MASK,
2472 bounce,
2473 XE_CACHELINE_BYTES, 0);
2474 if (err)
2475 break;
2476
2477 if (write) {
2478 memcpy(bounce + ptr_offset, buf + buf_offset, copy_bytes);
2479
2480 err = xe_migrate_access_memory(m, bo,
2481 offset & ~XE_CACHELINE_MASK,
2482 bounce,
2483 XE_CACHELINE_BYTES, write);
2484 if (err)
2485 break;
2486 } else {
2487 memcpy(buf + buf_offset, bounce + ptr_offset,
2488 copy_bytes);
2489 }
2490
2491 bytes_left -= copy_bytes;
2492 buf_offset += copy_bytes;
2493 offset += copy_bytes;
2494 } while (bytes_left);
2495
2496 kfree(bounce);
2497 return err;
2498 }
2499
2500 pagemap_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write);
2501 if (IS_ERR(pagemap_addr))
2502 return PTR_ERR(pagemap_addr);
2503
2504 xe_res_first(bo->ttm.resource, offset, xe_bo_size(bo) - offset, &cursor);
2505
2506 do {
2507 struct dma_fence *__fence;
2508 u64 vram_addr = vram_region_gpu_offset(bo->ttm.resource) +
2509 cursor.start;
2510 int current_bytes;
2511 u32 pitch;
2512
2513 if (cursor.size > MAX_PREEMPTDISABLE_TRANSFER)
2514 current_bytes = min_t(int, bytes_left,
2515 MAX_PREEMPTDISABLE_TRANSFER);
2516 else
2517 current_bytes = min_t(int, bytes_left, cursor.size);
2518
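		/*
		 * Clamp each GPU job to what a single copy command can express
		 * (presumably its 16-bit width encoding): U16_MAX rows of
		 * pitch bytes with the memory-copy instruction, otherwise
		 * S16_MAX rows rounded down to a cacheline.
		 */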
2519 pitch = xe_migrate_copy_pitch(xe, current_bytes);
2520 if (xe->info.has_mem_copy_instr)
2521 current_bytes = min_t(int, current_bytes, U16_MAX * pitch);
2522 else
2523 current_bytes = min_t(int, current_bytes,
2524 round_down(S16_MAX * pitch,
2525 XE_CACHELINE_BYTES));
2526
2527 __fence = xe_migrate_vram(m, current_bytes,
2528 (unsigned long)buf & ~PAGE_MASK,
2529 &pagemap_addr[current_page],
2530 vram_addr, NULL, write ?
2531 XE_MIGRATE_COPY_TO_VRAM :
2532 XE_MIGRATE_COPY_TO_SRAM);
2533 if (IS_ERR(__fence)) {
2534 if (fence) {
2535 dma_fence_wait(fence, false);
2536 dma_fence_put(fence);
2537 }
2538 fence = __fence;
2539 goto out_err;
2540 }
2541
2542 dma_fence_put(fence);
2543 fence = __fence;
2544
2545 buf += current_bytes;
2546 offset += current_bytes;
2547 current_page = (int)(buf - orig_buf) / PAGE_SIZE;
2548 bytes_left -= current_bytes;
2549 if (bytes_left)
2550 xe_res_next(&cursor, current_bytes);
2551 } while (bytes_left);
2552
2553 dma_fence_wait(fence, false);
2554 dma_fence_put(fence);
2555
2556out_err:
2557 xe_migrate_dma_unmap(xe, pagemap_addr, len + page_offset, write);
2558 return IS_ERR(fence) ? PTR_ERR(fence) : 0;
2559}
2560
2561/**
2562 * xe_migrate_job_lock() - Lock migrate job lock
2563 * @m: The migration context.
2564 * @q: Queue associated with the operation which requires a lock
2565 *
2566 * Lock the migrate job lock if the queue is a migration queue, otherwise
2567 * assert the VM's dma-resv is held (user queues have their own locking).
2568 */
2569void xe_migrate_job_lock(struct xe_migrate *m, struct xe_exec_queue *q)
2570{
2571 bool is_migrate = q == m->q;
2572
2573 if (is_migrate)
2574 mutex_lock(&m->job_mutex);
2575 else
2576		xe_vm_assert_held(q->user_vm); /* User queue VMs should be locked */
2577}
2578
2579/**
2580 * xe_migrate_job_unlock() - Unlock migrate job lock
2581 * @m: The migration context.
2582 * @q: Queue associated with the operation which requires a lock
2583 *
2584 * Unlock the migrate job lock if the queue is a migration queue, otherwise
2585 * assert the VM's dma-resv is held (user queues have their own locking).
2586 */
2587void xe_migrate_job_unlock(struct xe_migrate *m, struct xe_exec_queue *q)
2588{
2589 bool is_migrate = q == m->q;
2590
2591 if (is_migrate)
2592 mutex_unlock(&m->job_mutex);
2593 else
2594		xe_vm_assert_held(q->user_vm); /* User queue VMs should be locked */
2595}
2596
2597#if IS_ENABLED(CONFIG_PROVE_LOCKING)
2598/**
2599 * xe_migrate_job_lock_assert() - Assert the migrate job lock is held for the queue
2600 * @q: Migrate queue
2601 */
2602void xe_migrate_job_lock_assert(struct xe_exec_queue *q)
2603{
2604 struct xe_migrate *m = gt_to_tile(q->gt)->migrate;
2605
2606 xe_gt_assert(q->gt, q == m->q);
2607 lockdep_assert_held(&m->job_mutex);
2608}
2609#endif
2610
2611#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
2612#include "tests/xe_migrate.c"
2613#endif