// SPDX-License-Identifier: GPL-2.0 AND MIT
/*
 * Copyright © 2024 Intel Corporation
 */

#include <kunit/static_stub.h>
#include <kunit/test.h>
#include <kunit/test-bug.h>

#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_guc_ct.h"
#include "xe_kunit_helpers.h"
#include "xe_pci_test.h"

#define DUT_GGTT_START	SZ_1M
#define DUT_GGTT_SIZE	SZ_2M

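/*
 * Stand-in for xe_managed_bo_create_pin_map() used by this test: it backs the
 * BO with plain kernel memory and, when XE_BO_FLAG_GGTT is requested, with a
 * node from the test GGTT, so no real hardware or VRAM is needed.
 */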
static struct xe_bo *replacement_xe_managed_bo_create_pin_map(struct xe_device *xe,
							       struct xe_tile *tile,
							       size_t size, u32 flags)
{
	struct kunit *test = kunit_get_current_test();
	struct xe_bo *bo;
	void *buf;

	bo = drmm_kzalloc(&xe->drm, sizeof(*bo), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bo);

	buf = drmm_kzalloc(&xe->drm, size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);

	bo->tile = tile;
	bo->ttm.bdev = &xe->ttm;
	bo->ttm.base.size = size;
	iosys_map_set_vaddr(&bo->vmap, buf);

	if (flags & XE_BO_FLAG_GGTT) {
		struct xe_ggtt *ggtt = tile->mem.ggtt;

		bo->ggtt_node[tile->id] = xe_ggtt_insert_node(ggtt, xe_bo_size(bo), SZ_4K);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bo->ggtt_node[tile->id]);
	}

	return bo;
}

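/*
 * Per-test setup: create a fake PF device, initialize a small GGTT range for
 * the DUT, stub out the managed BO allocator with the replacement above, and
 * initialize the GuC buffer cache that the test cases use via test->priv.
 */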
static int guc_buf_test_init(struct kunit *test)
{
	struct xe_pci_fake_data fake = {
		.sriov_mode = XE_SRIOV_MODE_PF,
		.platform = XE_TIGERLAKE, /* some random platform */
		.subplatform = XE_SUBPLATFORM_NONE,
	};
	struct xe_ggtt *ggtt;
	struct xe_guc *guc;

	test->priv = &fake;
	xe_kunit_helper_xe_device_test_init(test);

	ggtt = xe_device_get_root_tile(test->priv)->mem.ggtt;
	guc = &xe_device_get_gt(test->priv, 0)->uc.guc;

	KUNIT_ASSERT_EQ(test, 0,
			xe_ggtt_init_kunit(ggtt, DUT_GGTT_START,
					   DUT_GGTT_SIZE));

	kunit_activate_static_stub(test, xe_managed_bo_create_pin_map,
				   replacement_xe_managed_bo_create_pin_map);

	KUNIT_ASSERT_EQ(test, 0, xe_guc_buf_cache_init(&guc->buf));

	test->priv = &guc->buf;
	return 0;
}

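/* Reserving a single dword yields a valid buffer inside the DUT GGTT range. */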
static void test_smallest(struct kunit *test)
{
	struct xe_guc_buf_cache *cache = test->priv;
	struct xe_guc_buf buf;

	buf = xe_guc_buf_reserve(cache, 1);
	KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
	KUNIT_EXPECT_NOT_NULL(test, xe_guc_buf_cpu_ptr(buf));
	KUNIT_EXPECT_NE(test, 0, xe_guc_buf_gpu_addr(buf));
	KUNIT_EXPECT_LE(test, DUT_GGTT_START, xe_guc_buf_gpu_addr(buf));
	KUNIT_EXPECT_GT(test, DUT_GGTT_START + DUT_GGTT_SIZE, xe_guc_buf_gpu_addr(buf));
	xe_guc_buf_release(buf);
}

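/* Reserving the full cache capacity still yields a valid in-range buffer. */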
static void test_largest(struct kunit *test)
{
	struct xe_guc_buf_cache *cache = test->priv;
	struct xe_guc_buf buf;

	buf = xe_guc_buf_reserve(cache, xe_guc_buf_cache_dwords(cache));
	KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
	KUNIT_EXPECT_NOT_NULL(test, xe_guc_buf_cpu_ptr(buf));
	KUNIT_EXPECT_NE(test, 0, xe_guc_buf_gpu_addr(buf));
	KUNIT_EXPECT_LE(test, DUT_GGTT_START, xe_guc_buf_gpu_addr(buf));
	KUNIT_EXPECT_GT(test, DUT_GGTT_START + DUT_GGTT_SIZE, xe_guc_buf_gpu_addr(buf));
	xe_guc_buf_release(buf);
}

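/* The whole cache can be handed out as individual one-dword reservations. */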
static void test_granular(struct kunit *test)
{
	struct xe_guc_buf_cache *cache = test->priv;
	struct xe_guc_buf *bufs;
	int n, dwords;

	dwords = xe_guc_buf_cache_dwords(cache);
	bufs = kunit_kcalloc(test, dwords, sizeof(*bufs), GFP_KERNEL);
	KUNIT_EXPECT_NOT_NULL(test, bufs);

	for (n = 0; n < dwords; n++)
		bufs[n] = xe_guc_buf_reserve(cache, 1);

	for (n = 0; n < dwords; n++)
		KUNIT_EXPECT_TRUE_MSG(test, xe_guc_buf_is_valid(bufs[n]), "n=%d", n);

	for (n = 0; n < dwords; n++)
		xe_guc_buf_release(bufs[n]);
}

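/* Concurrently reserved buffers must not share CPU pointers or GPU addresses. */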
static void test_unique(struct kunit *test)
{
	struct xe_guc_buf_cache *cache = test->priv;
	struct xe_guc_buf *bufs;
	int n, m, dwords;

	dwords = xe_guc_buf_cache_dwords(cache);
	bufs = kunit_kcalloc(test, dwords, sizeof(*bufs), GFP_KERNEL);
	KUNIT_EXPECT_NOT_NULL(test, bufs);

	for (n = 0; n < dwords; n++)
		bufs[n] = xe_guc_buf_reserve(cache, 1);

	for (n = 0; n < dwords; n++) {
		for (m = n + 1; m < dwords; m++) {
			KUNIT_EXPECT_PTR_NE_MSG(test, xe_guc_buf_cpu_ptr(bufs[n]),
						xe_guc_buf_cpu_ptr(bufs[m]), "n=%d, m=%d", n, m);
			KUNIT_ASSERT_NE_MSG(test, xe_guc_buf_gpu_addr(bufs[n]),
					    xe_guc_buf_gpu_addr(bufs[m]), "n=%d, m=%d", n, m);
		}
	}

	for (n = 0; n < dwords; n++)
		xe_guc_buf_release(bufs[n]);
}

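/* Two half-cache reservations must not overlap, in either CPU or GPU space. */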
static void test_overlap(struct kunit *test)
{
	struct xe_guc_buf_cache *cache = test->priv;
	struct xe_guc_buf b1, b2;
	u32 dwords = xe_guc_buf_cache_dwords(cache) / 2;
	u32 bytes = dwords * sizeof(u32);
	void *p1, *p2;
	u64 a1, a2;

	b1 = xe_guc_buf_reserve(cache, dwords);
	b2 = xe_guc_buf_reserve(cache, dwords);

	p1 = xe_guc_buf_cpu_ptr(b1);
	p2 = xe_guc_buf_cpu_ptr(b2);

	a1 = xe_guc_buf_gpu_addr(b1);
	a2 = xe_guc_buf_gpu_addr(b2);

	KUNIT_EXPECT_PTR_NE(test, p1, p2);
	if (p1 < p2)
		KUNIT_EXPECT_LT(test, (uintptr_t)(p1 + bytes - 1), (uintptr_t)p2);
	else
		KUNIT_EXPECT_LT(test, (uintptr_t)(p2 + bytes - 1), (uintptr_t)p1);

	KUNIT_EXPECT_NE(test, a1, a2);
	if (a1 < a2)
		KUNIT_EXPECT_LT(test, a1 + bytes - 1, a2);
	else
		KUNIT_EXPECT_LT(test, a2 + bytes - 1, a1);

	xe_guc_buf_release(b1);
	xe_guc_buf_release(b2);
}

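/* A released reservation can be handed out again at the same location. */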
static void test_reusable(struct kunit *test)
{
	struct xe_guc_buf_cache *cache = test->priv;
	struct xe_guc_buf b1, b2;
	void *p1;
	u64 a1;

	b1 = xe_guc_buf_reserve(cache, xe_guc_buf_cache_dwords(cache));
	KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(b1));
	KUNIT_EXPECT_NOT_NULL(test, p1 = xe_guc_buf_cpu_ptr(b1));
	KUNIT_EXPECT_NE(test, 0, a1 = xe_guc_buf_gpu_addr(b1));
	xe_guc_buf_release(b1);

	b2 = xe_guc_buf_reserve(cache, xe_guc_buf_cache_dwords(cache));
	KUNIT_EXPECT_PTR_EQ(test, p1, xe_guc_buf_cpu_ptr(b2));
	KUNIT_EXPECT_EQ(test, a1, xe_guc_buf_gpu_addr(b2));
	xe_guc_buf_release(b2);
}

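/* Asking for more than the cache capacity must fail gracefully. */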
static void test_too_big(struct kunit *test)
{
	struct xe_guc_buf_cache *cache = test->priv;
	struct xe_guc_buf buf;

	buf = xe_guc_buf_reserve(cache, xe_guc_buf_cache_dwords(cache) + 1);
	KUNIT_EXPECT_FALSE(test, xe_guc_buf_is_valid(buf));
	xe_guc_buf_release(buf); /* shouldn't crash */
}

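/* Data copied into the buffer and flushed must land in the backing BO. */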
static void test_flush(struct kunit *test)
{
	struct xe_guc_buf_cache *cache = test->priv;
	struct xe_guc_buf buf;
	const u32 dwords = xe_guc_buf_cache_dwords(cache);
	const u32 bytes = dwords * sizeof(u32);
	u32 *s, *p, *d;
	int n;

	KUNIT_ASSERT_NOT_NULL(test, s = kunit_kcalloc(test, dwords, sizeof(u32), GFP_KERNEL));
	KUNIT_ASSERT_NOT_NULL(test, d = kunit_kcalloc(test, dwords, sizeof(u32), GFP_KERNEL));

	for (n = 0; n < dwords; n++)
		s[n] = n;

	buf = xe_guc_buf_reserve(cache, dwords);
	KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
	KUNIT_ASSERT_NOT_NULL(test, p = xe_guc_buf_cpu_ptr(buf));
	KUNIT_EXPECT_PTR_NE(test, p, s);
	KUNIT_EXPECT_PTR_NE(test, p, d);

	memcpy(p, s, bytes);
	KUNIT_EXPECT_NE(test, 0, xe_guc_buf_flush(buf));

	iosys_map_memcpy_from(d, &cache->sam->bo->vmap, 0, bytes);
	KUNIT_EXPECT_MEMEQ(test, s, d, bytes);

	xe_guc_buf_release(buf);
}

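/*
 * xe_guc_cache_gpu_addr_from_ptr() must reject pointers outside the
 * reservation and translate every in-range pointer to the matching GPU address.
 */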
static void test_lookup(struct kunit *test)
{
	struct xe_guc_buf_cache *cache = test->priv;
	struct xe_guc_buf buf;
	u32 dwords;
	u64 addr;
	u32 *p;
	int n;

	dwords = xe_guc_buf_cache_dwords(cache);
	buf = xe_guc_buf_reserve(cache, dwords);
	KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
	KUNIT_ASSERT_NOT_NULL(test, p = xe_guc_buf_cpu_ptr(buf));
	KUNIT_ASSERT_NE(test, 0, addr = xe_guc_buf_gpu_addr(buf));

	KUNIT_EXPECT_EQ(test, 0, xe_guc_cache_gpu_addr_from_ptr(cache, p - 1, sizeof(u32)));
	KUNIT_EXPECT_EQ(test, 0, xe_guc_cache_gpu_addr_from_ptr(cache, p + dwords, sizeof(u32)));

	for (n = 0; n < dwords; n++)
		KUNIT_EXPECT_EQ_MSG(test, xe_guc_cache_gpu_addr_from_ptr(cache, p + n, sizeof(u32)),
				    addr + n * sizeof(u32), "n=%d", n);

	xe_guc_buf_release(buf);
}

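/* xe_guc_buf_from_data() must return a buffer pre-filled with the given data. */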
static void test_data(struct kunit *test)
{
	static const u32 data[] = { 1, 2, 3, 4, 5, 6 };
	struct xe_guc_buf_cache *cache = test->priv;
	struct xe_guc_buf buf;
	void *p;

	buf = xe_guc_buf_from_data(cache, data, sizeof(data));
	KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
	KUNIT_ASSERT_NOT_NULL(test, p = xe_guc_buf_cpu_ptr(buf));
	KUNIT_EXPECT_MEMEQ(test, p, data, sizeof(data));

	xe_guc_buf_release(buf);
}

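/* The scoped CLASS(xe_guc_buf, ...) helper reserves and auto-releases a buffer. */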
static void test_class(struct kunit *test)
{
	struct xe_guc_buf_cache *cache = test->priv;
	u32 dwords = xe_guc_buf_cache_dwords(cache);

	{
		CLASS(xe_guc_buf, buf)(cache, dwords);
		KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
		KUNIT_EXPECT_NOT_NULL(test, xe_guc_buf_cpu_ptr(buf));
		KUNIT_EXPECT_NE(test, 0, xe_guc_buf_gpu_addr(buf));
		KUNIT_EXPECT_LE(test, DUT_GGTT_START, xe_guc_buf_gpu_addr(buf));
		KUNIT_EXPECT_GT(test, DUT_GGTT_START + DUT_GGTT_SIZE, xe_guc_buf_gpu_addr(buf));
	}

	{
		CLASS(xe_guc_buf, buf)(cache, dwords);
		KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
		KUNIT_EXPECT_NOT_NULL(test, xe_guc_buf_cpu_ptr(buf));
		KUNIT_EXPECT_NE(test, 0, xe_guc_buf_gpu_addr(buf));
		KUNIT_EXPECT_LE(test, DUT_GGTT_START, xe_guc_buf_gpu_addr(buf));
		KUNIT_EXPECT_GT(test, DUT_GGTT_START + DUT_GGTT_SIZE, xe_guc_buf_gpu_addr(buf));
	}
}

static struct kunit_case guc_buf_test_cases[] = {
	KUNIT_CASE(test_smallest),
	KUNIT_CASE(test_largest),
	KUNIT_CASE(test_granular),
	KUNIT_CASE(test_unique),
	KUNIT_CASE(test_overlap),
	KUNIT_CASE(test_reusable),
	KUNIT_CASE(test_too_big),
	KUNIT_CASE(test_flush),
	KUNIT_CASE(test_lookup),
	KUNIT_CASE(test_data),
	KUNIT_CASE(test_class),
	{}
};

static struct kunit_suite guc_buf_suite = {
	.name = "guc_buf",
	.test_cases = guc_buf_test_cases,
	.init = guc_buf_test_init,
};

kunit_test_suites(&guc_buf_suite);