Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
tags: kernel, os, linux
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2021 Intel Corporation
4 */
5
6#include "xe_hw_fence.h"
7
8#include <linux/device.h>
9#include <linux/slab.h>
10
11#include "xe_device_types.h"
12#include "xe_hw_engine.h"
13#include "xe_macros.h"
14#include "xe_map.h"
15#include "xe_trace.h"
16
17static struct kmem_cache *xe_hw_fence_slab;
18
19int __init xe_hw_fence_module_init(void)
20{
21 xe_hw_fence_slab = kmem_cache_create("xe_hw_fence",
22 sizeof(struct xe_hw_fence), 0,
23 SLAB_HWCACHE_ALIGN, NULL);
24 if (!xe_hw_fence_slab)
25 return -ENOMEM;
26
27 return 0;
28}
29
/*
 * Module-exit hook: destroy the fence slab cache.
 *
 * rcu_barrier() waits for all in-flight call_rcu() callbacks (see
 * xe_hw_fence_release() -> fence_free()) to finish so no object from
 * the cache is freed after the cache itself is destroyed.
 */
void xe_hw_fence_module_exit(void)
{
	rcu_barrier();
	kmem_cache_destroy(xe_hw_fence_slab);
}
35
/* Allocate a zeroed fence object from the module's slab cache. */
static struct xe_hw_fence *fence_alloc(void)
{
	return kmem_cache_zalloc(xe_hw_fence_slab, GFP_KERNEL);
}
40
/*
 * RCU callback returning a fence to the slab cache.
 *
 * @rcu is the rcu_head embedded in the fence's struct dma_fence
 * (dma.rcu), so container_of() recovers the enclosing xe_hw_fence.
 * Also called directly by xe_hw_fence_free() for fences that were
 * never initialized.
 */
static void fence_free(struct rcu_head *rcu)
{
	struct xe_hw_fence *fence =
		container_of(rcu, struct xe_hw_fence, dma.rcu);

	if (!WARN_ON_ONCE(!fence))
		kmem_cache_free(xe_hw_fence_slab, fence);
}
49
/*
 * irq_work callback: walk the pending list and signal every fence whose
 * backing seqno indicates completion (or which carries an error).
 *
 * Fences that signal are unlinked and have the reference taken in
 * xe_hw_fence_enable_signaling() dropped; the rest stay queued for a
 * later run. Nothing is signalled while irq->enabled is false.
 */
static void hw_fence_irq_run_cb(struct irq_work *work)
{
	struct xe_hw_fence_irq *irq = container_of(work, typeof(*irq), work);
	struct xe_hw_fence *fence, *next;
	bool tmp;

	/* Annotate the critical section for dma-fence signalling lockdep checks */
	tmp = dma_fence_begin_signalling();
	spin_lock(&irq->lock);
	if (irq->enabled) {
		list_for_each_entry_safe(fence, next, &irq->pending, irq_link) {
			struct dma_fence *dma_fence = &fence->dma;

			trace_xe_hw_fence_try_signal(fence);
			/*
			 * Invokes the ->signaled op (seqno/error check) and,
			 * if true, signals the fence under irq->lock.
			 */
			if (dma_fence_is_signaled_locked(dma_fence)) {
				trace_xe_hw_fence_signal(fence);
				list_del_init(&fence->irq_link);
				/* Drop the ref taken when signaling was enabled */
				dma_fence_put(dma_fence);
			}
		}
	}
	spin_unlock(&irq->lock);
	dma_fence_end_signalling(tmp);
}
73
74void xe_hw_fence_irq_init(struct xe_hw_fence_irq *irq)
75{
76 spin_lock_init(&irq->lock);
77 init_irq_work(&irq->work, hw_fence_irq_run_cb);
78 INIT_LIST_HEAD(&irq->pending);
79 irq->enabled = true;
80}
81
/*
 * Tear down a fence irq handler.
 *
 * The pending list is expected to be empty by now; if it is not
 * (warned on), forcibly unlink, signal and release every remaining
 * fence so waiters are not left hanging. Finally waits for an RCU
 * grace period so the irq->lock handed to dma_fence_init() is no
 * longer reachable from concurrent RCU readers.
 */
void xe_hw_fence_irq_finish(struct xe_hw_fence_irq *irq)
{
	struct xe_hw_fence *fence, *next;
	unsigned long flags;
	bool tmp;

	if (XE_WARN_ON(!list_empty(&irq->pending))) {
		tmp = dma_fence_begin_signalling();
		spin_lock_irqsave(&irq->lock, flags);
		list_for_each_entry_safe(fence, next, &irq->pending, irq_link) {
			list_del_init(&fence->irq_link);
			/* NOTE(review): warns if the fence was already signalled — confirm helper semantics */
			XE_WARN_ON(dma_fence_check_and_signal_locked(&fence->dma));
			/* Drop the ref taken in xe_hw_fence_enable_signaling() */
			dma_fence_put(&fence->dma);
		}
		spin_unlock_irqrestore(&irq->lock, flags);
		dma_fence_end_signalling(tmp);
	}

	/* Safe release of the irq->lock used in dma_fence_init. */
	synchronize_rcu();
}
103
/*
 * Kick the irq_work that signals completed fences; safe to call from
 * hard-irq context (irq_work defers the list walk).
 */
void xe_hw_fence_irq_run(struct xe_hw_fence_irq *irq)
{
	irq_work_queue(&irq->work);
}
108
109void xe_hw_fence_ctx_init(struct xe_hw_fence_ctx *ctx, struct xe_gt *gt,
110 struct xe_hw_fence_irq *irq, const char *name)
111{
112 ctx->gt = gt;
113 ctx->irq = irq;
114 ctx->dma_fence_ctx = dma_fence_context_alloc(1);
115 ctx->next_seqno = XE_FENCE_INITIAL_SEQNO;
116 snprintf(ctx->name, sizeof(ctx->name), "%s", name);
117}
118
/*
 * Counterpart to xe_hw_fence_ctx_init(); intentionally empty — the
 * context owns no resources that need releasing. Kept so callers have
 * a symmetric init/finish pair.
 */
void xe_hw_fence_ctx_finish(struct xe_hw_fence_ctx *ctx)
{
}
122
123static struct xe_hw_fence *to_xe_hw_fence(struct dma_fence *fence);
124
/*
 * Recover the irq handler a fence belongs to.
 *
 * Assumes dma.extern_lock points at the irq->lock that was handed to
 * dma_fence_init() in xe_hw_fence_init() — NOTE(review): confirm this
 * field name against the dma_fence layout of the target kernel.
 */
static struct xe_hw_fence_irq *xe_hw_fence_irq(struct xe_hw_fence *fence)
{
	return container_of(fence->dma.extern_lock, struct xe_hw_fence_irq,
			    lock);
}
130
131static const char *xe_hw_fence_get_driver_name(struct dma_fence *dma_fence)
132{
133 struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence);
134
135 return dev_name(fence->xe->drm.dev);
136}
137
138static const char *xe_hw_fence_get_timeline_name(struct dma_fence *dma_fence)
139{
140 struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence);
141
142 return fence->name;
143}
144
/*
 * dma_fence_ops.signaled: poll completion state.
 *
 * Reads the hardware-written seqno through the fence's iosys map and
 * reports signalled when that seqno has caught up with the fence's own
 * seqno, or when an error has been set on the fence (errored fences
 * count as completed).
 */
static bool xe_hw_fence_signaled(struct dma_fence *dma_fence)
{
	struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence);
	struct xe_device *xe = fence->xe;
	/* Current seqno as last written back by the HW/blit */
	u32 seqno = xe_map_rd(xe, &fence->seqno_map, 0, u32);

	return dma_fence->error ||
		!__dma_fence_is_later(dma_fence, dma_fence->seqno, seqno);
}
154
/*
 * dma_fence_ops.enable_signaling: queue the fence for signalling by the
 * irq handler.
 *
 * Takes a reference that hw_fence_irq_run_cb() (or
 * xe_hw_fence_irq_finish()) drops after signalling. The pending list is
 * modified without taking irq->lock here — presumably the dma-fence
 * core calls this op with the fence lock (== irq->lock) already held;
 * NOTE(review): confirm against dma_fence_ops contract.
 */
static bool xe_hw_fence_enable_signaling(struct dma_fence *dma_fence)
{
	struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence);
	struct xe_hw_fence_irq *irq = xe_hw_fence_irq(fence);

	dma_fence_get(dma_fence);
	list_add_tail(&fence->irq_link, &irq->pending);

	/* SW completed (no HW IRQ) so kick handler to signal fence */
	if (xe_hw_fence_signaled(dma_fence))
		xe_hw_fence_irq_run(irq);

	return true;
}
169
/*
 * dma_fence_ops.release: free the fence once its refcount hits zero.
 *
 * The fence must no longer be on any pending list (warned on). Freeing
 * is deferred past an RCU grace period to tolerate RCU-protected
 * lookups of the dma_fence.
 */
static void xe_hw_fence_release(struct dma_fence *dma_fence)
{
	struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence);

	XE_WARN_ON(!list_empty(&fence->irq_link));
	call_rcu(&dma_fence->rcu, fence_free);
}
177
/* dma-fence vtable for HW fences; also the identity tag checked by to_xe_hw_fence(). */
static const struct dma_fence_ops xe_hw_fence_ops = {
	.get_driver_name = xe_hw_fence_get_driver_name,
	.get_timeline_name = xe_hw_fence_get_timeline_name,
	.enable_signaling = xe_hw_fence_enable_signaling,
	.signaled = xe_hw_fence_signaled,
	.release = xe_hw_fence_release,
};
185
/*
 * Downcast a dma_fence to the containing xe_hw_fence.
 *
 * Returns NULL (and warns) if the fence was not created by this file,
 * identified by its ops pointer. Callers here do not NULL-check the
 * result, so a mismatch would oops right after the warning.
 */
static struct xe_hw_fence *to_xe_hw_fence(struct dma_fence *fence)
{
	if (XE_WARN_ON(fence->ops != &xe_hw_fence_ops))
		return NULL;

	return container_of(fence, struct xe_hw_fence, dma);
}
193
194/**
195 * xe_hw_fence_alloc() - Allocate an hw fence.
196 *
197 * Allocate but don't initialize an hw fence.
198 *
199 * Return: Pointer to the allocated fence or
200 * negative error pointer on error.
201 */
202struct dma_fence *xe_hw_fence_alloc(void)
203{
204 struct xe_hw_fence *hw_fence = fence_alloc();
205
206 if (!hw_fence)
207 return ERR_PTR(-ENOMEM);
208
209 return &hw_fence->dma;
210}
211
212/**
213 * xe_hw_fence_free() - Free an hw fence.
214 * @fence: Pointer to the fence to free.
215 *
216 * Frees an hw fence that hasn't yet been
217 * initialized.
218 */
219void xe_hw_fence_free(struct dma_fence *fence)
220{
221 fence_free(&fence->rcu);
222}
223
224/**
225 * xe_hw_fence_init() - Initialize an hw fence.
226 * @fence: Pointer to the fence to initialize.
227 * @ctx: Pointer to the struct xe_hw_fence_ctx fence context.
228 * @seqno_map: Pointer to the map into where the seqno is blitted.
229 *
230 * Initializes a pre-allocated hw fence.
231 * After initialization, the fence is subject to normal
232 * dma-fence refcounting.
233 */
234void xe_hw_fence_init(struct dma_fence *fence, struct xe_hw_fence_ctx *ctx,
235 struct iosys_map seqno_map)
236{
237 struct xe_hw_fence *hw_fence =
238 container_of(fence, typeof(*hw_fence), dma);
239
240 hw_fence->xe = gt_to_xe(ctx->gt);
241 snprintf(hw_fence->name, sizeof(hw_fence->name), "%s", ctx->name);
242 hw_fence->seqno_map = seqno_map;
243 INIT_LIST_HEAD(&hw_fence->irq_link);
244
245 dma_fence_init(fence, &xe_hw_fence_ops, &ctx->irq->lock,
246 ctx->dma_fence_ctx, ctx->next_seqno++);
247
248 trace_xe_hw_fence_create(hw_fence);
249}