Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-only OR MIT
2/* Copyright (c) 2023 Imagination Technologies Ltd. */
3
#include "pvr_ccb.h"
#include "pvr_device.h"
#include "pvr_drv.h"
#include "pvr_dump.h"
#include "pvr_free_list.h"
#include "pvr_fw.h"
#include "pvr_gem.h"
#include "pvr_power.h"

#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>
22
23#define RESERVE_SLOT_TIMEOUT (1 * HZ) /* 1s */
24#define RESERVE_SLOT_MIN_RETRIES 10
25
26static void
27ccb_ctrl_init(void *cpu_ptr, void *priv)
28{
29 struct rogue_fwif_ccb_ctl *ctrl = cpu_ptr;
30 struct pvr_ccb *pvr_ccb = priv;
31
32 ctrl->write_offset = 0;
33 ctrl->read_offset = 0;
34 ctrl->wrap_mask = pvr_ccb->num_cmds - 1;
35 ctrl->cmd_size = pvr_ccb->cmd_size;
36}
37
/**
 * pvr_ccb_init() - Initialise a CCB
 * @pvr_dev: Device pointer.
 * @pvr_ccb: Pointer to CCB structure to initialise.
 * @num_cmds_log2: Log2 of number of commands in this CCB.
 * @cmd_size: Command size for this CCB.
 *
 * Allocates and maps the firmware-visible control structure and command ring,
 * then publishes an empty-ring state in the control structure.
 *
 * Return:
 * * Zero on success, or
 * * Any error code returned by pvr_fw_object_create_and_map().
 */
static int
pvr_ccb_init(struct pvr_device *pvr_dev, struct pvr_ccb *pvr_ccb,
	     u32 num_cmds_log2, size_t cmd_size)
{
	/* Slot count must be a power of two so the wrap mask works as a mask. */
	u32 num_cmds = 1 << num_cmds_log2;
	u32 ccb_size = num_cmds * cmd_size;
	int err;

	pvr_ccb->num_cmds = num_cmds;
	pvr_ccb->cmd_size = cmd_size;

	/* drmm-managed mutex: torn down automatically with the DRM device. */
	err = drmm_mutex_init(from_pvr_device(pvr_dev), &pvr_ccb->lock);
	if (err)
		return err;

	/*
	 * Map CCB and control structure as uncached, so we don't have to flush
	 * CPU cache repeatedly when polling for space.
	 */
	pvr_ccb->ctrl = pvr_fw_object_create_and_map(pvr_dev, sizeof(*pvr_ccb->ctrl),
						     PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
						     ccb_ctrl_init, pvr_ccb, &pvr_ccb->ctrl_obj);
	if (IS_ERR(pvr_ccb->ctrl))
		return PTR_ERR(pvr_ccb->ctrl);

	pvr_ccb->ccb = pvr_fw_object_create_and_map(pvr_dev, ccb_size,
						    PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
						    NULL, NULL, &pvr_ccb->ccb_obj);
	if (IS_ERR(pvr_ccb->ccb)) {
		err = PTR_ERR(pvr_ccb->ccb);
		goto err_free_ctrl;
	}

	/* Record the firmware-side addresses for both objects. */
	pvr_fw_object_get_fw_addr(pvr_ccb->ctrl_obj, &pvr_ccb->ctrl_fw_addr);
	pvr_fw_object_get_fw_addr(pvr_ccb->ccb_obj, &pvr_ccb->ccb_fw_addr);

	/*
	 * NOTE(review): these stores look redundant with ccb_ctrl_init(),
	 * which is passed as the init callback above — confirm the callback
	 * always runs before this point, and whether one of the two can go.
	 */
	WRITE_ONCE(pvr_ccb->ctrl->write_offset, 0);
	WRITE_ONCE(pvr_ccb->ctrl->read_offset, 0);
	WRITE_ONCE(pvr_ccb->ctrl->wrap_mask, num_cmds - 1);
	WRITE_ONCE(pvr_ccb->ctrl->cmd_size, cmd_size);

	return 0;

err_free_ctrl:
	pvr_fw_object_unmap_and_destroy(pvr_ccb->ctrl_obj);

	return err;
}
97
/**
 * pvr_ccb_fini() - Release CCB structure
 * @pvr_ccb: CCB to release.
 *
 * Destroys the command ring and control objects created by pvr_ccb_init(),
 * in reverse order of creation.
 */
void
pvr_ccb_fini(struct pvr_ccb *pvr_ccb)
{
	pvr_fw_object_unmap_and_destroy(pvr_ccb->ccb_obj);
	pvr_fw_object_unmap_and_destroy(pvr_ccb->ctrl_obj);
}
108
109/**
110 * pvr_ccb_slot_available_locked() - Test whether any slots are available in CCB
111 * @pvr_ccb: CCB to test.
112 * @write_offset: Address to store number of next available slot. May be %NULL.
113 *
114 * Caller must hold @pvr_ccb->lock.
115 *
116 * Return:
117 * * %true if a slot is available, or
118 * * %false if no slot is available.
119 */
120static __always_inline bool
121pvr_ccb_slot_available_locked(struct pvr_ccb *pvr_ccb, u32 *write_offset)
122{
123 struct rogue_fwif_ccb_ctl *ctrl = pvr_ccb->ctrl;
124 u32 next_write_offset = (READ_ONCE(ctrl->write_offset) + 1) & READ_ONCE(ctrl->wrap_mask);
125
126 lockdep_assert_held(&pvr_ccb->lock);
127
128 if (READ_ONCE(ctrl->read_offset) != next_write_offset) {
129 if (write_offset)
130 *write_offset = next_write_offset;
131 return true;
132 }
133
134 return false;
135}
136
/* Dispatch a single firmware-to-host CCB command to its handler. */
static void
process_fwccb_command(struct pvr_device *pvr_dev, struct rogue_fwif_fwccb_cmd *cmd)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);

	/* Reject commands whose type field lacks the expected magic marker. */
	if ((cmd->cmd_type & ROGUE_CMD_MAGIC_DWORD_MASK) != ROGUE_CMD_MAGIC_DWORD_SHIFTED) {
		drm_warn_once(drm_dev, "Received FWCCB command with bad magic value; ignoring (type=0x%08x)\n",
			      cmd->cmd_type);
		return;
	}

	switch (cmd->cmd_type) {
	case ROGUE_FWIF_FWCCB_CMD_REQUEST_GPU_RESTART:
		/* Firmware asked for a reset; non-hard reset requested here. */
		pvr_power_reset(pvr_dev, false);
		break;

	case ROGUE_FWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION:
		pvr_free_list_process_reconstruct_req(pvr_dev,
						      &cmd->cmd_data.cmd_freelists_reconstruction);
		break;

	case ROGUE_FWIF_FWCCB_CMD_FREELIST_GROW:
		pvr_free_list_process_grow_req(pvr_dev, &cmd->cmd_data.cmd_free_list_gs);
		break;

	case ROGUE_FWIF_FWCCB_CMD_UPDATE_STATS:
		/*
		 * We currently have no infrastructure for processing these
		 * stats. It may be added in the future, but for now just
		 * suppress the "unknown" warning when receiving this command.
		 */
		break;
	case ROGUE_FWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION:
		pvr_dump_context_reset_notification(pvr_dev,
						    &cmd->cmd_data.cmd_context_reset_notification);
		break;

	default:
		/* Strip the magic bits so the logged type matches the FW enum. */
		drm_info(drm_dev, "Received unknown FWCCB command (type=%d)\n",
			 cmd->cmd_type & ~ROGUE_CMD_MAGIC_DWORD_MASK);
		break;
	}
}
180
/**
 * pvr_fwccb_process() - Process any pending FWCCB commands
 * @pvr_dev: Target PowerVR device
 *
 * Drains the firmware CCB, handling one command at a time. The FWCCB lock is
 * dropped around each handler call so that handlers may take other locks or
 * sleep without holding up the ring.
 */
void pvr_fwccb_process(struct pvr_device *pvr_dev)
{
	struct rogue_fwif_fwccb_cmd *fwccb = pvr_dev->fwccb.ccb;
	struct rogue_fwif_ccb_ctl *ctrl = pvr_dev->fwccb.ctrl;
	u32 read_offset;

	mutex_lock(&pvr_dev->fwccb.lock);

	/* Loop until read catches up with write, i.e. the ring is empty. */
	while ((read_offset = READ_ONCE(ctrl->read_offset)) != READ_ONCE(ctrl->write_offset)) {
		/* Copy the command out so the slot can be recycled right away. */
		struct rogue_fwif_fwccb_cmd cmd = fwccb[read_offset];

		WRITE_ONCE(ctrl->read_offset, (read_offset + 1) & READ_ONCE(ctrl->wrap_mask));

		/* Drop FWCCB lock while we process command. */
		mutex_unlock(&pvr_dev->fwccb.lock);

		process_fwccb_command(pvr_dev, &cmd);

		mutex_lock(&pvr_dev->fwccb.lock);
	}

	mutex_unlock(&pvr_dev->fwccb.lock);
}
208
209/**
210 * pvr_kccb_capacity() - Returns the maximum number of usable KCCB slots.
211 * @pvr_dev: Target PowerVR device
212 *
213 * Return:
214 * * The maximum number of active slots.
215 */
216static u32 pvr_kccb_capacity(struct pvr_device *pvr_dev)
217{
218 /* Capacity is the number of slot minus one to cope with the wrapping
219 * mechanisms. If we were to use all slots, we might end up with
220 * read_offset == write_offset, which the FW considers as a KCCB-is-empty
221 * condition.
222 */
223 return pvr_dev->kccb.slot_count - 1;
224}
225
226/**
227 * pvr_kccb_used_slot_count_locked() - Get the number of used slots
228 * @pvr_dev: Device pointer.
229 *
230 * KCCB lock must be held.
231 *
232 * Return:
233 * * The number of slots currently used.
234 */
235static u32
236pvr_kccb_used_slot_count_locked(struct pvr_device *pvr_dev)
237{
238 struct pvr_ccb *pvr_ccb = &pvr_dev->kccb.ccb;
239 struct rogue_fwif_ccb_ctl *ctrl = pvr_ccb->ctrl;
240 u32 wr_offset = READ_ONCE(ctrl->write_offset);
241 u32 rd_offset = READ_ONCE(ctrl->read_offset);
242 u32 used_count;
243
244 lockdep_assert_held(&pvr_ccb->lock);
245
246 if (wr_offset >= rd_offset)
247 used_count = wr_offset - rd_offset;
248 else
249 used_count = wr_offset + pvr_dev->kccb.slot_count - rd_offset;
250
251 return used_count;
252}
253
/**
 * pvr_kccb_send_cmd_reserved_powered() - Send command to the KCCB, with the PM ref
 * held and a slot pre-reserved
 * @pvr_dev: Device pointer.
 * @cmd: Command to sent.
 * @kccb_slot: Address to store the KCCB slot for this command. May be %NULL.
 *
 * Consumes one reservation made via pvr_kccb_reserve_slot() (or the sync
 * variant), writes @cmd into the ring, and kicks the firmware scheduler.
 */
void
pvr_kccb_send_cmd_reserved_powered(struct pvr_device *pvr_dev,
				   struct rogue_fwif_kccb_cmd *cmd,
				   u32 *kccb_slot)
{
	struct pvr_ccb *pvr_ccb = &pvr_dev->kccb.ccb;
	struct rogue_fwif_kccb_cmd *kccb = pvr_ccb->ccb;
	struct rogue_fwif_ccb_ctl *ctrl = pvr_ccb->ctrl;
	u32 old_write_offset;
	u32 new_write_offset;

	/* Sending commands to a lost device is a caller bug. */
	WARN_ON(pvr_dev->lost);

	mutex_lock(&pvr_ccb->lock);

	/* A slot must have been reserved before calling this function. */
	if (WARN_ON(!pvr_dev->kccb.reserved_count))
		goto out_unlock;

	old_write_offset = READ_ONCE(ctrl->write_offset);

	/* We reserved the slot, we should have one available. */
	if (WARN_ON(!pvr_ccb_slot_available_locked(pvr_ccb, &new_write_offset)))
		goto out_unlock;

	memcpy(&kccb[old_write_offset], cmd,
	       sizeof(struct rogue_fwif_kccb_cmd));
	if (kccb_slot) {
		*kccb_slot = old_write_offset;
		/* Clear return status for this slot. */
		WRITE_ONCE(pvr_dev->kccb.rtn[old_write_offset],
			   ROGUE_FWIF_KCCB_RTN_SLOT_NO_RESPONSE);
	}
	/*
	 * Ensure the command body is visible before the write offset is
	 * published — the firmware considers the slot valid as soon as
	 * write_offset moves past it.
	 */
	mb(); /* memory barrier */
	WRITE_ONCE(ctrl->write_offset, new_write_offset);
	pvr_dev->kccb.reserved_count--;

	/* Kick MTS */
	pvr_fw_mts_schedule(pvr_dev,
			    PVR_FWIF_DM_GP & ~ROGUE_CR_MTS_SCHEDULE_DM_CLRMSK);

out_unlock:
	mutex_unlock(&pvr_ccb->lock);
}
304
305/**
306 * pvr_kccb_try_reserve_slot() - Try to reserve a KCCB slot
307 * @pvr_dev: Device pointer.
308 *
309 * Return:
310 * * true if a KCCB slot was reserved, or
311 * * false otherwise.
312 */
313static bool pvr_kccb_try_reserve_slot(struct pvr_device *pvr_dev)
314{
315 bool reserved = false;
316 u32 used_count;
317
318 mutex_lock(&pvr_dev->kccb.ccb.lock);
319
320 used_count = pvr_kccb_used_slot_count_locked(pvr_dev);
321 if (pvr_dev->kccb.reserved_count < pvr_kccb_capacity(pvr_dev) - used_count) {
322 pvr_dev->kccb.reserved_count++;
323 reserved = true;
324 }
325
326 mutex_unlock(&pvr_dev->kccb.ccb.lock);
327
328 return reserved;
329}
330
/**
 * pvr_kccb_reserve_slot_sync() - Try to reserve a slot synchronously
 * @pvr_dev: Device pointer.
 *
 * Polls for a free slot, sleeping briefly between attempts, until either a
 * slot is reserved or both the timeout has elapsed and the minimum retry
 * count has been reached.
 *
 * Return:
 * * 0 on success, or
 * * -EBUSY if no slots were reserved after %RESERVE_SLOT_TIMEOUT, with a minimum of
 *   %RESERVE_SLOT_MIN_RETRIES retries.
 */
static int pvr_kccb_reserve_slot_sync(struct pvr_device *pvr_dev)
{
	unsigned long start_timestamp = jiffies;
	bool reserved = false;
	u32 retries = 0;

	/*
	 * Keep trying while within the timeout window OR below the minimum
	 * retry count — the retry floor guarantees a few attempts even if
	 * jiffies jumps past the deadline.
	 */
	while (time_before(jiffies, start_timestamp + RESERVE_SLOT_TIMEOUT) ||
	       retries < RESERVE_SLOT_MIN_RETRIES) {
		reserved = pvr_kccb_try_reserve_slot(pvr_dev);
		if (reserved)
			break;

		usleep_range(1, 50);

		/* Saturate instead of wrapping so the floor check stays valid. */
		if (retries < U32_MAX)
			retries++;
	}

	return reserved ? 0 : -EBUSY;
}
360
361/**
362 * pvr_kccb_send_cmd_powered() - Send command to the KCCB, with a PM ref held
363 * @pvr_dev: Device pointer.
364 * @cmd: Command to sent.
365 * @kccb_slot: Address to store the KCCB slot for this command. May be %NULL.
366 *
367 * Returns:
368 * * Zero on success, or
369 * * -EBUSY if timeout while waiting for a free KCCB slot.
370 */
371int
372pvr_kccb_send_cmd_powered(struct pvr_device *pvr_dev, struct rogue_fwif_kccb_cmd *cmd,
373 u32 *kccb_slot)
374{
375 int err;
376
377 err = pvr_kccb_reserve_slot_sync(pvr_dev);
378 if (err)
379 return err;
380
381 pvr_kccb_send_cmd_reserved_powered(pvr_dev, cmd, kccb_slot);
382 return 0;
383}
384
385/**
386 * pvr_kccb_send_cmd() - Send command to the KCCB
387 * @pvr_dev: Device pointer.
388 * @cmd: Command to sent.
389 * @kccb_slot: Address to store the KCCB slot for this command. May be %NULL.
390 *
391 * Returns:
392 * * Zero on success, or
393 * * -EBUSY if timeout while waiting for a free KCCB slot.
394 */
395int
396pvr_kccb_send_cmd(struct pvr_device *pvr_dev, struct rogue_fwif_kccb_cmd *cmd,
397 u32 *kccb_slot)
398{
399 int err;
400
401 err = pvr_power_get(pvr_dev);
402 if (err)
403 return err;
404
405 err = pvr_kccb_send_cmd_powered(pvr_dev, cmd, kccb_slot);
406
407 pvr_power_put(pvr_dev);
408
409 return err;
410}
411
/**
 * pvr_kccb_wait_for_completion() - Wait for a KCCB command to complete
 * @pvr_dev: Device pointer.
 * @slot_nr: KCCB slot to wait on.
 * @timeout: Timeout length (in jiffies).
 * @rtn_out: Location to store KCCB command result. May be %NULL.
 *
 * Sleeps on the KCCB return queue until the firmware marks the slot as
 * executed or the timeout expires.
 *
 * Returns:
 * * Zero on success, or
 * * -ETIMEDOUT on timeout.
 */
int
pvr_kccb_wait_for_completion(struct pvr_device *pvr_dev, u32 slot_nr,
			     u32 timeout, u32 *rtn_out)
{
	/* wait_event_timeout() returns 0 on timeout, >0 remaining jiffies otherwise. */
	int ret = wait_event_timeout(pvr_dev->kccb.rtn_q, READ_ONCE(pvr_dev->kccb.rtn[slot_nr]) &
				     ROGUE_FWIF_KCCB_RTN_SLOT_CMD_EXECUTED, timeout);

	if (ret && rtn_out)
		*rtn_out = READ_ONCE(pvr_dev->kccb.rtn[slot_nr]);

	return ret ? 0 : -ETIMEDOUT;
}
435
436/**
437 * pvr_kccb_is_idle() - Returns whether the device's KCCB is idle
438 * @pvr_dev: Device pointer
439 *
440 * Returns:
441 * * %true if the KCCB is idle (contains no commands), or
442 * * %false if the KCCB contains pending commands.
443 */
444bool
445pvr_kccb_is_idle(struct pvr_device *pvr_dev)
446{
447 struct rogue_fwif_ccb_ctl *ctrl = pvr_dev->kccb.ccb.ctrl;
448 bool idle;
449
450 mutex_lock(&pvr_dev->kccb.ccb.lock);
451
452 idle = (READ_ONCE(ctrl->write_offset) == READ_ONCE(ctrl->read_offset));
453
454 mutex_unlock(&pvr_dev->kccb.ccb.lock);
455
456 return idle;
457}
458
/* dma_fence callback: driver name reported for KCCB fences. */
static const char *
pvr_kccb_fence_get_driver_name(struct dma_fence *f)
{
	return PVR_DRIVER_NAME;
}

/* dma_fence callback: timeline name reported for KCCB fences. */
static const char *
pvr_kccb_fence_get_timeline_name(struct dma_fence *f)
{
	return "kccb";
}

/* Minimal fence ops: default wait/signal behaviour, custom names only. */
static const struct dma_fence_ops pvr_kccb_fence_ops = {
	.get_driver_name = pvr_kccb_fence_get_driver_name,
	.get_timeline_name = pvr_kccb_fence_get_timeline_name,
};
475
/**
 * struct pvr_kccb_fence - Fence object used to wait for a KCCB slot
 *
 * Signalled by pvr_kccb_wake_up_waiters() once a slot becomes available.
 */
struct pvr_kccb_fence {
	/** @base: Base dma_fence object. */
	struct dma_fence base;

	/** @node: Node used to insert the fence in the pvr_device::kccb::waiters list. */
	struct list_head node;
};
486
/**
 * pvr_kccb_wake_up_waiters() - Check the KCCB waiters
 * @pvr_dev: Target PowerVR device
 *
 * Signal as many KCCB fences as we have slots available.
 */
void pvr_kccb_wake_up_waiters(struct pvr_device *pvr_dev)
{
	struct pvr_kccb_fence *fence, *tmp_fence;
	u32 used_count, available_count;

	/* Wake up those waiting for KCCB slot execution. */
	wake_up_all(&pvr_dev->kccb.rtn_q);

	/* Then iterate over all KCCB fences and signal as many as we can. */
	mutex_lock(&pvr_dev->kccb.ccb.lock);
	used_count = pvr_kccb_used_slot_count_locked(pvr_dev);

	/* Accounting invariant: reservations never exceed total capacity. */
	if (WARN_ON(used_count + pvr_dev->kccb.reserved_count > pvr_kccb_capacity(pvr_dev)))
		goto out_unlock;

	available_count = pvr_kccb_capacity(pvr_dev) - used_count - pvr_dev->kccb.reserved_count;
	/* Signal waiters FIFO, converting each into a reservation. */
	list_for_each_entry_safe(fence, tmp_fence, &pvr_dev->kccb.waiters, node) {
		if (!available_count)
			break;

		list_del(&fence->node);
		pvr_dev->kccb.reserved_count++;
		available_count--;
		dma_fence_signal(&fence->base);
		/* Drop the reference taken when the fence was queued. */
		dma_fence_put(&fence->base);
	}

out_unlock:
	mutex_unlock(&pvr_dev->kccb.ccb.lock);
}
523
/**
 * pvr_kccb_fini() - Cleanup device KCCB
 * @pvr_dev: Target PowerVR device
 *
 * At teardown no fence waiters or outstanding reservations should remain;
 * either indicates a leaked slot reservation.
 */
void pvr_kccb_fini(struct pvr_device *pvr_dev)
{
	pvr_ccb_fini(&pvr_dev->kccb.ccb);
	WARN_ON(!list_empty(&pvr_dev->kccb.waiters));
	WARN_ON(pvr_dev->kccb.reserved_count);
}
534
535/**
536 * pvr_kccb_init() - Initialise device KCCB
537 * @pvr_dev: Target PowerVR device
538 *
539 * Returns:
540 * * 0 on success, or
541 * * Any error returned by pvr_ccb_init().
542 */
543int
544pvr_kccb_init(struct pvr_device *pvr_dev)
545{
546 pvr_dev->kccb.slot_count = 1 << ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT;
547 INIT_LIST_HEAD(&pvr_dev->kccb.waiters);
548 pvr_dev->kccb.fence_ctx.id = dma_fence_context_alloc(1);
549 spin_lock_init(&pvr_dev->kccb.fence_ctx.lock);
550
551 return pvr_ccb_init(pvr_dev, &pvr_dev->kccb.ccb,
552 ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT,
553 sizeof(struct rogue_fwif_kccb_cmd));
554}
555
556/**
557 * pvr_kccb_fence_alloc() - Allocate a pvr_kccb_fence object
558 *
559 * Return:
560 * * NULL if the allocation fails, or
561 * * A valid dma_fence pointer otherwise.
562 */
563struct dma_fence *pvr_kccb_fence_alloc(void)
564{
565 struct pvr_kccb_fence *kccb_fence;
566
567 kccb_fence = kzalloc_obj(*kccb_fence);
568 if (!kccb_fence)
569 return NULL;
570
571 return &kccb_fence->base;
572}
573
574/**
575 * pvr_kccb_fence_put() - Drop a KCCB fence reference
576 * @fence: The fence to drop the reference on.
577 *
578 * If the fence hasn't been initialized yet, dma_fence_free() is called. This
579 * way we have a single function taking care of both cases.
580 */
581void pvr_kccb_fence_put(struct dma_fence *fence)
582{
583 if (!fence)
584 return;
585
586 if (!fence->ops) {
587 dma_fence_free(fence);
588 } else {
589 WARN_ON(fence->ops != &pvr_kccb_fence_ops);
590 dma_fence_put(fence);
591 }
592}
593
/**
 * pvr_kccb_reserve_slot() - Reserve a KCCB slot for later use
 * @pvr_dev: Target PowerVR device
 * @f: KCCB fence object previously allocated with pvr_kccb_fence_alloc()
 *
 * Try to reserve a KCCB slot, and if there's no slot available,
 * initializes the fence object and queue it to the waiters list.
 *
 * If NULL is returned, that means the slot is reserved. In that case,
 * the @f is freed and shouldn't be accessed after that point.
 *
 * Return:
 * * NULL if a slot was available directly, or
 * * A valid dma_fence object to wait on if no slot was available.
 */
struct dma_fence *
pvr_kccb_reserve_slot(struct pvr_device *pvr_dev, struct dma_fence *f)
{
	struct pvr_kccb_fence *fence = container_of(f, struct pvr_kccb_fence, base);
	struct dma_fence *out_fence = NULL;
	u32 used_count;

	mutex_lock(&pvr_dev->kccb.ccb.lock);

	used_count = pvr_kccb_used_slot_count_locked(pvr_dev);
	if (pvr_dev->kccb.reserved_count >= pvr_kccb_capacity(pvr_dev) - used_count) {
		/* No room: initialise the fence and queue it for later signal. */
		dma_fence_init(&fence->base, &pvr_kccb_fence_ops,
			       &pvr_dev->kccb.fence_ctx.lock,
			       pvr_dev->kccb.fence_ctx.id,
			       atomic_inc_return(&pvr_dev->kccb.fence_ctx.seqno));
		/* Extra reference held by the waiters list; dropped on signal. */
		out_fence = dma_fence_get(&fence->base);
		list_add_tail(&fence->node, &pvr_dev->kccb.waiters);
	} else {
		/* Slot available: consume the fence and take the reservation. */
		pvr_kccb_fence_put(f);
		pvr_dev->kccb.reserved_count++;
	}

	mutex_unlock(&pvr_dev->kccb.ccb.lock);

	return out_fence;
}
635
636/**
637 * pvr_kccb_release_slot() - Release a KCCB slot reserved with
638 * pvr_kccb_reserve_slot()
639 * @pvr_dev: Target PowerVR device
640 *
641 * Should only be called if something failed after the
642 * pvr_kccb_reserve_slot() call and you know you won't call
643 * pvr_kccb_send_cmd_reserved().
644 */
645void pvr_kccb_release_slot(struct pvr_device *pvr_dev)
646{
647 mutex_lock(&pvr_dev->kccb.ccb.lock);
648 if (!WARN_ON(!pvr_dev->kccb.reserved_count))
649 pvr_dev->kccb.reserved_count--;
650 mutex_unlock(&pvr_dev->kccb.ccb.lock);
651}
652
/**
 * pvr_fwccb_init() - Initialise device FWCCB
 * @pvr_dev: Target PowerVR device
 *
 * The FWCCB carries firmware-to-host commands; see pvr_fwccb_process().
 *
 * Returns:
 * * 0 on success, or
 * * Any error returned by pvr_ccb_init().
 */
int
pvr_fwccb_init(struct pvr_device *pvr_dev)
{
	return pvr_ccb_init(pvr_dev, &pvr_dev->fwccb,
			    ROGUE_FWIF_FWCCB_NUMCMDS_LOG2,
			    sizeof(struct rogue_fwif_fwccb_cmd));
}