// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-fence-unwrap.h>
#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_gpu_trace.h"
#include "msm_syncobj.h"

/* For userspace errors, use DRM_UT_DRIVER.. so that userspace can enable
 * error msgs for debugging, but we don't spam dmesg by default
 */
#define SUBMIT_ERROR(err, submit, fmt, ...) \
	UERR(err, (submit)->dev, fmt, ##__VA_ARGS__)

/*
 * Cmdstream submission:
 */

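/*
 * Roughly, the userspace side of a submit looks something like the
 * sketch below (see include/uapi/drm/msm_drm.h for the authoritative
 * UAPI structs; the bo handles and sizes here are placeholders):
 *
 *	struct drm_msm_gem_submit_bo bos[2] = {
 *		{ .handle = cmdstream_bo, .flags = MSM_SUBMIT_BO_READ },
 *		{ .handle = target_bo,    .flags = MSM_SUBMIT_BO_WRITE },
 *	};
 *	struct drm_msm_gem_submit_cmd cmds[1] = {
 *		{ .type = MSM_SUBMIT_CMD_BUF, .submit_idx = 0,
 *		  .size = cmdstream_size_in_bytes },
 *	};
 *	struct drm_msm_gem_submit req = {
 *		.flags   = MSM_PIPE_3D0,
 *		.queueid = queueid,
 *		.nr_bos  = 2,
 *		.bos     = (uintptr_t)bos,
 *		.nr_cmds = 1,
 *		.cmds    = (uintptr_t)cmds,
 *	};
 *
 *	drmCommandWriteRead(fd, DRM_MSM_GEM_SUBMIT, &req, sizeof(req));
 */
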
static struct msm_gem_submit *submit_create(struct drm_device *dev,
		struct msm_gpu *gpu,
		struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
		uint32_t nr_cmds, u64 drm_client_id)
{
	static atomic_t ident = ATOMIC_INIT(0);
	struct msm_gem_submit *submit;
	uint64_t sz;
	int ret;

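	/* Size is computed in 64-bit to catch overflow; on 32-bit,
	 * anything that doesn't fit in size_t is rejected below:
	 */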
	sz = struct_size(submit, bos, nr_bos) +
		((u64)nr_cmds * sizeof(submit->cmd[0]));

	if (sz > SIZE_MAX)
		return ERR_PTR(-ENOMEM);

	submit = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
	if (!submit)
		return ERR_PTR(-ENOMEM);

	submit->hw_fence = msm_fence_alloc();
	if (IS_ERR(submit->hw_fence)) {
		ret = PTR_ERR(submit->hw_fence);
		kfree(submit);
		return ERR_PTR(ret);
	}

	ret = drm_sched_job_init(&submit->base, queue->entity, 1, queue,
				 drm_client_id);
	if (ret) {
		kfree(submit->hw_fence);
		kfree(submit);
		return ERR_PTR(ret);
	}

	kref_init(&submit->ref);
	submit->dev = dev;
	submit->vm = msm_context_vm(dev, queue->ctx);
	submit->gpu = gpu;
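	/* The cmd[] array lives in the same allocation, immediately
	 * after the bos[] array (see the struct_size() calc above):
	 */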
	submit->cmd = (void *)&submit->bos[nr_bos];
	submit->queue = queue;
	submit->pid = get_pid(task_pid(current));
	submit->ring = gpu->rb[queue->ring_nr];
	submit->fault_dumped = false;

	/* Get a unique identifier for the submission for logging purposes */
	submit->ident = atomic_inc_return(&ident) - 1;

	INIT_LIST_HEAD(&submit->node);

	return submit;
}

void __msm_gem_submit_destroy(struct kref *kref)
{
	struct msm_gem_submit *submit =
			container_of(kref, struct msm_gem_submit, ref);
	unsigned i;

	/*
	 * In error paths, we could unref the submit without calling
	 * drm_sched_entity_push_job(), so msm_job_free() will never
	 * get called.  Since drm_sched_job_cleanup() will NULL out
	 * s_fence, we can use that to detect this case.
	 */
	if (submit->base.s_fence)
		drm_sched_job_cleanup(&submit->base);

	if (submit->fence_id) {
		spin_lock(&submit->queue->idr_lock);
		idr_remove(&submit->queue->fence_idr, submit->fence_id);
		spin_unlock(&submit->queue->idr_lock);
	}

	dma_fence_put(submit->user_fence);

	/*
	 * If the submit is freed before msm_job_run(), then hw_fence is
	 * just some pre-allocated memory, not a reference counted fence.
	 * Once the job runs and the hw_fence is initialized, it will
	 * have a refcount of at least one, since the submit holds a ref
	 * to the hw_fence.
	 */
	if (kref_read(&submit->hw_fence->refcount) == 0) {
		kfree(submit->hw_fence);
	} else {
		dma_fence_put(submit->hw_fence);
	}

	put_pid(submit->pid);
	msm_submitqueue_put(submit->queue);

	for (i = 0; i < submit->nr_cmds; i++)
		kfree(submit->cmd[i].relocs);

	kfree(submit);
}

static int submit_lookup_objects(struct msm_gem_submit *submit,
		struct drm_msm_gem_submit *args, struct drm_file *file)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_msm_gem_submit_bo submit_bo;
		void __user *userptr =
			u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));

		/* make sure we don't have garbage flags, in case we hit
		 * error path before flags is initialized:
		 */
		submit->bos[i].flags = 0;

		if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
			ret = -EFAULT;
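			/* No objects have been looked up yet (that happens
			 * in the second loop below), so report nr_bos = 0 to
			 * keep cleanup from touching bos[i].obj:
			 */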
			i = 0;
			goto out;
		}

/* at least one of READ and/or WRITE flags should be set: */
#define MANDATORY_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)

		if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
		    !(submit_bo.flags & MANDATORY_FLAGS)) {
			ret = SUBMIT_ERROR(EINVAL, submit, "invalid flags: %x\n", submit_bo.flags);
			i = 0;
			goto out;
		}

		submit->bos[i].handle = submit_bo.handle;
		submit->bos[i].flags = submit_bo.flags;
	}

	spin_lock(&file->table_lock);

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_gem_object *obj;

		/* normally use drm_gem_object_lookup(), but for bulk lookup
		 * all under single table_lock just hit object_idr directly:
		 */
		obj = idr_find(&file->object_idr, submit->bos[i].handle);
		if (!obj) {
			ret = SUBMIT_ERROR(EINVAL, submit, "invalid handle %u at index %u\n", submit->bos[i].handle, i);
			goto out_unlock;
		}

		drm_gem_object_get(obj);

		submit->bos[i].obj = obj;
	}

out_unlock:
	spin_unlock(&file->table_lock);

out:
	submit->nr_bos = i;

	return ret;
}

static int submit_lookup_cmds(struct msm_gem_submit *submit,
		struct drm_msm_gem_submit *args, struct drm_file *file)
{
	struct msm_context *ctx = file->driver_priv;
	unsigned i;
	size_t sz;
	int ret = 0;

	for (i = 0; i < args->nr_cmds; i++) {
		struct drm_msm_gem_submit_cmd submit_cmd;
		void __user *userptr =
			u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));

		ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		/* validate input from userspace: */
		switch (submit_cmd.type) {
		case MSM_SUBMIT_CMD_BUF:
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			break;
		default:
			return SUBMIT_ERROR(EINVAL, submit, "invalid type: %08x\n", submit_cmd.type);
		}

		if (submit_cmd.size % 4) {
			ret = SUBMIT_ERROR(EINVAL, submit, "non-aligned cmdstream buffer size: %u\n",
					   submit_cmd.size);
			goto out;
		}

		if (msm_context_is_vmbind(ctx)) {
			if (submit_cmd.nr_relocs) {
				ret = SUBMIT_ERROR(EINVAL, submit, "nr_relocs must be zero");
				goto out;
			}

			if (submit_cmd.submit_idx || submit_cmd.submit_offset) {
				ret = SUBMIT_ERROR(EINVAL, submit, "submit_idx/offset must be zero");
				goto out;
			}

			submit->cmd[i].iova = submit_cmd.iova;
		}

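		/* sizes and offsets are converted from bytes to dwords: */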
		submit->cmd[i].type = submit_cmd.type;
		submit->cmd[i].size = submit_cmd.size / 4;
		submit->cmd[i].offset = submit_cmd.submit_offset / 4;
		submit->cmd[i].idx = submit_cmd.submit_idx;
		submit->cmd[i].nr_relocs = submit_cmd.nr_relocs;

		userptr = u64_to_user_ptr(submit_cmd.relocs);

		sz = array_size(submit_cmd.nr_relocs,
				sizeof(struct drm_msm_gem_submit_reloc));
		/* check for overflow: */
		if (sz == SIZE_MAX) {
			ret = -ENOMEM;
			goto out;
		}
		submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN);
		if (!submit->cmd[i].relocs) {
			ret = -ENOMEM;
			goto out;
		}
		ret = copy_from_user(submit->cmd[i].relocs, userptr, sz);
		if (ret) {
			ret = -EFAULT;
			goto out;
		}
	}

out:
	return ret;
}

static int submit_lock_objects_vmbind(struct msm_gem_submit *submit)
{
	unsigned flags = DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES;
	struct drm_exec *exec = &submit->exec;
	int ret = 0;

	drm_exec_init(&submit->exec, flags, submit->nr_bos);
	submit->has_exec = true;

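	/* drm_exec handles the ww-mutex dance; on contention all held
	 * locks are dropped and the loop body is retried:
	 */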
	drm_exec_until_all_locked (&submit->exec) {
		ret = drm_gpuvm_prepare_vm(submit->vm, exec, 1);
		drm_exec_retry_on_contention(exec);
		if (ret)
			break;

		ret = drm_gpuvm_prepare_objects(submit->vm, exec, 1);
		drm_exec_retry_on_contention(exec);
		if (ret)
			break;
	}

	return ret;
}

/* This is where we make sure all the bo's are reserved and pin'd: */
static int submit_lock_objects(struct msm_gem_submit *submit)
{
	unsigned flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
	int ret = 0;

	if (msm_context_is_vmbind(submit->queue->ctx))
		return submit_lock_objects_vmbind(submit);

	drm_exec_init(&submit->exec, flags, submit->nr_bos);
	submit->has_exec = true;

	drm_exec_until_all_locked (&submit->exec) {
		ret = drm_exec_lock_obj(&submit->exec,
					drm_gpuvm_resv_obj(submit->vm));
		drm_exec_retry_on_contention(&submit->exec);
		if (ret)
			break;
		for (unsigned i = 0; i < submit->nr_bos; i++) {
			struct drm_gem_object *obj = submit->bos[i].obj;
			ret = drm_exec_prepare_obj(&submit->exec, obj, 1);
			drm_exec_retry_on_contention(&submit->exec);
			if (ret)
				break;
		}
	}

	return ret;
}

static int submit_fence_sync(struct msm_gem_submit *submit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = submit->bos[i].obj;
		bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;

		/* Even when implicit sync is enabled for the submit as a
		 * whole, userspace can ask for it to be disabled on specific
		 * buffers.  This is useful for internal usermode driver
		 * managed buffers, suballocation, etc.
		 */
		if (submit->bos[i].flags & MSM_SUBMIT_BO_NO_IMPLICIT)
			continue;

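		/* A WRITE buffer must wait for prior readers and writers;
		 * a READ-only buffer just for prior writers:
		 */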
		ret = drm_sched_job_add_implicit_dependencies(&submit->base,
							      obj,
							      write);
		if (ret)
			break;
	}

	return ret;
}

static int submit_pin_objects(struct msm_gem_submit *submit)
{
	struct msm_drm_private *priv = submit->dev->dev_private;
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = submit->bos[i].obj;
		struct drm_gpuva *vma;

		/* if locking succeeded, pin bo: */
		vma = msm_gem_get_vma_locked(obj, submit->vm);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			break;
		}

		ret = msm_gem_pin_vma_locked(obj, vma);
		if (ret)
			break;

		submit->bos[i].vm_bo = drm_gpuvm_bo_get(vma->vm_bo);
		submit->bos[i].iova = vma->va.addr;
	}

	/*
	 * A second loop while holding the LRU lock (a) avoids acquiring/
	 * dropping the LRU lock for each individual bo, while (b) avoiding
	 * holding the LRU lock while calling msm_gem_pin_vma_locked() (which
	 * could trigger get_pages(), which could trigger reclaim, which
	 * could deadlock against the shrinker if we held the LRU lock).
	 */
	mutex_lock(&priv->lru.lock);
	for (i = 0; i < submit->nr_bos; i++) {
		msm_gem_pin_obj_locked(submit->bos[i].obj);
	}
	mutex_unlock(&priv->lru.lock);

	submit->bos_pinned = true;

	return ret;
}

static void submit_unpin_objects(struct msm_gem_submit *submit)
{
	if (!submit->bos_pinned)
		return;

	for (int i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = submit->bos[i].obj;

		msm_gem_unpin_locked(obj);
	}

	submit->bos_pinned = false;
}

static void submit_attach_object_fences(struct msm_gem_submit *submit)
{
	struct msm_gem_vm *vm = to_msm_vm(submit->vm);
	struct dma_fence *last_fence;

	if (msm_context_is_vmbind(submit->queue->ctx)) {
		drm_gpuvm_resv_add_fence(submit->vm, &submit->exec,
					 submit->user_fence,
					 DMA_RESV_USAGE_BOOKKEEP,
					 DMA_RESV_USAGE_BOOKKEEP);

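		/* Fold this submit's fence into the VM's cumulative
		 * last_fence:
		 */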
		last_fence = vm->last_fence;
		vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence);
		dma_fence_put(last_fence);

		return;
	}

	for (unsigned i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = submit->bos[i].obj;

		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
			dma_resv_add_fence(obj->resv, submit->user_fence,
					   DMA_RESV_USAGE_WRITE);
		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
			dma_resv_add_fence(obj->resv, submit->user_fence,
					   DMA_RESV_USAGE_READ);
	}
}

static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
		struct drm_gem_object **obj, uint64_t *iova)
{
	if (idx >= submit->nr_bos) {
		return SUBMIT_ERROR(EINVAL, submit, "invalid buffer index: %u (out of %u)\n",
				    idx, submit->nr_bos);
	}

	if (obj)
		*obj = submit->bos[idx].obj;
	if (iova)
		*iova = submit->bos[idx].iova;

	return 0;
}

/* process the reloc's and patch up the cmdstream as needed: */
static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *obj,
		uint32_t offset, uint32_t nr_relocs, struct drm_msm_gem_submit_reloc *relocs)
{
	uint32_t i, last_offset = 0;
	uint32_t *ptr;
	int ret = 0;

	if (offset % 4)
		return SUBMIT_ERROR(EINVAL, submit, "non-aligned cmdstream buffer: %u\n", offset);

	/* For now, just map the entire thing.  Eventually we probably want
	 * to do it page-by-page, w/ kmap() if not vmap()d..
	 */
	ptr = msm_gem_get_vaddr_locked(obj);

	if (IS_ERR(ptr)) {
		ret = PTR_ERR(ptr);
		DBG("failed to map: %d", ret);
		return ret;
	}

	for (i = 0; i < nr_relocs; i++) {
		struct drm_msm_gem_submit_reloc submit_reloc = relocs[i];
		uint32_t off;
		uint64_t iova;

		if (submit_reloc.submit_offset % 4) {
			ret = SUBMIT_ERROR(EINVAL, submit, "non-aligned reloc offset: %u\n",
					   submit_reloc.submit_offset);
			goto out;
		}

		/* offset in dwords: */
		off = submit_reloc.submit_offset / 4;

		if ((off >= (obj->size / 4)) ||
		    (off < last_offset)) {
			ret = SUBMIT_ERROR(EINVAL, submit, "invalid offset %u at reloc %u\n", off, i);
			goto out;
		}

		ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova);
		if (ret)
			goto out;

		iova += submit_reloc.reloc_offset;

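		/* A negative shift value means shift right rather than
		 * left:
		 */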
		if (submit_reloc.shift < 0)
			iova >>= -submit_reloc.shift;
		else
			iova <<= submit_reloc.shift;

		ptr[off] = iova | submit_reloc.or;

		last_offset = off;
	}

out:
	msm_gem_put_vaddr_locked(obj);

	return ret;
}

/* Cleanup submit at end of ioctl.  In the error case, this also drops
 * references, unpins, and drops active refcnt.  In the non-error case,
 * this is done when the submit is retired.
 */
static void submit_cleanup(struct msm_gem_submit *submit, bool error)
{
	if (error)
		submit_unpin_objects(submit);

	if (submit->has_exec)
		drm_exec_fini(&submit->exec);

	/* if job wasn't enqueued to scheduler, early retirement: */
	if (error)
		msm_submit_retire(submit);
}

void msm_submit_retire(struct msm_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = submit->bos[i].obj;
		struct drm_gpuvm_bo *vm_bo = submit->bos[i].vm_bo;

		msm_gem_lock(obj);
		drm_gpuvm_bo_put(vm_bo);
		msm_gem_unlock(obj);
		drm_gem_object_put(obj);
	}
}

int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_gem_submit *args = data;
	struct msm_context *ctx = file->driver_priv;
	struct msm_gem_submit *submit = NULL;
	struct msm_gpu *gpu = priv->gpu;
	struct msm_gpu_submitqueue *queue;
	struct msm_ringbuffer *ring;
	struct msm_syncobj_post_dep *post_deps = NULL;
	struct drm_syncobj **syncobjs_to_reset = NULL;
	struct sync_file *sync_file = NULL;
	unsigned cmds_to_parse;
	int out_fence_fd = -1;
	unsigned i;
	int ret;

	if (!gpu)
		return -ENXIO;

	if (args->pad)
		return -EINVAL;

	if (to_msm_vm(ctx->vm)->unusable)
		return UERR(EPIPE, dev, "context is unusable");

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
		return UERR(EINVAL, dev, "invalid pipe");

	if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
		return UERR(EINVAL, dev, "invalid flags");

	if (args->flags & MSM_SUBMIT_SUDO) {
		if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) ||
		    !capable(CAP_SYS_RAWIO))
			return -EINVAL;
	}

	queue = msm_submitqueue_get(ctx, args->queueid);
	if (!queue)
		return -ENOENT;

	if (queue->flags & MSM_SUBMITQUEUE_VM_BIND) {
		ret = UERR(EINVAL, dev, "Invalid queue type");
		goto out_post_unlock;
	}

	ring = gpu->rb[queue->ring_nr];

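	/* Reserve the fd now, but only install it once the submit has
	 * succeeded:
	 */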
	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			goto out_post_unlock;
		}
	}

	submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds,
			       file->client_id);
	if (IS_ERR(submit)) {
		ret = PTR_ERR(submit);
		goto out_post_unlock;
	}

	trace_msm_gpu_submit(pid_nr(submit->pid), ring->id, submit->ident,
			     args->nr_bos, args->nr_cmds);

	ret = mutex_lock_interruptible(&queue->lock);
	if (ret)
		goto out_post_unlock;

	if (args->flags & MSM_SUBMIT_SUDO)
		submit->in_rb = true;

	if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(args->fence_fd);

		if (!in_fence) {
			ret = UERR(EINVAL, dev, "invalid in-fence");
			goto out_unlock;
		}

		ret = drm_sched_job_add_dependency(&submit->base, in_fence);
		if (ret)
			goto out_unlock;
	}

	if (args->flags & MSM_SUBMIT_SYNCOBJ_IN) {
		syncobjs_to_reset = msm_syncobj_parse_deps(dev, &submit->base,
							   file, args->in_syncobjs,
							   args->nr_in_syncobjs,
							   args->syncobj_stride);
		if (IS_ERR(syncobjs_to_reset)) {
			ret = PTR_ERR(syncobjs_to_reset);
			goto out_unlock;
		}
	}

	if (args->flags & MSM_SUBMIT_SYNCOBJ_OUT) {
		post_deps = msm_syncobj_parse_post_deps(dev, file,
							args->out_syncobjs,
							args->nr_out_syncobjs,
							args->syncobj_stride);
		if (IS_ERR(post_deps)) {
			ret = PTR_ERR(post_deps);
			goto out_unlock;
		}
	}

	ret = submit_lookup_objects(submit, args, file);
	if (ret)
		goto out;

	ret = submit_lookup_cmds(submit, args, file);
	if (ret)
		goto out;

	/* copy_*_user while holding a ww ticket upsets lockdep */
	ret = submit_lock_objects(submit);
	if (ret)
		goto out;

	if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
		ret = submit_fence_sync(submit);
		if (ret)
			goto out;
	}

	ret = submit_pin_objects(submit);
	if (ret)
		goto out;

	cmds_to_parse = msm_context_is_vmbind(ctx) ? 0 : args->nr_cmds;

	for (i = 0; i < cmds_to_parse; i++) {
		struct drm_gem_object *obj;
		uint64_t iova;

		ret = submit_bo(submit, submit->cmd[i].idx, &obj, &iova);
		if (ret)
			goto out;

		if (!submit->cmd[i].size ||
		    (size_add(submit->cmd[i].size, submit->cmd[i].offset) > obj->size / 4)) {
			ret = UERR(EINVAL, dev, "invalid cmdstream size: %u\n",
				   submit->cmd[i].size * 4);
			goto out;
		}

		submit->cmd[i].iova = iova + (submit->cmd[i].offset * 4);

		if (likely(!submit->cmd[i].nr_relocs))
			continue;

		if (!gpu->allow_relocs) {
			ret = UERR(EINVAL, dev, "relocs not allowed\n");
			goto out;
		}

		ret = submit_reloc(submit, obj, submit->cmd[i].offset * 4,
				   submit->cmd[i].nr_relocs, submit->cmd[i].relocs);
		if (ret)
			goto out;
	}

	submit->nr_cmds = args->nr_cmds;

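	/* Pre-allocate idr backing so the GFP_NOWAIT allocations below,
	 * made while holding the idr spinlock, don't fail:
	 */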
	idr_preload(GFP_KERNEL);

	spin_lock(&queue->idr_lock);

	/*
	 * If using a userspace provided seqno fence, validate that the id
	 * is available before arming the sched job.  Since access to
	 * fence_idr is serialized on the queue lock, the slot should still
	 * be available after the job is armed.
	 */
	if ((args->flags & MSM_SUBMIT_FENCE_SN_IN) &&
	    (!args->fence || idr_find(&queue->fence_idr, args->fence))) {
		spin_unlock(&queue->idr_lock);
		idr_preload_end();
		ret = UERR(EINVAL, dev, "invalid in-fence-sn");
		goto out;
	}

	drm_sched_job_arm(&submit->base);

	submit->user_fence = dma_fence_get(&submit->base.s_fence->finished);

	if (args->flags & MSM_SUBMIT_FENCE_SN_IN) {
		/*
		 * Userspace has assigned the seqno fence that it wants
		 * us to use.  It is an error to pick a fence sequence
		 * number that is not available.
		 */
		submit->fence_id = args->fence;
		ret = idr_alloc_u32(&queue->fence_idr, submit->user_fence,
				    &submit->fence_id, submit->fence_id,
				    GFP_NOWAIT);
		/*
		 * We've already validated that the fence_id slot is valid,
		 * so if idr_alloc_u32 failed, it is a kernel bug
		 */
		WARN_ON(ret);
	} else {
		/*
		 * Allocate an id which can be used by WAIT_FENCE ioctl to map
		 * back to the underlying fence.
		 */
		submit->fence_id = idr_alloc_cyclic(&queue->fence_idr,
						    submit->user_fence, 1,
						    INT_MAX, GFP_NOWAIT);
	}

	spin_unlock(&queue->idr_lock);
	idr_preload_end();

	if (submit->fence_id < 0) {
		ret = submit->fence_id;
		submit->fence_id = 0;
	}

	if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		sync_file = sync_file_create(submit->user_fence);
		if (!sync_file)
			ret = -ENOMEM;
	}

	if (ret)
		goto out;

	submit_attach_object_fences(submit);

	if (msm_context_is_vmbind(ctx)) {
		/*
		 * If we are not using VM_BIND, submit_pin_objects() will
		 * have validated just the BOs attached to the submit.  In
		 * that case we don't need to validate the _entire_ vm,
		 * because userspace tracked what BOs are associated with
		 * the submit.
		 */
		ret = drm_gpuvm_validate(submit->vm, &submit->exec);
		if (ret)
			goto out;
	}

	/* The scheduler owns a ref now: */
	msm_gem_submit_get(submit);

	msm_rd_dump_submit(priv->rd, submit, NULL);

	drm_sched_entity_push_job(&submit->base);

	args->fence = submit->fence_id;
	queue->last_fence = submit->fence_id;

	msm_syncobj_reset(syncobjs_to_reset, args->nr_in_syncobjs);
	msm_syncobj_process_post_deps(post_deps, args->nr_out_syncobjs, submit->user_fence);

out:
	submit_cleanup(submit, !!ret);
out_unlock:
	mutex_unlock(&queue->lock);
out_post_unlock:
	if (ret) {
		if (out_fence_fd >= 0)
			put_unused_fd(out_fence_fd);
		if (sync_file)
			fput(sync_file->file);
	} else if (sync_file) {
		fd_install(out_fence_fd, sync_file->file);
		args->fence_fd = out_fence_fd;
	}

	if (!IS_ERR_OR_NULL(submit)) {
		msm_gem_submit_put(submit);
	} else {
		/*
		 * If the submit hasn't yet taken ownership of the queue
		 * then we need to drop the reference ourself:
		 */
		msm_submitqueue_put(queue);
	}
	if (!IS_ERR_OR_NULL(post_deps)) {
		for (i = 0; i < args->nr_out_syncobjs; ++i) {
			kfree(post_deps[i].chain);
			drm_syncobj_put(post_deps[i].syncobj);
		}
		kfree(post_deps);
	}

	if (!IS_ERR_OR_NULL(syncobjs_to_reset)) {
		for (i = 0; i < args->nr_in_syncobjs; ++i) {
			if (syncobjs_to_reset[i])
				drm_syncobj_put(syncobjs_to_reset[i]);
		}
		kfree(syncobjs_to_reset);
	}

	return ret;
}