// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2025 Intel Corporation
 */

#include <linux/cred.h>
#include <linux/firmware.h>
#include <linux/hashtable.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/workqueue.h>
#include <generated/utsrelease.h>

#include <drm/drm_accel.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>

#include "ivpu_coredump.h"
#include "ivpu_debugfs.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
#include "ivpu_ms.h"
#include "ivpu_pm.h"
#include "ivpu_sysfs.h"
#include "vpu_boot_api.h"

#ifndef DRIVER_VERSION_STR
#define DRIVER_VERSION_STR "1.0.0 " UTS_RELEASE
#endif

int ivpu_dbg_mask;
module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");

int ivpu_test_mode;
#if IS_ENABLED(CONFIG_DRM_ACCEL_IVPU_DEBUG)
module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644);
MODULE_PARM_DESC(test_mode, "Test mode mask. See IVPU_TEST_MODE_* macros.");
#endif

u8 ivpu_pll_min_ratio;
module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
MODULE_PARM_DESC(pll_min_ratio, "Minimum PLL ratio used to set NPU frequency");

u8 ivpu_pll_max_ratio = U8_MAX;
module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set NPU frequency");

int ivpu_sched_mode = IVPU_SCHED_MODE_AUTO;
module_param_named(sched_mode, ivpu_sched_mode, int, 0444);
MODULE_PARM_DESC(sched_mode, "Scheduler mode: -1 - Use default scheduler, 0 - Use OS scheduler (supported on 37XX - 50XX), 1 - Use HW scheduler");

bool ivpu_disable_mmu_cont_pages;
module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0444);
MODULE_PARM_DESC(disable_mmu_cont_pages, "Disable MMU contiguous pages optimization");

bool ivpu_force_snoop;
module_param_named(force_snoop, ivpu_force_snoop, bool, 0444);
MODULE_PARM_DESC(force_snoop, "Force snooping for NPU host memory access");

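/*
 * Allocate a reference-counted per-UID limits record and add it to the
 * vdev->user_limits hash table. The root user may use every context and
 * doorbell; any other user gets half of each. Must be called with
 * vdev->user_limits_lock held.
 */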
static struct ivpu_user_limits *ivpu_user_limits_alloc(struct ivpu_device *vdev, uid_t uid)
{
	struct ivpu_user_limits *limits;

	limits = kzalloc(sizeof(*limits), GFP_KERNEL);
	if (!limits)
		return ERR_PTR(-ENOMEM);

	kref_init(&limits->ref);
	atomic_set(&limits->db_count, 0);
	limits->vdev = vdev;
	limits->uid = uid;

	/* Allow the root user to allocate all contexts */
	if (uid == 0) {
		limits->max_ctx_count = ivpu_get_context_count(vdev);
		limits->max_db_count = ivpu_get_doorbell_count(vdev);
	} else {
		limits->max_ctx_count = ivpu_get_context_count(vdev) / 2;
		limits->max_db_count = ivpu_get_doorbell_count(vdev) / 2;
	}

	hash_add(vdev->user_limits, &limits->hash_node, uid);

	return limits;
}

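/*
 * Find the limits record for the current user and take a reference, or
 * allocate a fresh record on first use. Fails with ERR_PTR(-EMFILE) once
 * the user already holds max_ctx_count contexts.
 */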
static struct ivpu_user_limits *ivpu_user_limits_get(struct ivpu_device *vdev)
{
	struct ivpu_user_limits *limits;
	uid_t uid = current_uid().val;

	guard(mutex)(&vdev->user_limits_lock);

	hash_for_each_possible(vdev->user_limits, limits, hash_node, uid) {
		if (limits->uid == uid) {
			if (kref_read(&limits->ref) >= limits->max_ctx_count) {
				ivpu_dbg(vdev, IOCTL, "User %u exceeded max ctx count %u\n", uid,
					 limits->max_ctx_count);
				return ERR_PTR(-EMFILE);
			}

			kref_get(&limits->ref);
			return limits;
		}
	}

	return ivpu_user_limits_alloc(vdev, uid);
}

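/* kref release callback; runs with vdev->user_limits_lock held */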
static void ivpu_user_limits_release(struct kref *ref)
{
	struct ivpu_user_limits *limits = container_of(ref, struct ivpu_user_limits, ref);
	struct ivpu_device *vdev = limits->vdev;

	lockdep_assert_held(&vdev->user_limits_lock);
	drm_WARN_ON(&vdev->drm, atomic_read(&limits->db_count));
	hash_del(&limits->hash_node);
	kfree(limits);
}

static void ivpu_user_limits_put(struct ivpu_device *vdev, struct ivpu_user_limits *limits)
{
	guard(mutex)(&vdev->user_limits_lock);
	kref_put(&limits->ref, ivpu_user_limits_release);
}

struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
{
	struct ivpu_device *vdev = file_priv->vdev;

	kref_get(&file_priv->ref);

	ivpu_dbg(vdev, KREF, "file_priv get: ctx %u refcount %u\n",
		 file_priv->ctx.id, kref_read(&file_priv->ref));

	return file_priv;
}

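/*
 * Detach a file context from the device: release its command queues,
 * unbind its buffer objects, tear down its MMU context and free its
 * context ID. A no-op if the context is already unbound.
 */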
static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv)
{
	mutex_lock(&file_priv->lock);
	if (file_priv->bound) {
		ivpu_dbg(vdev, FILE, "file_priv unbind: ctx %u\n", file_priv->ctx.id);

		ivpu_cmdq_release_all_locked(file_priv);
		ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx);
		ivpu_mmu_context_fini(vdev, &file_priv->ctx);
		file_priv->bound = false;
		drm_WARN_ON(&vdev->drm, !xa_erase_irq(&vdev->context_xa, file_priv->ctx.id));
	}
	mutex_unlock(&file_priv->lock);
}

static void file_priv_release(struct kref *ref)
{
	struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref);
	struct ivpu_device *vdev = file_priv->vdev;

	ivpu_dbg(vdev, FILE, "file_priv release: ctx %u bound %d\n",
		 file_priv->ctx.id, (bool)file_priv->bound);

	pm_runtime_get_sync(vdev->drm.dev);
	mutex_lock(&vdev->context_list_lock);
	file_priv_unbind(vdev, file_priv);
	drm_WARN_ON(&vdev->drm, !xa_empty(&file_priv->cmdq_xa));
	xa_destroy(&file_priv->cmdq_xa);
	mutex_unlock(&vdev->context_list_lock);
	pm_runtime_put_autosuspend(vdev->drm.dev);

	ivpu_user_limits_put(vdev, file_priv->user_limits);
	mutex_destroy(&file_priv->ms_lock);
	mutex_destroy(&file_priv->lock);
	kfree(file_priv);
}

void ivpu_file_priv_put(struct ivpu_file_priv **link)
{
	struct ivpu_file_priv *file_priv = *link;
	struct ivpu_device *vdev = file_priv->vdev;

	ivpu_dbg(vdev, KREF, "file_priv put: ctx %u refcount %u\n",
		 file_priv->ctx.id, kref_read(&file_priv->ref));

	*link = NULL;
	kref_put(&file_priv->ref, file_priv_release);
}

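/* Report whether the device and firmware support a DRM_IVPU_CAP_* capability */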
bool ivpu_is_capable(struct ivpu_device *vdev, u32 capability)
{
	switch (capability) {
	case DRM_IVPU_CAP_METRIC_STREAMER:
		return true;
	case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
		return true;
	case DRM_IVPU_CAP_BO_CREATE_FROM_USERPTR:
		return true;
	case DRM_IVPU_CAP_MANAGE_CMDQ:
		return vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW;
	default:
		return false;
	}
}

static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	struct drm_ivpu_param *args = data;
	int ret = 0;
	int idx;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	switch (args->param) {
	case DRM_IVPU_PARAM_DEVICE_ID:
		args->value = pdev->device;
		break;
	case DRM_IVPU_PARAM_DEVICE_REVISION:
		args->value = pdev->revision;
		break;
	case DRM_IVPU_PARAM_PLATFORM_TYPE:
		args->value = vdev->platform;
		break;
	case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
		args->value = ivpu_hw_dpu_max_freq_get(vdev);
		break;
	case DRM_IVPU_PARAM_NUM_CONTEXTS:
		args->value = file_priv->user_limits->max_ctx_count;
		break;
	case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
		args->value = vdev->hw->ranges.user.start;
		break;
	case DRM_IVPU_PARAM_CONTEXT_ID:
		args->value = file_priv->ctx.id;
		break;
	case DRM_IVPU_PARAM_FW_API_VERSION:
		if (args->index < VPU_FW_API_VER_NUM) {
			struct vpu_firmware_header *fw_hdr;

			fw_hdr = (struct vpu_firmware_header *)vdev->fw->file->data;
			args->value = fw_hdr->api_version[args->index];
		} else {
			ret = -EINVAL;
		}
		break;
	case DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
		ret = ivpu_jsm_get_heartbeat(vdev, args->index, &args->value);
		break;
	case DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
		args->value = (u64)atomic64_inc_return(&vdev->unique_id_counter);
		break;
	case DRM_IVPU_PARAM_TILE_CONFIG:
		args->value = vdev->hw->tile_fuse;
		break;
	case DRM_IVPU_PARAM_SKU:
		args->value = vdev->hw->sku;
		break;
	case DRM_IVPU_PARAM_CAPABILITIES:
		args->value = ivpu_is_capable(vdev, args->index);
		break;
	case DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE:
		args->value = ivpu_fw_preempt_buf_size(vdev);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	drm_dev_exit(idx);
	return ret;
}

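/* No driver parameter is currently writable from user space */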
static int ivpu_set_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_ivpu_param *args = data;
	int ret = 0;

	switch (args->param) {
	default:
		ret = -EINVAL;
	}

	return ret;
}

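/*
 * DRM open callback: charge the new context against the per-UID limits,
 * allocate the file context, set up its MMU context and carve out the
 * per-file job ID and command queue ID ranges.
 */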
static int ivpu_open(struct drm_device *dev, struct drm_file *file)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct ivpu_file_priv *file_priv;
	struct ivpu_user_limits *limits;
	u32 ctx_id;
	int idx, ret;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	limits = ivpu_user_limits_get(vdev);
	if (IS_ERR(limits)) {
		ret = PTR_ERR(limits);
		goto err_dev_exit;
	}

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv) {
		ret = -ENOMEM;
		goto err_user_limits_put;
	}

	INIT_LIST_HEAD(&file_priv->ms_instance_list);

	file_priv->vdev = vdev;
	file_priv->bound = true;
	file_priv->user_limits = limits;
	kref_init(&file_priv->ref);
	mutex_init(&file_priv->lock);
	mutex_init(&file_priv->ms_lock);

	mutex_lock(&vdev->context_list_lock);

	ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, file_priv,
			   vdev->context_xa_limit, GFP_KERNEL);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
		goto err_unlock;
	}

	ivpu_mmu_context_init(vdev, &file_priv->ctx, ctx_id);

	file_priv->job_limit.min = FIELD_PREP(IVPU_JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
	file_priv->job_limit.max = file_priv->job_limit.min | IVPU_JOB_ID_JOB_MASK;

	xa_init_flags(&file_priv->cmdq_xa, XA_FLAGS_ALLOC1);
	file_priv->cmdq_limit.min = IVPU_CMDQ_MIN_ID;
	file_priv->cmdq_limit.max = IVPU_CMDQ_MAX_ID;

	mutex_unlock(&vdev->context_list_lock);
	drm_dev_exit(idx);

	file->driver_priv = file_priv;

	ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n",
		 ctx_id, current->comm, task_pid_nr(current));

	return 0;

err_unlock:
	mutex_unlock(&vdev->context_list_lock);
	mutex_destroy(&file_priv->ms_lock);
	mutex_destroy(&file_priv->lock);
	kfree(file_priv);
err_user_limits_put:
	ivpu_user_limits_put(vdev, limits);
err_dev_exit:
	drm_dev_exit(idx);
	return ret;
}

static void ivpu_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = to_ivpu_device(dev);

	ivpu_dbg(vdev, FILE, "file_priv close: ctx %u process %s pid %d\n",
		 file_priv->ctx.id, current->comm, task_pid_nr(current));

	ivpu_ms_cleanup(file_priv);
	ivpu_file_priv_put(&file_priv);
}

static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(IVPU_GET_PARAM, ivpu_get_param_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_SET_PARAM, ivpu_set_param_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE, ivpu_bo_create_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_INFO, ivpu_bo_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_SUBMIT, ivpu_submit_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_WAIT, ivpu_bo_wait_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_START, ivpu_ms_start_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_DATA, ivpu_ms_get_data_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_STOP, ivpu_ms_stop_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_INFO, ivpu_ms_get_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_CMDQ_CREATE, ivpu_cmdq_create_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_CMDQ_DESTROY, ivpu_cmdq_destroy_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_CMDQ_SUBMIT, ivpu_cmdq_submit_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE_FROM_USERPTR, ivpu_bo_create_from_userptr_ioctl, 0),
};

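/*
 * Wait for the firmware boot handshake: poll the IPC boot channel until
 * the NPU posts its ready message or the boot timeout expires. Skipped
 * entirely in FW test mode.
 */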
static int ivpu_wait_for_ready(struct ivpu_device *vdev)
{
	struct ivpu_ipc_consumer cons;
	struct ivpu_ipc_hdr ipc_hdr;
	unsigned long timeout;
	int ret;

	if (ivpu_test_mode & IVPU_TEST_MODE_FW_TEST)
		return 0;

	ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG, NULL);

	timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot);
	while (1) {
		ivpu_ipc_irq_handler(vdev);
		ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0);
		if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout))
			break;

		cond_resched();
	}

	ivpu_ipc_consumer_del(vdev, &cons);

	if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) {
		ivpu_err(vdev, "Invalid NPU ready message: 0x%x\n", ipc_hdr.data_addr);
		return -EIO;
	}

	if (!ret)
		ivpu_dbg(vdev, PM, "NPU ready message received successfully\n");

	return ret;
}

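/* Set up HWS priority bands when the firmware uses the hardware scheduler */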
static int ivpu_hw_sched_init(struct ivpu_device *vdev)
{
	int ret = 0;

	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
		ret = ivpu_jsm_hws_setup_priority_bands(vdev);
		if (ret) {
			ivpu_err(vdev, "Failed to enable hw scheduler: %d\n", ret);
			return ret;
		}
	}

	return ret;
}

/**
 * ivpu_boot() - Start VPU firmware
 * @vdev: VPU device
 *
 * This function is paired with ivpu_shutdown() but it doesn't power up the
 * VPU because power up has to be called very early in ivpu_probe().
 *
 * Return: 0 on success, negative errno on failure.
 */
int ivpu_boot(struct ivpu_device *vdev)
{
	int ret;

	drm_WARN_ON(&vdev->drm, atomic_read(&vdev->job_timeout_counter));
	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));

	ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem_bp));
	vdev->fw->last_boot_mode = vdev->fw->next_boot_mode;

	ret = ivpu_hw_boot_fw(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to start the firmware: %d\n", ret);
		return ret;
	}

	ret = ivpu_wait_for_ready(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
		goto err_diagnose_failure;
	}

	ivpu_hw_irq_clear(vdev);
	enable_irq(vdev->irq);
	ivpu_hw_irq_enable(vdev);
	ivpu_ipc_enable(vdev);

	if (!ivpu_fw_is_warm_boot(vdev)) {
		ret = ivpu_pm_dct_init(vdev);
		if (ret)
			goto err_disable_ipc;

		ret = ivpu_hw_sched_init(vdev);
		if (ret)
			goto err_disable_ipc;
	}

	return 0;

err_disable_ipc:
	ivpu_ipc_disable(vdev);
	ivpu_hw_irq_disable(vdev);
	disable_irq(vdev->irq);
err_diagnose_failure:
	ivpu_hw_diagnose_failure(vdev);
	ivpu_mmu_evtq_dump(vdev);
	ivpu_dev_coredump(vdev);
	return ret;
}

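/* Quiesce the device before a reset: mask IRQs, flush pending work, stop IPC and the MMU */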
void ivpu_prepare_for_reset(struct ivpu_device *vdev)
{
	ivpu_hw_irq_disable(vdev);
	disable_irq(vdev->irq);
	flush_work(&vdev->irq_ipc_work);
	flush_work(&vdev->irq_dct_work);
	flush_work(&vdev->context_abort_work);
	ivpu_ipc_disable(vdev);
	ivpu_mmu_disable(vdev);
}

int ivpu_shutdown(struct ivpu_device *vdev)
{
	int ret;

	/* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
	pci_save_state(to_pci_dev(vdev->drm.dev));

	ret = ivpu_hw_power_down(vdev);
	if (ret)
		ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);

	pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);

	return ret;
}

static const struct file_operations ivpu_fops = {
	.owner = THIS_MODULE,
	DRM_ACCEL_FOPS,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = drm_show_fdinfo,
#endif
};

static const struct drm_driver driver = {
	.driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL,

	.open = ivpu_open,
	.postclose = ivpu_postclose,

	.gem_create_object = ivpu_gem_create_object,
	.gem_prime_import = ivpu_gem_prime_import,

	.ioctls = ivpu_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
	.fops = &ivpu_fops,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = drm_show_memory_stats,
#endif

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,

	.major = 1,
};

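/*
 * Allocate a single MSI/MSI-X vector and request the top-half handler.
 * IRQF_NO_AUTOEN keeps the line disabled until ivpu_boot() enables it.
 */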
static int ivpu_irq_init(struct ivpu_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (ret < 0) {
		ivpu_err(vdev, "Failed to allocate a MSI IRQ: %d\n", ret);
		return ret;
	}

	INIT_WORK(&vdev->irq_ipc_work, ivpu_ipc_irq_work_fn);
	INIT_WORK(&vdev->irq_dct_work, ivpu_pm_irq_dct_work_fn);
	INIT_WORK(&vdev->context_abort_work, ivpu_context_abort_work_fn);

	ivpu_irq_handlers_init(vdev);

	vdev->irq = pci_irq_vector(pdev, 0);

	ret = devm_request_irq(vdev->drm.dev, vdev->irq, ivpu_hw_irq_handler,
			       IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
	if (ret)
		ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);

	return ret;
}

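/*
 * Map BAR0 (NPU registers) and BAR4 (buttress registers), configure the
 * DMA mask for the hardware generation and enable PCI bus mastering.
 */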
static int ivpu_pci_init(struct ivpu_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	struct resource *bar0 = &pdev->resource[0];
	struct resource *bar4 = &pdev->resource[4];
	int ret;

	ivpu_dbg(vdev, MISC, "Mapping BAR0 (RegV) %pR\n", bar0);
	vdev->regv = devm_ioremap_resource(vdev->drm.dev, bar0);
	if (IS_ERR(vdev->regv)) {
		ivpu_err(vdev, "Failed to map bar 0: %pe\n", vdev->regv);
		return PTR_ERR(vdev->regv);
	}

	ivpu_dbg(vdev, MISC, "Mapping BAR4 (RegB) %pR\n", bar4);
	vdev->regb = devm_ioremap_resource(vdev->drm.dev, bar4);
	if (IS_ERR(vdev->regb)) {
		ivpu_err(vdev, "Failed to map bar 4: %pe\n", vdev->regb);
		return PTR_ERR(vdev->regb);
	}

	ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(vdev->hw->dma_bits));
	if (ret) {
		ivpu_err(vdev, "Failed to set DMA mask: %d\n", ret);
		return ret;
	}
	dma_set_max_seg_size(vdev->drm.dev, UINT_MAX);

	/* Clear any pending errors */
	pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);

	/* NPU does not require the 10 ms D3hot delay */
	pdev->d3hot_delay = 0;

	ret = pcim_enable_device(pdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable PCI device: %d\n", ret);
		return ret;
	}

	pci_set_master(pdev);

	return 0;
}

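/*
 * One-time device bring-up: allocate per-subsystem state, initialize PCI
 * and IRQ resources, power the hardware up early (the rest of init needs
 * register access), then bring up the MMU, firmware and IPC and boot the NPU.
 */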
static int ivpu_dev_init(struct ivpu_device *vdev)
{
	int ret;

	vdev->hw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->hw), GFP_KERNEL);
	if (!vdev->hw)
		return -ENOMEM;

	vdev->mmu = drmm_kzalloc(&vdev->drm, sizeof(*vdev->mmu), GFP_KERNEL);
	if (!vdev->mmu)
		return -ENOMEM;

	vdev->fw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->fw), GFP_KERNEL);
	if (!vdev->fw)
		return -ENOMEM;

	vdev->ipc = drmm_kzalloc(&vdev->drm, sizeof(*vdev->ipc), GFP_KERNEL);
	if (!vdev->ipc)
		return -ENOMEM;

	vdev->pm = drmm_kzalloc(&vdev->drm, sizeof(*vdev->pm), GFP_KERNEL);
	if (!vdev->pm)
		return -ENOMEM;

	if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX)
		vdev->hw->dma_bits = 48;
	else
		vdev->hw->dma_bits = 38;

	vdev->platform = IVPU_PLATFORM_INVALID;
	vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
	vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
	atomic64_set(&vdev->unique_id_counter, 0);
	atomic_set(&vdev->job_timeout_counter, 0);
	atomic_set(&vdev->faults_detected, 0);
	xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
	xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
	INIT_LIST_HEAD(&vdev->bo_list);
	hash_init(vdev->user_limits);

	vdev->db_limit.min = IVPU_MIN_DB;
	vdev->db_limit.max = IVPU_MAX_DB;

	ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
	if (ret)
		goto err_xa_destroy;

	ret = drmm_mutex_init(&vdev->drm, &vdev->user_limits_lock);
	if (ret)
		goto err_xa_destroy;

	ret = drmm_mutex_init(&vdev->drm, &vdev->submitted_jobs_lock);
	if (ret)
		goto err_xa_destroy;

	ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
	if (ret)
		goto err_xa_destroy;

	ret = ivpu_pci_init(vdev);
	if (ret)
		goto err_xa_destroy;

	ret = ivpu_irq_init(vdev);
	if (ret)
		goto err_xa_destroy;

	/* Init basic HW info based on buttress registers which are accessible before power up */
	ret = ivpu_hw_init(vdev);
	if (ret)
		goto err_xa_destroy;

	/* Power up early so the rest of init code can access VPU registers */
	ret = ivpu_hw_power_up(vdev);
	if (ret)
		goto err_shutdown;

	ivpu_mmu_global_context_init(vdev);

	ret = ivpu_mmu_init(vdev);
	if (ret)
		goto err_mmu_gctx_fini;

	ret = ivpu_mmu_reserved_context_init(vdev);
	if (ret)
		goto err_mmu_gctx_fini;

	ret = ivpu_fw_init(vdev);
	if (ret)
		goto err_mmu_rctx_fini;

	ret = ivpu_ipc_init(vdev);
	if (ret)
		goto err_fw_fini;

	ivpu_pm_init(vdev);

	ret = ivpu_boot(vdev);
	if (ret)
		goto err_ipc_fini;

	ivpu_job_done_consumer_init(vdev);
	ivpu_pm_enable(vdev);

	return 0;

err_ipc_fini:
	ivpu_ipc_fini(vdev);
err_fw_fini:
	ivpu_fw_fini(vdev);
err_mmu_rctx_fini:
	ivpu_mmu_reserved_context_fini(vdev);
err_mmu_gctx_fini:
	ivpu_mmu_global_context_fini(vdev);
err_shutdown:
	ivpu_shutdown(vdev);
err_xa_destroy:
	xa_destroy(&vdev->db_xa);
	xa_destroy(&vdev->submitted_jobs_xa);
	xa_destroy(&vdev->context_xa);
	return ret;
}

static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
{
	struct ivpu_file_priv *file_priv;
	unsigned long ctx_id;

	mutex_lock(&vdev->context_list_lock);

	xa_for_each(&vdev->context_xa, ctx_id, file_priv)
		file_priv_unbind(vdev, file_priv);

	mutex_unlock(&vdev->context_list_lock);
}

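/* Reverse of ivpu_dev_init(): abort jobs, shut the NPU down and tear down all subsystems */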
static void ivpu_dev_fini(struct ivpu_device *vdev)
{
	ivpu_jobs_abort_all(vdev);
	ivpu_pm_disable_recovery(vdev);
	ivpu_pm_disable(vdev);
	ivpu_prepare_for_reset(vdev);
	ivpu_shutdown(vdev);

	ivpu_ms_cleanup_all(vdev);
	ivpu_job_done_consumer_fini(vdev);
	ivpu_bo_unbind_all_user_contexts(vdev);

	ivpu_ipc_fini(vdev);
	ivpu_fw_fini(vdev);
	ivpu_mmu_reserved_context_fini(vdev);
	ivpu_mmu_global_context_fini(vdev);

	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->db_xa));
	xa_destroy(&vdev->db_xa);
	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
	xa_destroy(&vdev->submitted_jobs_xa);
	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->context_xa));
	xa_destroy(&vdev->context_xa);
}

static const struct pci_device_id ivpu_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_ARL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PTL_P) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_WCL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_NVL) },
	{}
};
MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);

static int ivpu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ivpu_device *vdev;
	int ret;

	vdev = devm_drm_dev_alloc(&pdev->dev, &driver, struct ivpu_device, drm);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	pci_set_drvdata(pdev, vdev);

	ret = ivpu_dev_init(vdev);
	if (ret)
		return ret;

	ivpu_debugfs_init(vdev);
	ivpu_sysfs_init(vdev);

	ret = drm_dev_register(&vdev->drm, 0);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register DRM device: %d\n", ret);
		ivpu_dev_fini(vdev);
	}

	return ret;
}

static void ivpu_remove(struct pci_dev *pdev)
{
	struct ivpu_device *vdev = pci_get_drvdata(pdev);

	drm_dev_unplug(&vdev->drm);
	ivpu_dev_fini(vdev);
}

static const struct dev_pm_ops ivpu_drv_pci_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(ivpu_pm_suspend_cb, ivpu_pm_resume_cb)
	SET_RUNTIME_PM_OPS(ivpu_pm_runtime_suspend_cb, ivpu_pm_runtime_resume_cb, NULL)
};

static const struct pci_error_handlers ivpu_drv_pci_err = {
	.reset_prepare = ivpu_pm_reset_prepare_cb,
	.reset_done = ivpu_pm_reset_done_cb,
};

static struct pci_driver ivpu_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = ivpu_pci_ids,
	.probe = ivpu_probe,
	.remove = ivpu_remove,
	.driver = {
		.pm = &ivpu_drv_pci_pm,
	},
	.err_handler = &ivpu_drv_pci_err,
};

module_pci_driver(ivpu_pci_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(DRIVER_VERSION_STR);