Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
4 */
5
6#include <drm/amdxdna_accel.h>
7#include <drm/drm_accel.h>
8#include <drm/drm_drv.h>
9#include <drm/drm_gem.h>
10#include <drm/drm_gem_shmem_helper.h>
11#include <drm/drm_ioctl.h>
12#include <drm/drm_managed.h>
13#include <drm/gpu_scheduler.h>
14#include <linux/iommu.h>
15#include <linux/pci.h>
16
17#include "amdxdna_ctx.h"
18#include "amdxdna_gem.h"
19#include "amdxdna_pci_drv.h"
20#include "amdxdna_pm.h"
21
/*
 * NPU firmware images, keyed by "<device_id>_<rev_id>" directory name so the
 * loader can pick the blob matching the probed silicon (see amdxdna_ids).
 * Listing them here lets userspace tooling bundle them into the initramfs.
 */
MODULE_FIRMWARE("amdnpu/1502_00/npu.sbin");
MODULE_FIRMWARE("amdnpu/17f0_10/npu.sbin");
MODULE_FIRMWARE("amdnpu/17f0_11/npu.sbin");
MODULE_FIRMWARE("amdnpu/17f0_20/npu.sbin");
/* npu_7.sbin: alternate firmware variant — NOTE(review): exact meaning of the
 * "_7" suffix is not visible in this file; confirm against firmware docs.
 */
MODULE_FIRMWARE("amdnpu/1502_00/npu_7.sbin");
MODULE_FIRMWARE("amdnpu/17f0_10/npu_7.sbin");
MODULE_FIRMWARE("amdnpu/17f0_11/npu_7.sbin");
29
/*
 * Driver UAPI version history. Bump AMDXDNA_DRIVER_MINOR whenever new
 * functionality is exposed to userspace; userspace can gate features on the
 * (major, minor) pair reported through the DRM version ioctl.
 *
 * 0.0: Initial version
 * 0.1: Support getting all hardware contexts by DRM_IOCTL_AMDXDNA_GET_ARRAY
 * 0.2: Support getting last error hardware error
 * 0.3: Support firmware debug buffer
 * 0.4: Support getting resource information
 * 0.5: Support getting telemetry data
 * 0.6: Support preemption
 * 0.7: Support getting power and utilization data
 * 0.8: Support BO usage query
 */
#define AMDXDNA_DRIVER_MAJOR 0
#define AMDXDNA_DRIVER_MINOR 8
43
/*
 * Bind the driver base on (vendor_id, device_id) pair and later use the
 * (device_id, rev_id) pair as a key to select the devices. The devices with
 * same device_id have very similar interface to host driver.
 */
static const struct pci_device_id pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1502) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x17f0) },
	{0} /* sentinel */
};

MODULE_DEVICE_TABLE(pci, pci_ids);
56
/*
 * (device_id, rev_id) -> per-generation device info table, consulted by
 * amdxdna_get_dev_info() at probe time to select the NPU-specific ops.
 */
static const struct amdxdna_device_id amdxdna_ids[] = {
	{ 0x1502, 0x0, &dev_npu1_info },
	{ 0x17f0, 0x10, &dev_npu4_info },
	{ 0x17f0, 0x11, &dev_npu5_info },
	{ 0x17f0, 0x20, &dev_npu6_info },
	{0} /* sentinel */
};
64
/*
 * Per-fd open hook: allocate an amdxdna_client, bind the opening process'
 * address space to the device via IOMMU SVA (when the device does not manage
 * IOVAs itself), and register the client on the device's client list.
 *
 * Returns 0 on success or a negative errno; on failure nothing remains
 * registered and filp->driver_priv is left untouched.
 */
static int amdxdna_drm_open(struct drm_device *ddev, struct drm_file *filp)
{
	struct amdxdna_dev *xdna = to_xdna_dev(ddev);
	struct amdxdna_client *client;
	int ret;

	client = kzalloc_obj(*client);
	if (!client)
		return -ENOMEM;

	client->pid = pid_nr(rcu_access_pointer(filp->pid));
	client->xdna = xdna;
	/* Stays INVALID when the SVA path below is skipped. */
	client->pasid = IOMMU_PASID_INVALID;

	if (!amdxdna_iova_on(xdna)) {
		/*
		 * Device uses shared virtual addressing: bind the current
		 * mm and obtain a PASID the hardware uses to walk it.
		 */
		client->sva = iommu_sva_bind_device(xdna->ddev.dev, current->mm);
		if (IS_ERR(client->sva)) {
			ret = PTR_ERR(client->sva);
			XDNA_ERR(xdna, "SVA bind device failed, ret %d", ret);
			goto failed;
		}
		client->pasid = iommu_sva_get_pasid(client->sva);
		if (client->pasid == IOMMU_PASID_INVALID) {
			XDNA_ERR(xdna, "SVA get pasid failed");
			ret = -ENODEV;
			goto unbind_sva;
		}
	}
	/* Pin the mm struct itself (not its pages) for the client's lifetime. */
	client->mm = current->mm;
	mmgrab(client->mm);
	/* SRCU protects readers of the hwctx xarray against teardown. */
	init_srcu_struct(&client->hwctx_srcu);
	xa_init_flags(&client->hwctx_xa, XA_FLAGS_ALLOC);
	mutex_init(&client->mm_lock);

	mutex_lock(&xdna->dev_lock);
	list_add_tail(&client->node, &xdna->client_list);
	mutex_unlock(&xdna->dev_lock);

	filp->driver_priv = client;
	client->filp = filp;

	XDNA_DBG(xdna, "pid %d opened", client->pid);
	return 0;

unbind_sva:
	/* Guard kept for symmetry; sva is valid whenever we reach this label. */
	if (!IS_ERR_OR_NULL(client->sva))
		iommu_sva_unbind_device(client->sva);
failed:
	kfree(client);

	return ret;
}
117
/*
 * Tear down one client: unlink it, destroy all of its hardware contexts and
 * associated state, release its SVA binding and mm reference, then free it.
 *
 * Caller must hold xdna->dev_lock (both amdxdna_drm_close() and
 * amdxdna_remove() call this with the lock held). Teardown order matters:
 * hardware contexts are removed before the xarray/SRCU backing them is
 * destroyed, and the dev heap BO is dropped only after the contexts that may
 * reference it are gone.
 */
static void amdxdna_client_cleanup(struct amdxdna_client *client)
{
	list_del(&client->node);
	amdxdna_hwctx_remove_all(client);
	xa_destroy(&client->hwctx_xa);
	cleanup_srcu_struct(&client->hwctx_srcu);

	if (client->dev_heap)
		drm_gem_object_put(to_gobj(client->dev_heap));

	mutex_destroy(&client->mm_lock);

	if (!IS_ERR_OR_NULL(client->sva))
		iommu_sva_unbind_device(client->sva);
	/* Pairs with mmgrab() in amdxdna_drm_open(). */
	mmdrop(client->mm);

	kfree(client);
}
136
137static void amdxdna_drm_close(struct drm_device *ddev, struct drm_file *filp)
138{
139 struct amdxdna_client *client = filp->driver_priv;
140 struct amdxdna_dev *xdna = to_xdna_dev(ddev);
141 int idx;
142
143 XDNA_DBG(xdna, "closing pid %d", client->pid);
144
145 if (!drm_dev_enter(&xdna->ddev, &idx))
146 return;
147
148 mutex_lock(&xdna->dev_lock);
149 amdxdna_client_cleanup(client);
150 mutex_unlock(&xdna->dev_lock);
151
152 drm_dev_exit(idx);
153}
154
155static int amdxdna_drm_get_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
156{
157 struct amdxdna_client *client = filp->driver_priv;
158 struct amdxdna_dev *xdna = to_xdna_dev(dev);
159 struct amdxdna_drm_get_info *args = data;
160 int ret;
161
162 if (!xdna->dev_info->ops->get_aie_info)
163 return -EOPNOTSUPP;
164
165 XDNA_DBG(xdna, "Request parameter %u", args->param);
166 mutex_lock(&xdna->dev_lock);
167 ret = xdna->dev_info->ops->get_aie_info(client, args);
168 mutex_unlock(&xdna->dev_lock);
169 return ret;
170}
171
172static int amdxdna_drm_get_array_ioctl(struct drm_device *dev, void *data,
173 struct drm_file *filp)
174{
175 struct amdxdna_client *client = filp->driver_priv;
176 struct amdxdna_dev *xdna = to_xdna_dev(dev);
177 struct amdxdna_drm_get_array *args = data;
178
179 if (!xdna->dev_info->ops->get_array)
180 return -EOPNOTSUPP;
181
182 if (args->pad || !args->num_element || !args->element_size)
183 return -EINVAL;
184
185 guard(mutex)(&xdna->dev_lock);
186 return xdna->dev_info->ops->get_array(client, args);
187}
188
189static int amdxdna_drm_set_state_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
190{
191 struct amdxdna_client *client = filp->driver_priv;
192 struct amdxdna_dev *xdna = to_xdna_dev(dev);
193 struct amdxdna_drm_set_state *args = data;
194 int ret;
195
196 if (!xdna->dev_info->ops->set_aie_state)
197 return -EOPNOTSUPP;
198
199 XDNA_DBG(xdna, "Request parameter %u", args->param);
200 mutex_lock(&xdna->dev_lock);
201 ret = xdna->dev_info->ops->set_aie_state(client, args);
202 mutex_unlock(&xdna->dev_lock);
203
204 return ret;
205}
206
/*
 * Driver ioctl table. All entries use flags 0 (any opener may call them)
 * except SET_STATE, which is restricted to DRM_ROOT_ONLY.
 */
static const struct drm_ioctl_desc amdxdna_drm_ioctls[] = {
	/* Context */
	DRM_IOCTL_DEF_DRV(AMDXDNA_CREATE_HWCTX, amdxdna_drm_create_hwctx_ioctl, 0),
	DRM_IOCTL_DEF_DRV(AMDXDNA_DESTROY_HWCTX, amdxdna_drm_destroy_hwctx_ioctl, 0),
	DRM_IOCTL_DEF_DRV(AMDXDNA_CONFIG_HWCTX, amdxdna_drm_config_hwctx_ioctl, 0),
	/* BO */
	DRM_IOCTL_DEF_DRV(AMDXDNA_CREATE_BO, amdxdna_drm_create_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(AMDXDNA_GET_BO_INFO, amdxdna_drm_get_bo_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(AMDXDNA_SYNC_BO, amdxdna_drm_sync_bo_ioctl, 0),
	/* Execution */
	DRM_IOCTL_DEF_DRV(AMDXDNA_EXEC_CMD, amdxdna_drm_submit_cmd_ioctl, 0),
	/* AIE hardware */
	DRM_IOCTL_DEF_DRV(AMDXDNA_GET_INFO, amdxdna_drm_get_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(AMDXDNA_GET_ARRAY, amdxdna_drm_get_array_ioctl, 0),
	DRM_IOCTL_DEF_DRV(AMDXDNA_SET_STATE, amdxdna_drm_set_state_ioctl, DRM_ROOT_ONLY),
};
223
/*
 * Character-device file operations: standard DRM/accel plumbing plus shmem
 * GEM mmap. FOP_UNSIGNED_OFFSET lets mmap offsets use the full unsigned
 * range needed for GEM fake offsets.
 */
static const struct file_operations amdxdna_fops = {
	.owner = THIS_MODULE,
	.open = accel_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl = drm_compat_ioctl,
	.poll = drm_poll,
	.read = drm_read,
	.llseek = noop_llseek,
	.mmap = drm_gem_mmap,
	.fop_flags = FOP_UNSIGNED_OFFSET,
};
236
/*
 * DRM driver description. Registered as a compute accelerator (accel) node
 * rather than a render node; supports GEM objects and (timeline) syncobjs.
 * Non-static: referenced from other amdxdna compilation units.
 */
const struct drm_driver amdxdna_drm_drv = {
	.driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL |
		DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE,
	.fops = &amdxdna_fops,
	.name = "amdxdna_accel_driver",
	.desc = "AMD XDNA DRM implementation",
	.major = AMDXDNA_DRIVER_MAJOR,
	.minor = AMDXDNA_DRIVER_MINOR,
	.open = amdxdna_drm_open,
	.postclose = amdxdna_drm_close,
	.ioctls = amdxdna_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(amdxdna_drm_ioctls),

	.gem_create_object = amdxdna_gem_create_shmem_object_cb,
	.gem_prime_import = amdxdna_gem_prime_import,
};
253
254static const struct amdxdna_dev_info *
255amdxdna_get_dev_info(struct pci_dev *pdev)
256{
257 int i;
258
259 for (i = 0; i < ARRAY_SIZE(amdxdna_ids); i++) {
260 if (pdev->device == amdxdna_ids[i].device &&
261 pdev->revision == amdxdna_ids[i].revision)
262 return amdxdna_ids[i].dev_info;
263 }
264 return NULL;
265}
266
267static int amdxdna_probe(struct pci_dev *pdev, const struct pci_device_id *id)
268{
269 struct device *dev = &pdev->dev;
270 struct amdxdna_dev *xdna;
271 int ret;
272
273 xdna = devm_drm_dev_alloc(dev, &amdxdna_drm_drv, typeof(*xdna), ddev);
274 if (IS_ERR(xdna))
275 return PTR_ERR(xdna);
276
277 xdna->dev_info = amdxdna_get_dev_info(pdev);
278 if (!xdna->dev_info)
279 return -ENODEV;
280
281 drmm_mutex_init(&xdna->ddev, &xdna->dev_lock);
282 init_rwsem(&xdna->notifier_lock);
283 INIT_LIST_HEAD(&xdna->client_list);
284 pci_set_drvdata(pdev, xdna);
285
286 if (IS_ENABLED(CONFIG_LOCKDEP)) {
287 fs_reclaim_acquire(GFP_KERNEL);
288 might_lock(&xdna->notifier_lock);
289 fs_reclaim_release(GFP_KERNEL);
290 }
291
292 ret = amdxdna_iommu_init(xdna);
293 if (ret)
294 return ret;
295
296 xdna->notifier_wq = alloc_ordered_workqueue("notifier_wq", WQ_MEM_RECLAIM);
297 if (!xdna->notifier_wq) {
298 ret = -ENOMEM;
299 goto iommu_fini;
300 }
301
302 mutex_lock(&xdna->dev_lock);
303 ret = xdna->dev_info->ops->init(xdna);
304 mutex_unlock(&xdna->dev_lock);
305 if (ret) {
306 XDNA_ERR(xdna, "Hardware init failed, ret %d", ret);
307 goto destroy_notifier_wq;
308 }
309
310 ret = amdxdna_sysfs_init(xdna);
311 if (ret) {
312 XDNA_ERR(xdna, "Create amdxdna attrs failed: %d", ret);
313 goto failed_dev_fini;
314 }
315
316 ret = drm_dev_register(&xdna->ddev, 0);
317 if (ret) {
318 XDNA_ERR(xdna, "DRM register failed, ret %d", ret);
319 goto failed_sysfs_fini;
320 }
321
322 return 0;
323
324failed_sysfs_fini:
325 amdxdna_sysfs_fini(xdna);
326failed_dev_fini:
327 mutex_lock(&xdna->dev_lock);
328 xdna->dev_info->ops->fini(xdna);
329 mutex_unlock(&xdna->dev_lock);
330destroy_notifier_wq:
331 destroy_workqueue(xdna->notifier_wq);
332iommu_fini:
333 amdxdna_iommu_fini(xdna);
334 return ret;
335}
336
/*
 * PCI remove: unwind amdxdna_probe(). Ordering is deliberate: flush and
 * destroy the notifier workqueue first, then unplug the DRM device so no
 * new ioctls/opens can race the teardown, then drain any clients that still
 * hold the node open before shutting down the hardware and the IOMMU setup.
 */
static void amdxdna_remove(struct pci_dev *pdev)
{
	struct amdxdna_dev *xdna = pci_get_drvdata(pdev);
	struct amdxdna_client *client;

	destroy_workqueue(xdna->notifier_wq);

	/* After this, amdxdna_drm_close() bails out via drm_dev_enter(). */
	drm_dev_unplug(&xdna->ddev);
	amdxdna_sysfs_fini(xdna);

	mutex_lock(&xdna->dev_lock);
	/* Re-read the list head each pass: cleanup unlinks the entry. */
	client = list_first_entry_or_null(&xdna->client_list,
					  struct amdxdna_client, node);
	while (client) {
		amdxdna_client_cleanup(client);

		client = list_first_entry_or_null(&xdna->client_list,
						  struct amdxdna_client, node);
	}

	xdna->dev_info->ops->fini(xdna);
	mutex_unlock(&xdna->dev_lock);

	amdxdna_iommu_fini(xdna);
}
362
/*
 * Power management: the same suspend/resume pair services both system sleep
 * and runtime PM; no runtime-idle callback is provided.
 */
static const struct dev_pm_ops amdxdna_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(amdxdna_pm_suspend, amdxdna_pm_resume)
	RUNTIME_PM_OPS(amdxdna_pm_suspend, amdxdna_pm_resume, NULL)
};
367
/* PCI driver glue tying the ID table, probe/remove and PM ops together. */
static struct pci_driver amdxdna_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = pci_ids,
	.probe = amdxdna_probe,
	.remove = amdxdna_remove,
	.driver.pm = &amdxdna_pm_ops,
};
375
/* Generates module init/exit that register/unregister the PCI driver. */
module_pci_driver(amdxdna_pci_driver);

MODULE_LICENSE("GPL");
/* Imports the AMD_PMF symbol namespace — NOTE(review): the PMF symbols used
 * are not visible in this file; presumably consumed by another amdxdna unit.
 */
MODULE_IMPORT_NS("AMD_PMF");
MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
MODULE_DESCRIPTION("amdxdna driver");