Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * AMD IOMMU driver
4 *
5 * Copyright (C) 2018 Advanced Micro Devices, Inc.
6 *
7 * Author: Gary R Hook <gary.hook@amd.com>
8 */
9
10#include <linux/debugfs.h>
11#include <linux/pci.h>
12
13#include "amd_iommu.h"
14#include "../irq_remapping.h"
15
16static struct dentry *amd_iommu_debugfs;
17
18#define MAX_NAME_LEN 20
19#define OFS_IN_SZ 8
20#define DEVID_IN_SZ 16
21
22static int sbdf = -1;
23
24static ssize_t iommu_mmio_write(struct file *filp, const char __user *ubuf,
25 size_t cnt, loff_t *ppos)
26{
27 struct seq_file *m = filp->private_data;
28 struct amd_iommu *iommu = m->private;
29 int ret, dbg_mmio_offset = iommu->dbg_mmio_offset = -1;
30
31 if (cnt > OFS_IN_SZ)
32 return -EINVAL;
33
34 ret = kstrtou32_from_user(ubuf, cnt, 0, &dbg_mmio_offset);
35 if (ret)
36 return ret;
37
38 if (dbg_mmio_offset > iommu->mmio_phys_end - sizeof(u64))
39 return -EINVAL;
40
41 iommu->dbg_mmio_offset = dbg_mmio_offset;
42 return cnt;
43}
44
45static int iommu_mmio_show(struct seq_file *m, void *unused)
46{
47 struct amd_iommu *iommu = m->private;
48 u64 value;
49 int dbg_mmio_offset = iommu->dbg_mmio_offset;
50
51 if (dbg_mmio_offset < 0 || dbg_mmio_offset >
52 iommu->mmio_phys_end - sizeof(u64)) {
53 seq_puts(m, "Please provide mmio register's offset\n");
54 return 0;
55 }
56
57 value = readq(iommu->mmio_base + dbg_mmio_offset);
58 seq_printf(m, "Offset:0x%x Value:0x%016llx\n", dbg_mmio_offset, value);
59
60 return 0;
61}
62DEFINE_SHOW_STORE_ATTRIBUTE(iommu_mmio);
63
64static ssize_t iommu_capability_write(struct file *filp, const char __user *ubuf,
65 size_t cnt, loff_t *ppos)
66{
67 struct seq_file *m = filp->private_data;
68 struct amd_iommu *iommu = m->private;
69 int ret, dbg_cap_offset = iommu->dbg_cap_offset = -1;
70
71 if (cnt > OFS_IN_SZ)
72 return -EINVAL;
73
74 ret = kstrtou32_from_user(ubuf, cnt, 0, &dbg_cap_offset);
75 if (ret)
76 return ret;
77
78 /* Capability register at offset 0x14 is the last IOMMU capability register. */
79 if (dbg_cap_offset > 0x14)
80 return -EINVAL;
81
82 iommu->dbg_cap_offset = dbg_cap_offset;
83 return cnt;
84}
85
86static int iommu_capability_show(struct seq_file *m, void *unused)
87{
88 struct amd_iommu *iommu = m->private;
89 u32 value;
90 int err, dbg_cap_offset = iommu->dbg_cap_offset;
91
92 if (dbg_cap_offset < 0 || dbg_cap_offset > 0x14) {
93 seq_puts(m, "Please provide capability register's offset in the range [0x00 - 0x14]\n");
94 return 0;
95 }
96
97 err = pci_read_config_dword(iommu->dev, iommu->cap_ptr + dbg_cap_offset, &value);
98 if (err) {
99 seq_printf(m, "Not able to read capability register at 0x%x\n",
100 dbg_cap_offset);
101 return 0;
102 }
103
104 seq_printf(m, "Offset:0x%x Value:0x%08x\n", dbg_cap_offset, value);
105
106 return 0;
107}
108DEFINE_SHOW_STORE_ATTRIBUTE(iommu_capability);
109
/*
 * Dump the IOMMU command buffer: the current head/tail ring offsets followed
 * by the four data words of every slot.
 */
static int iommu_cmdbuf_show(struct seq_file *m, void *unused)
{
	struct amd_iommu *iommu = m->private;
	struct iommu_cmd *cmd;
	unsigned long flag;
	u32 head, tail;
	int i;

	/*
	 * Hold iommu->lock so head/tail and the buffer contents are read as a
	 * consistent snapshot while command submission is blocked.
	 * NOTE(review): the whole CMD_BUFFER_ENTRIES print loop runs with IRQs
	 * off under a raw spinlock - long hold time; confirm acceptable here.
	 */
	raw_spin_lock_irqsave(&iommu->lock, flag);
	head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	/* Only bits [18:4] of the head/tail registers form the entry offset. */
	seq_printf(m, "CMD Buffer Head Offset:%d Tail Offset:%d\n",
		   (head >> 4) & 0x7fff, (tail >> 4) & 0x7fff);
	for (i = 0; i < CMD_BUFFER_ENTRIES; i++) {
		cmd = (struct iommu_cmd *)(iommu->cmd_buf + i * sizeof(*cmd));
		seq_printf(m, "%3d: %08x %08x %08x %08x\n", i, cmd->data[0],
			   cmd->data[1], cmd->data[2], cmd->data[3]);
	}
	raw_spin_unlock_irqrestore(&iommu->lock, flag);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(iommu_cmdbuf);
133
134static ssize_t devid_write(struct file *filp, const char __user *ubuf,
135 size_t cnt, loff_t *ppos)
136{
137 struct amd_iommu_pci_seg *pci_seg;
138 int seg, bus, slot, func;
139 struct amd_iommu *iommu;
140 char *srcid_ptr;
141 u16 devid;
142 int i;
143
144 sbdf = -1;
145
146 if (cnt >= DEVID_IN_SZ)
147 return -EINVAL;
148
149 srcid_ptr = memdup_user_nul(ubuf, cnt);
150 if (IS_ERR(srcid_ptr))
151 return PTR_ERR(srcid_ptr);
152
153 i = sscanf(srcid_ptr, "%x:%x:%x.%x", &seg, &bus, &slot, &func);
154 if (i != 4) {
155 i = sscanf(srcid_ptr, "%x:%x.%x", &bus, &slot, &func);
156 if (i != 3) {
157 kfree(srcid_ptr);
158 return -EINVAL;
159 }
160 seg = 0;
161 }
162
163 devid = PCI_DEVID(bus, PCI_DEVFN(slot, func));
164
165 /* Check if user device id input is a valid input */
166 for_each_pci_segment(pci_seg) {
167 if (pci_seg->id != seg)
168 continue;
169 if (devid > pci_seg->last_bdf) {
170 kfree(srcid_ptr);
171 return -EINVAL;
172 }
173 iommu = pci_seg->rlookup_table[devid];
174 if (!iommu) {
175 kfree(srcid_ptr);
176 return -ENODEV;
177 }
178 break;
179 }
180
181 if (pci_seg->id != seg) {
182 kfree(srcid_ptr);
183 return -EINVAL;
184 }
185
186 sbdf = PCI_SEG_DEVID_TO_SBDF(seg, devid);
187
188 kfree(srcid_ptr);
189
190 return cnt;
191}
192
193static int devid_show(struct seq_file *m, void *unused)
194{
195 u16 devid;
196 int sbdf_shadow = sbdf;
197
198 if (sbdf_shadow >= 0) {
199 devid = PCI_SBDF_TO_DEVID(sbdf_shadow);
200 seq_printf(m, "%04x:%02x:%02x.%x\n", PCI_SBDF_TO_SEGID(sbdf_shadow),
201 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid));
202 } else
203 seq_puts(m, "No or Invalid input provided\n");
204
205 return 0;
206}
207DEFINE_SHOW_STORE_ATTRIBUTE(devid);
208
209static void dump_dte(struct seq_file *m, struct amd_iommu_pci_seg *pci_seg, u16 devid)
210{
211 struct dev_table_entry *dev_table;
212 struct amd_iommu *iommu;
213
214 iommu = pci_seg->rlookup_table[devid];
215 if (!iommu)
216 return;
217
218 dev_table = get_dev_table(iommu);
219 if (!dev_table) {
220 seq_puts(m, "Device table not found");
221 return;
222 }
223
224 seq_printf(m, "%-12s %16s %16s %16s %16s iommu\n", "DeviceId",
225 "QWORD[3]", "QWORD[2]", "QWORD[1]", "QWORD[0]");
226 seq_printf(m, "%04x:%02x:%02x.%x ", pci_seg->id, PCI_BUS_NUM(devid),
227 PCI_SLOT(devid), PCI_FUNC(devid));
228 for (int i = 3; i >= 0; --i)
229 seq_printf(m, "%016llx ", dev_table[devid].data[i]);
230 seq_printf(m, "iommu%d\n", iommu->index);
231}
232
233static int iommu_devtbl_show(struct seq_file *m, void *unused)
234{
235 struct amd_iommu_pci_seg *pci_seg;
236 u16 seg, devid;
237 int sbdf_shadow = sbdf;
238
239 if (sbdf_shadow < 0) {
240 seq_puts(m, "Enter a valid device ID to 'devid' file\n");
241 return 0;
242 }
243 seg = PCI_SBDF_TO_SEGID(sbdf_shadow);
244 devid = PCI_SBDF_TO_DEVID(sbdf_shadow);
245
246 for_each_pci_segment(pci_seg) {
247 if (pci_seg->id != seg)
248 continue;
249 dump_dte(m, pci_seg, devid);
250 break;
251 }
252
253 return 0;
254}
255DEFINE_SHOW_ATTRIBUTE(iommu_devtbl);
256
257static void dump_128_irte(struct seq_file *m, struct irq_remap_table *table, u16 int_tab_len)
258{
259 struct irte_ga *ptr, *irte;
260 int index;
261
262 for (index = 0; index < int_tab_len; index++) {
263 ptr = (struct irte_ga *)table->table;
264 irte = &ptr[index];
265
266 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
267 !irte->lo.fields_vapic.valid)
268 continue;
269 else if (!irte->lo.fields_remap.valid)
270 continue;
271 seq_printf(m, "IRT[%04d] %016llx %016llx\n", index, irte->hi.val, irte->lo.val);
272 }
273}
274
275static void dump_32_irte(struct seq_file *m, struct irq_remap_table *table, u16 int_tab_len)
276{
277 union irte *ptr, *irte;
278 int index;
279
280 for (index = 0; index < int_tab_len; index++) {
281 ptr = (union irte *)table->table;
282 irte = &ptr[index];
283
284 if (!irte->fields.valid)
285 continue;
286 seq_printf(m, "IRT[%04d] %08x\n", index, irte->val);
287 }
288}
289
/*
 * Dump the interrupt remapping table for @devid within @pci_seg, if one is
 * configured and its DTE-encoded length is one of the supported values.
 */
static void dump_irte(struct seq_file *m, u16 devid, struct amd_iommu_pci_seg *pci_seg)
{
	struct dev_table_entry *dev_table;
	struct irq_remap_table *table;
	struct amd_iommu *iommu;
	unsigned long flags;
	u16 int_tab_len;

	table = pci_seg->irq_lookup_table[devid];
	if (!table) {
		seq_printf(m, "IRQ lookup table not set for %04x:%02x:%02x:%x\n",
			   pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid));
		return;
	}

	iommu = pci_seg->rlookup_table[devid];
	if (!iommu)
		return;

	dev_table = get_dev_table(iommu);
	if (!dev_table) {
		seq_puts(m, "Device table not found");
		return;
	}

	/* IntTabLen is an encoded entry count carried in DTE qword 2. */
	int_tab_len = dev_table[devid].data[2] & DTE_INTTABLEN_MASK;
	if (int_tab_len != DTE_INTTABLEN_512 && int_tab_len != DTE_INTTABLEN_2K) {
		seq_puts(m, "The device's DTE contains an invalid IRT length value.");
		return;
	}

	seq_printf(m, "DeviceId %04x:%02x:%02x.%x\n", pci_seg->id, PCI_BUS_NUM(devid),
		   PCI_SLOT(devid), PCI_FUNC(devid));

	/* Hold the table lock so entries can't be (in)validated mid-dump. */
	raw_spin_lock_irqsave(&table->lock, flags);
	/*
	 * BIT(int_tab_len >> 1) converts the encoded IntTabLen field into the
	 * actual number of entries; 128-bit GA IRTEs are dumped when guest
	 * interrupt mode is enabled, 32-bit legacy IRTEs otherwise.
	 * NOTE(review): assumes the length encoding starts at bit 1 of the
	 * masked field - confirm against the DTE_INTTABLEN_* definitions.
	 */
	if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
		dump_128_irte(m, table, BIT(int_tab_len >> 1));
	else
		dump_32_irte(m, table, BIT(int_tab_len >> 1));
	seq_puts(m, "\n");
	raw_spin_unlock_irqrestore(&table->lock, flags);
}
332
333static int iommu_irqtbl_show(struct seq_file *m, void *unused)
334{
335 struct amd_iommu_pci_seg *pci_seg;
336 u16 devid, seg;
337 int sbdf_shadow = sbdf;
338
339 if (!irq_remapping_enabled) {
340 seq_puts(m, "Interrupt remapping is disabled\n");
341 return 0;
342 }
343
344 if (sbdf_shadow < 0) {
345 seq_puts(m, "Enter a valid device ID to 'devid' file\n");
346 return 0;
347 }
348
349 seg = PCI_SBDF_TO_SEGID(sbdf_shadow);
350 devid = PCI_SBDF_TO_DEVID(sbdf_shadow);
351
352 for_each_pci_segment(pci_seg) {
353 if (pci_seg->id != seg)
354 continue;
355 dump_irte(m, devid, pci_seg);
356 break;
357 }
358
359 return 0;
360}
361DEFINE_SHOW_ATTRIBUTE(iommu_irqtbl);
362
363void amd_iommu_debugfs_setup(void)
364{
365 struct amd_iommu *iommu;
366 char name[MAX_NAME_LEN + 1];
367
368 amd_iommu_debugfs = debugfs_create_dir("amd", iommu_debugfs_dir);
369
370 for_each_iommu(iommu) {
371 iommu->dbg_mmio_offset = -1;
372 iommu->dbg_cap_offset = -1;
373
374 snprintf(name, MAX_NAME_LEN, "iommu%02d", iommu->index);
375 iommu->debugfs = debugfs_create_dir(name, amd_iommu_debugfs);
376
377 debugfs_create_file("mmio", 0644, iommu->debugfs, iommu,
378 &iommu_mmio_fops);
379 debugfs_create_file("capability", 0644, iommu->debugfs, iommu,
380 &iommu_capability_fops);
381 debugfs_create_file("cmdbuf", 0444, iommu->debugfs, iommu,
382 &iommu_cmdbuf_fops);
383 }
384
385 debugfs_create_file("devid", 0644, amd_iommu_debugfs, NULL,
386 &devid_fops);
387 debugfs_create_file("devtbl", 0444, amd_iommu_debugfs, NULL,
388 &iommu_devtbl_fops);
389 debugfs_create_file("irqtbl", 0444, amd_iommu_debugfs, NULL,
390 &iommu_irqtbl_fops);
391}