1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
4 * Author: Joerg Roedel <jroedel@suse.de>
5 * Leo Duran <leo.duran@amd.com>
6 */
7
8#define pr_fmt(fmt) "AMD-Vi: " fmt
9#define dev_fmt(fmt) pr_fmt(fmt)
10
11#include <linux/ratelimit.h>
12#include <linux/pci.h>
13#include <linux/acpi.h>
14#include <linux/pci-ats.h>
15#include <linux/bitmap.h>
16#include <linux/slab.h>
17#include <linux/string_choices.h>
18#include <linux/debugfs.h>
19#include <linux/scatterlist.h>
20#include <linux/dma-map-ops.h>
21#include <linux/dma-direct.h>
22#include <linux/idr.h>
23#include <linux/iommu-helper.h>
24#include <linux/delay.h>
25#include <linux/amd-iommu.h>
26#include <linux/notifier.h>
27#include <linux/export.h>
28#include <linux/irq.h>
29#include <linux/irqchip/irq-msi-lib.h>
30#include <linux/msi.h>
31#include <linux/irqdomain.h>
32#include <linux/percpu.h>
33#include <linux/cc_platform.h>
34#include <asm/irq_remapping.h>
35#include <asm/io_apic.h>
36#include <asm/apic.h>
37#include <asm/hw_irq.h>
38#include <asm/proto.h>
39#include <asm/iommu.h>
40#include <asm/gart.h>
41#include <asm/dma.h>
42#include <uapi/linux/iommufd.h>
43#include <linux/generic_pt/iommu.h>
44
45#include "amd_iommu.h"
46#include "iommufd.h"
47#include "../irq_remapping.h"
48#include "../iommu-pages.h"
49
50#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
51
52/* Reserved IOVA ranges */
53#define MSI_RANGE_START (0xfee00000)
54#define MSI_RANGE_END (0xfeefffff)
55#define HT_RANGE_START (0xfd00000000ULL)
56#define HT_RANGE_END (0xffffffffffULL)
57
58LIST_HEAD(ioapic_map);
59LIST_HEAD(hpet_map);
60LIST_HEAD(acpihid_map);
61
62const struct iommu_ops amd_iommu_ops;
63
64int amd_iommu_max_glx_val = -1;
65
66/*
 67 * AMD IOMMU allows up to 2^16 different protection domains. This IDA
 68 * keeps track of which ones are already in use.
69 */
70DEFINE_IDA(pdom_ids);
71
72static int amd_iommu_attach_device(struct iommu_domain *dom, struct device *dev,
73 struct iommu_domain *old);
74
75static void set_dte_entry(struct amd_iommu *iommu,
76 struct iommu_dev_data *dev_data,
77 phys_addr_t top_paddr, unsigned int top_level);
78
79static int device_flush_dte(struct iommu_dev_data *dev_data);
80
81static void amd_iommu_change_top(struct pt_iommu *iommu_table,
82 phys_addr_t top_paddr, unsigned int top_level);
83
84static void iommu_flush_dte_sync(struct amd_iommu *iommu, u16 devid);
85
86static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid);
87static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain);
88static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
89 bool enable);
90
91static void clone_aliases(struct amd_iommu *iommu, struct device *dev);
92
93static int iommu_completion_wait(struct amd_iommu *iommu);
94
95/****************************************************************************
96 *
97 * Helper functions
98 *
99 ****************************************************************************/
100
101static __always_inline void amd_iommu_atomic128_set(__int128 *ptr, __int128 val)
102{
103 /*
104 * Note:
105 * We use arch_cmpxchg128_local() because:
 106 * - We mainly need the cmpxchg16b instruction for the 128-bit store to the DTE
 107 * (the compare part is not needed since this function is already
 108 * protected by a spin_lock for this DTE).
 109 * - Neither LOCK_PREFIX nor a retry loop is needed because of the spin_lock.
110 */
111 arch_cmpxchg128_local(ptr, *ptr, val);
112}
113
114static void write_dte_upper128(struct dev_table_entry *ptr, struct dev_table_entry *new)
115{
116 struct dev_table_entry old;
117
118 old.data128[1] = ptr->data128[1];
119 /*
120 * Preserve DTE_DATA2_INTR_MASK. This needs to be
 120 * done here since it must run inside the
 121 * spin_lock(&dev_data->dte_lock) context.
123 */
124 new->data[2] &= ~DTE_DATA2_INTR_MASK;
125 new->data[2] |= old.data[2] & DTE_DATA2_INTR_MASK;
126
127 amd_iommu_atomic128_set(&ptr->data128[1], new->data128[1]);
128}
129
130static void write_dte_lower128(struct dev_table_entry *ptr, struct dev_table_entry *new)
131{
132 amd_iommu_atomic128_set(&ptr->data128[0], new->data128[0]);
133}
134
135/*
136 * Note:
 137 * The IOMMU reads the entire Device Table entry in a single 256-bit transaction,
 138 * but the driver programs the DTE using two 128-bit cmpxchg operations. So, the
 139 * driver needs to ensure the following:
 140 * - The DTE[V|GV] bit is written last when setting.
 141 * - The DTE[V|GV] bit is written first when clearing.
 142 *
 143 * This function is used only by code that updates the DMA translation part of the
 144 * DTE. So, only control bits related to DMA are considered when updating the entry.
145 */
146static void update_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data,
147 struct dev_table_entry *new)
148{
149 unsigned long flags;
150 struct dev_table_entry *dev_table = get_dev_table(iommu);
151 struct dev_table_entry *ptr = &dev_table[dev_data->devid];
152
153 spin_lock_irqsave(&dev_data->dte_lock, flags);
154
155 if (!(ptr->data[0] & DTE_FLAG_V)) {
156 /* Existing DTE is not valid. */
157 write_dte_upper128(ptr, new);
158 write_dte_lower128(ptr, new);
159 iommu_flush_dte_sync(iommu, dev_data->devid);
160 } else if (!(new->data[0] & DTE_FLAG_V)) {
161 /* Existing DTE is valid. New DTE is not valid. */
162 write_dte_lower128(ptr, new);
163 write_dte_upper128(ptr, new);
164 iommu_flush_dte_sync(iommu, dev_data->devid);
165 } else if (!FIELD_GET(DTE_FLAG_GV, ptr->data[0])) {
166 /*
167 * Both DTEs are valid.
168 * Existing DTE has no guest page table.
169 */
170 write_dte_upper128(ptr, new);
171 write_dte_lower128(ptr, new);
172 iommu_flush_dte_sync(iommu, dev_data->devid);
173 } else if (!FIELD_GET(DTE_FLAG_GV, new->data[0])) {
174 /*
175 * Both DTEs are valid.
 176 * Existing DTE has a guest page table,
 177 * new DTE has no guest page table.
178 */
179 write_dte_lower128(ptr, new);
180 write_dte_upper128(ptr, new);
181 iommu_flush_dte_sync(iommu, dev_data->devid);
182 } else if (FIELD_GET(DTE_GPT_LEVEL_MASK, ptr->data[2]) !=
183 FIELD_GET(DTE_GPT_LEVEL_MASK, new->data[2])) {
184 /*
 185 * Both DTEs are valid and have guest page tables,
 186 * but with a different number of levels. So, we need
 187 * to update both the upper and lower 128-bit values, which
 188 * requires disabling and flushing.
189 */
190 struct dev_table_entry clear = {};
191
192 /* First disable DTE */
193 write_dte_lower128(ptr, &clear);
194 iommu_flush_dte_sync(iommu, dev_data->devid);
195
196 /* Then update DTE */
197 write_dte_upper128(ptr, new);
198 write_dte_lower128(ptr, new);
199 iommu_flush_dte_sync(iommu, dev_data->devid);
200 } else {
201 /*
 202 * Both DTEs are valid and have guest page tables
 203 * with the same number of levels. We only need to
 204 * update the lower 128 bits, so no need to disable the DTE.
205 */
206 write_dte_lower128(ptr, new);
207 }
208
209 spin_unlock_irqrestore(&dev_data->dte_lock, flags);
210}
211
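/* Program a new DTE, propagate it to any PCI aliases, and flush it to the IOMMU. */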
212void amd_iommu_update_dte(struct amd_iommu *iommu,
213 struct iommu_dev_data *dev_data,
214 struct dev_table_entry *new)
215{
216 update_dte256(iommu, dev_data, new);
217 clone_aliases(iommu, dev_data->dev);
218 device_flush_dte(dev_data);
219 iommu_completion_wait(iommu);
220}
221
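/* Read both 128-bit halves of the DTE under the per-device DTE lock to get a consistent snapshot. */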
222static void get_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data,
223 struct dev_table_entry *dte)
224{
225 unsigned long flags;
226 struct dev_table_entry *ptr;
227 struct dev_table_entry *dev_table = get_dev_table(iommu);
228
229 ptr = &dev_table[dev_data->devid];
230
231 spin_lock_irqsave(&dev_data->dte_lock, flags);
232 dte->data128[0] = ptr->data128[0];
233 dte->data128[1] = ptr->data128[1];
234 spin_unlock_irqrestore(&dev_data->dte_lock, flags);
235}
236
237static inline bool pdom_is_v2_pgtbl_mode(struct protection_domain *pdom)
238{
239 return (pdom && (pdom->pd_mode == PD_MODE_V2));
240}
241
242static inline bool pdom_is_in_pt_mode(struct protection_domain *pdom)
243{
244 return (pdom->domain.type == IOMMU_DOMAIN_IDENTITY);
245}
246
247/*
248 * We cannot support PASID w/ existing v1 page table in the same domain
249 * since it will be nested. However, existing domain w/ v2 page table
250 * or passthrough mode can be used for PASID.
251 */
252static inline bool pdom_is_sva_capable(struct protection_domain *pdom)
253{
254 return pdom_is_v2_pgtbl_mode(pdom) || pdom_is_in_pt_mode(pdom);
255}
256
257static inline int get_acpihid_device_id(struct device *dev,
258 struct acpihid_map_entry **entry)
259{
260 struct acpi_device *adev = ACPI_COMPANION(dev);
261 struct acpihid_map_entry *p, *p1 = NULL;
262 int hid_count = 0;
263 bool fw_bug;
264
265 if (!adev)
266 return -ENODEV;
267
268 list_for_each_entry(p, &acpihid_map, list) {
269 if (acpi_dev_hid_uid_match(adev, p->hid,
270 p->uid[0] ? p->uid : NULL)) {
271 p1 = p;
272 fw_bug = false;
273 hid_count = 1;
274 break;
275 }
276
277 /*
278 * Count HID matches w/o UID, raise FW_BUG but allow exactly one match
279 */
280 if (acpi_dev_hid_match(adev, p->hid)) {
281 p1 = p;
282 hid_count++;
283 fw_bug = true;
284 }
285 }
286
287 if (!p1)
288 return -EINVAL;
289 if (fw_bug)
290 dev_err_once(dev, FW_BUG "No ACPI device matched UID, but %d device%s matched HID.\n",
291 hid_count, str_plural(hid_count));
292 if (hid_count > 1)
293 return -EINVAL;
294 if (entry)
295 *entry = p1;
296
297 return p1->devid;
298}
299
300static inline int get_device_sbdf_id(struct device *dev)
301{
302 int sbdf;
303
304 if (dev_is_pci(dev))
305 sbdf = get_pci_sbdf_id(to_pci_dev(dev));
306 else
307 sbdf = get_acpihid_device_id(dev, NULL);
308
309 return sbdf;
310}
311
312struct dev_table_entry *get_dev_table(struct amd_iommu *iommu)
313{
314 struct dev_table_entry *dev_table;
315 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
316
317 BUG_ON(pci_seg == NULL);
318 dev_table = pci_seg->dev_table;
319 BUG_ON(dev_table == NULL);
320
321 return dev_table;
322}
323
324static inline u16 get_device_segment(struct device *dev)
325{
326 u16 seg;
327
328 if (dev_is_pci(dev)) {
329 struct pci_dev *pdev = to_pci_dev(dev);
330
331 seg = pci_domain_nr(pdev->bus);
332 } else {
333 u32 devid = get_acpihid_device_id(dev, NULL);
334
335 seg = PCI_SBDF_TO_SEGID(devid);
336 }
337
338 return seg;
339}
340
341/* Writes the specific IOMMU for a device into the PCI segment rlookup table */
342void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid)
343{
344 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
345
346 pci_seg->rlookup_table[devid] = iommu;
347}
348
349static struct amd_iommu *__rlookup_amd_iommu(u16 seg, u16 devid)
350{
351 struct amd_iommu_pci_seg *pci_seg;
352
353 for_each_pci_segment(pci_seg) {
354 if (pci_seg->id == seg)
355 return pci_seg->rlookup_table[devid];
356 }
357 return NULL;
358}
359
360static struct amd_iommu *rlookup_amd_iommu(struct device *dev)
361{
362 u16 seg = get_device_segment(dev);
363 int devid = get_device_sbdf_id(dev);
364
365 if (devid < 0)
366 return NULL;
367 return __rlookup_amd_iommu(seg, PCI_SBDF_TO_DEVID(devid));
368}
369
370static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
371{
372 struct iommu_dev_data *dev_data;
373 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
374
375 dev_data = kzalloc_obj(*dev_data);
376 if (!dev_data)
377 return NULL;
378
379 mutex_init(&dev_data->mutex);
380 spin_lock_init(&dev_data->dte_lock);
381 dev_data->devid = devid;
382 ratelimit_default_init(&dev_data->rs);
383
384 llist_add(&dev_data->dev_data_list, &pci_seg->dev_data_list);
385 return dev_data;
386}
387
388struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
389{
390 struct iommu_dev_data *dev_data;
391 struct llist_node *node;
392 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
393
394 if (llist_empty(&pci_seg->dev_data_list))
395 return NULL;
396
397 node = pci_seg->dev_data_list.first;
398 llist_for_each_entry(dev_data, node, dev_data_list) {
399 if (dev_data->devid == devid)
400 return dev_data;
401 }
402
403 return NULL;
404}
405
406static int clone_alias(struct pci_dev *pdev_origin, u16 alias, void *data)
407{
408 struct dev_table_entry new;
409 struct amd_iommu *iommu;
410 struct iommu_dev_data *dev_data, *alias_data;
411 struct pci_dev *pdev = data;
412 u16 devid = pci_dev_id(pdev);
413 int ret = 0;
414
415 if (devid == alias)
416 return 0;
417
418 iommu = rlookup_amd_iommu(&pdev->dev);
419 if (!iommu)
420 return 0;
421
422 /* Copy the data from pdev */
423 dev_data = dev_iommu_priv_get(&pdev->dev);
424 if (!dev_data) {
425 pr_err("%s : Failed to get dev_data for 0x%x\n", __func__, devid);
426 ret = -EINVAL;
427 goto out;
428 }
429 get_dte256(iommu, dev_data, &new);
430
431 /* Setup alias */
432 alias_data = find_dev_data(iommu, alias);
433 if (!alias_data) {
434 pr_err("%s : Failed to get alias dev_data for 0x%x\n", __func__, alias);
435 ret = -EINVAL;
436 goto out;
437 }
438 update_dte256(iommu, alias_data, &new);
439
440 amd_iommu_set_rlookup_table(iommu, alias);
441out:
442 return ret;
443}
444
445static void clone_aliases(struct amd_iommu *iommu, struct device *dev)
446{
447 struct pci_dev *pdev;
448
449 if (!dev_is_pci(dev))
450 return;
451 pdev = to_pci_dev(dev);
452
453 /*
454 * The IVRS alias stored in the alias table may not be
 455 * part of the PCI DMA aliases if its bus differs
 456 * from that of the original device.
457 */
458 clone_alias(pdev, iommu->pci_seg->alias_table[pci_dev_id(pdev)], pdev);
459
460 pci_for_each_dma_alias(pdev, clone_alias, pdev);
461}
462
463static void setup_aliases(struct amd_iommu *iommu, struct device *dev)
464{
465 struct pci_dev *pdev = to_pci_dev(dev);
466 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
467 u16 ivrs_alias;
468
469 /* For ACPI HID devices, there are no aliases */
470 if (!dev_is_pci(dev))
471 return;
472
473 /*
474 * Add the IVRS alias to the pci aliases if it is on the same
475 * bus. The IVRS table may know about a quirk that we don't.
476 */
477 ivrs_alias = pci_seg->alias_table[pci_dev_id(pdev)];
478 if (ivrs_alias != pci_dev_id(pdev) &&
479 PCI_BUS_NUM(ivrs_alias) == pdev->bus->number)
480 pci_add_dma_alias(pdev, ivrs_alias & 0xff, 1);
481
482 clone_aliases(iommu, dev);
483}
484
485static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid)
486{
487 struct iommu_dev_data *dev_data;
488
489 dev_data = search_dev_data(iommu, devid);
490
491 if (dev_data == NULL) {
492 dev_data = alloc_dev_data(iommu, devid);
493 if (!dev_data)
494 return NULL;
495
496 if (translation_pre_enabled(iommu))
497 dev_data->defer_attach = true;
498 }
499
500 return dev_data;
501}
502
503/*
 504* Find or create an IOMMU group for an acpihid device.
505*/
506static struct iommu_group *acpihid_device_group(struct device *dev)
507{
508 struct acpihid_map_entry *p, *entry = NULL;
509 int devid;
510
511 devid = get_acpihid_device_id(dev, &entry);
512 if (devid < 0)
513 return ERR_PTR(devid);
514
515 list_for_each_entry(p, &acpihid_map, list) {
516 if ((devid == p->devid) && p->group)
517 entry->group = p->group;
518 }
519
520 if (!entry->group)
521 entry->group = generic_device_group(dev);
522 else
523 iommu_group_ref_get(entry->group);
524
525 return entry->group;
526}
527
528static inline bool pdev_pasid_supported(struct iommu_dev_data *dev_data)
529{
530 return (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP);
531}
532
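/* Collect the ATS, PRI and PASID capabilities advertised by the PCI device. */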
533static u32 pdev_get_caps(struct pci_dev *pdev)
534{
535 int features;
536 u32 flags = 0;
537
538 if (pci_ats_supported(pdev))
539 flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
540
541 if (pci_pri_supported(pdev))
542 flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
543
544 features = pci_pasid_features(pdev);
545 if (features >= 0) {
546 flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
547
548 if (features & PCI_PASID_CAP_EXEC)
549 flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
550
551 if (features & PCI_PASID_CAP_PRIV)
552 flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
553 }
554
555 return flags;
556}
557
558static inline int pdev_enable_cap_ats(struct pci_dev *pdev)
559{
560 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
561 int ret = -EINVAL;
562
563 if (dev_data->ats_enabled)
564 return 0;
565
566 if (amd_iommu_iotlb_sup &&
567 (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP)) {
568 ret = pci_enable_ats(pdev, PAGE_SHIFT);
569 if (!ret) {
570 dev_data->ats_enabled = 1;
571 dev_data->ats_qdep = pci_ats_queue_depth(pdev);
572 }
573 }
574
575 return ret;
576}
577
578static inline void pdev_disable_cap_ats(struct pci_dev *pdev)
579{
580 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
581
582 if (dev_data->ats_enabled) {
583 pci_disable_ats(pdev);
584 dev_data->ats_enabled = 0;
585 }
586}
587
588static inline int pdev_enable_cap_pri(struct pci_dev *pdev)
589{
590 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
591 int ret = -EINVAL;
592
593 if (dev_data->pri_enabled)
594 return 0;
595
596 if (!dev_data->ats_enabled)
597 return 0;
598
599 if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) {
600 /*
601 * First reset the PRI state of the device.
602 * FIXME: Hardcode number of outstanding requests for now
603 */
604 if (!pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32)) {
605 dev_data->pri_enabled = 1;
606 dev_data->pri_tlp = pci_prg_resp_pasid_required(pdev);
607
608 ret = 0;
609 }
610 }
611
612 return ret;
613}
614
615static inline void pdev_disable_cap_pri(struct pci_dev *pdev)
616{
617 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
618
619 if (dev_data->pri_enabled) {
620 pci_disable_pri(pdev);
621 dev_data->pri_enabled = 0;
622 }
623}
624
625static inline int pdev_enable_cap_pasid(struct pci_dev *pdev)
626{
627 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
628 int ret = -EINVAL;
629
630 if (dev_data->pasid_enabled)
631 return 0;
632
633 if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) {
634 /* Only allow access to user-accessible pages */
635 ret = pci_enable_pasid(pdev, 0);
636 if (!ret)
637 dev_data->pasid_enabled = 1;
638 }
639
640 return ret;
641}
642
643static inline void pdev_disable_cap_pasid(struct pci_dev *pdev)
644{
645 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
646
647 if (dev_data->pasid_enabled) {
648 pci_disable_pasid(pdev);
649 dev_data->pasid_enabled = 0;
650 }
651}
652
653static void pdev_enable_caps(struct pci_dev *pdev)
654{
655 pdev_enable_cap_pasid(pdev);
656 pdev_enable_cap_ats(pdev);
657 pdev_enable_cap_pri(pdev);
658}
659
660static void pdev_disable_caps(struct pci_dev *pdev)
661{
662 pdev_disable_cap_ats(pdev);
663 pdev_disable_cap_pasid(pdev);
664 pdev_disable_cap_pri(pdev);
665}
666
667/*
668 * This function checks if the driver got a valid device from the caller to
669 * avoid dereferencing invalid pointers.
670 */
671static bool check_device(struct device *dev)
672{
673 struct amd_iommu_pci_seg *pci_seg;
674 struct amd_iommu *iommu;
675 int devid, sbdf;
676
677 if (!dev)
678 return false;
679
680 sbdf = get_device_sbdf_id(dev);
681 if (sbdf < 0)
682 return false;
683 devid = PCI_SBDF_TO_DEVID(sbdf);
684
685 iommu = rlookup_amd_iommu(dev);
686 if (!iommu)
687 return false;
688
689 /* Out of our scope? */
690 pci_seg = iommu->pci_seg;
691 if (devid > pci_seg->last_bdf)
692 return false;
693
694 return true;
695}
696
697static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
698{
699 struct iommu_dev_data *dev_data;
700 int devid, sbdf;
701
702 if (dev_iommu_priv_get(dev))
703 return 0;
704
705 sbdf = get_device_sbdf_id(dev);
706 if (sbdf < 0)
707 return sbdf;
708
709 devid = PCI_SBDF_TO_DEVID(sbdf);
710 dev_data = find_dev_data(iommu, devid);
711 if (!dev_data)
712 return -ENOMEM;
713
714 dev_data->dev = dev;
715
716 /*
 717 * The dev_iommu_priv_set() needs to be called before setup_aliases.
 718 * Otherwise, a subsequent call to dev_iommu_priv_get() will fail.
719 */
720 dev_iommu_priv_set(dev, dev_data);
721 setup_aliases(iommu, dev);
722
723 /*
 724 * By default we use passthrough mode for IOMMUv2-capable devices.
 725 * But if amd_iommu=force_isolation is set (e.g. to debug DMA to an
 726 * invalid address), we ignore the capability for the device so
 727 * it'll be forced to go into translation mode.
728 */
729 if ((iommu_default_passthrough() || !amd_iommu_force_isolation) &&
730 dev_is_pci(dev) && amd_iommu_gt_ppr_supported()) {
731 dev_data->flags = pdev_get_caps(to_pci_dev(dev));
732 }
733
734 return 0;
735}
736
737static void iommu_ignore_device(struct amd_iommu *iommu, struct device *dev)
738{
739 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
740 struct dev_table_entry *dev_table = get_dev_table(iommu);
741 int devid, sbdf;
742
743 sbdf = get_device_sbdf_id(dev);
744 if (sbdf < 0)
745 return;
746
747 devid = PCI_SBDF_TO_DEVID(sbdf);
748 pci_seg->rlookup_table[devid] = NULL;
749 memset(&dev_table[devid], 0, sizeof(struct dev_table_entry));
750
751 setup_aliases(iommu, dev);
752}
753
754
755/****************************************************************************
756 *
757 * Interrupt handling functions
758 *
759 ****************************************************************************/
760
761static void dump_dte_entry(struct amd_iommu *iommu, u16 devid)
762{
763 int i;
764 struct dev_table_entry dte;
765 struct iommu_dev_data *dev_data = find_dev_data(iommu, devid);
766
767 get_dte256(iommu, dev_data, &dte);
768
769 for (i = 0; i < 4; ++i)
770 pr_err("DTE[%d]: %016llx\n", i, dte.data[i]);
771}
772
773static void dump_command(unsigned long phys_addr)
774{
775 struct iommu_cmd *cmd = iommu_phys_to_virt(phys_addr);
776 int i;
777
778 for (i = 0; i < 4; ++i)
779 pr_err("CMD[%d]: %08x\n", i, cmd->data[i]);
780}
781
782static void amd_iommu_report_rmp_hw_error(struct amd_iommu *iommu, volatile u32 *event)
783{
784 struct iommu_dev_data *dev_data = NULL;
785 int devid, vmg_tag, flags;
786 struct pci_dev *pdev;
787 u64 spa;
788
789 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
790 vmg_tag = (event[1]) & 0xFFFF;
791 flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
792 spa = ((u64)event[3] << 32) | (event[2] & 0xFFFFFFF8);
793
794 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
795 devid & 0xff);
796 if (pdev)
797 dev_data = dev_iommu_priv_get(&pdev->dev);
798
799 if (dev_data) {
800 if (__ratelimit(&dev_data->rs)) {
801 pci_err(pdev, "Event logged [RMP_HW_ERROR vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
802 vmg_tag, spa, flags);
803 }
804 } else {
805 pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
806 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
807 vmg_tag, spa, flags);
808 }
809
810 if (pdev)
811 pci_dev_put(pdev);
812}
813
814static void amd_iommu_report_rmp_fault(struct amd_iommu *iommu, volatile u32 *event)
815{
816 struct iommu_dev_data *dev_data = NULL;
817 int devid, flags_rmp, vmg_tag, flags;
818 struct pci_dev *pdev;
819 u64 gpa;
820
821 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
822 flags_rmp = (event[0] >> EVENT_FLAGS_SHIFT) & 0xFF;
823 vmg_tag = (event[1]) & 0xFFFF;
824 flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
825 gpa = ((u64)event[3] << 32) | event[2];
826
827 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
828 devid & 0xff);
829 if (pdev)
830 dev_data = dev_iommu_priv_get(&pdev->dev);
831
832 if (dev_data) {
833 if (__ratelimit(&dev_data->rs)) {
834 pci_err(pdev, "Event logged [RMP_PAGE_FAULT vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
835 vmg_tag, gpa, flags_rmp, flags);
836 }
837 } else {
838 pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
839 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
840 vmg_tag, gpa, flags_rmp, flags);
841 }
842
843 if (pdev)
844 pci_dev_put(pdev);
845}
846
847#define IS_IOMMU_MEM_TRANSACTION(flags) \
848 (((flags) & EVENT_FLAG_I) == 0)
849
850#define IS_WRITE_REQUEST(flags) \
851 ((flags) & EVENT_FLAG_RW)
852
853static void amd_iommu_report_page_fault(struct amd_iommu *iommu,
854 u16 devid, u16 domain_id,
855 u64 address, int flags)
856{
857 struct iommu_dev_data *dev_data = NULL;
858 struct pci_dev *pdev;
859
860 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
861 devid & 0xff);
862 if (pdev)
863 dev_data = dev_iommu_priv_get(&pdev->dev);
864
865 if (dev_data) {
866 /*
867 * If this is a DMA fault (for which the I(nterrupt)
868 * bit will be unset), allow report_iommu_fault() to
869 * prevent logging it.
870 */
871 if (IS_IOMMU_MEM_TRANSACTION(flags)) {
872 /* Device not attached to domain properly */
873 if (dev_data->domain == NULL) {
874 pr_err_ratelimited("Event logged [Device not attached to domain properly]\n");
875 pr_err_ratelimited(" device=%04x:%02x:%02x.%x domain=0x%04x\n",
876 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid),
877 PCI_FUNC(devid), domain_id);
878 goto out;
879 }
880
881 if (!report_iommu_fault(&dev_data->domain->domain,
882 &pdev->dev, address,
883 IS_WRITE_REQUEST(flags) ?
884 IOMMU_FAULT_WRITE :
885 IOMMU_FAULT_READ))
886 goto out;
887 }
888
889 if (__ratelimit(&dev_data->rs)) {
890 pci_err(pdev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n",
891 domain_id, address, flags);
892 }
893 } else {
894 pr_err_ratelimited("Event logged [IO_PAGE_FAULT device=%04x:%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
895 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
896 domain_id, address, flags);
897 }
898
899out:
900 if (pdev)
901 pci_dev_put(pdev);
902}
903
904static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
905{
906 struct device *dev = iommu->iommu.dev;
907 int type, devid, flags, tag;
908 volatile u32 *event = __evt;
909 int count = 0;
910 u64 address, ctrl;
911 u32 pasid;
912
913retry:
914 type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
915 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
916 pasid = (event[0] & EVENT_DOMID_MASK_HI) |
917 (event[1] & EVENT_DOMID_MASK_LO);
918 flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
919 address = (u64)(((u64)event[3]) << 32) | event[2];
920 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
921
922 if (type == 0) {
923 /* Did we hit the erratum? */
924 if (++count == LOOP_TIMEOUT) {
925 pr_err("No event written to event log\n");
926 return;
927 }
928 udelay(1);
929 goto retry;
930 }
931
932 if (type == EVENT_TYPE_IO_FAULT) {
933 amd_iommu_report_page_fault(iommu, devid, pasid, address, flags);
934 return;
935 }
936
937 switch (type) {
938 case EVENT_TYPE_ILL_DEV:
939 dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
940 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
941 pasid, address, flags);
942 dev_err(dev, "Control Reg : 0x%llx\n", ctrl);
943 dump_dte_entry(iommu, devid);
944 break;
945 case EVENT_TYPE_DEV_TAB_ERR:
946 dev_err(dev, "Event logged [DEV_TAB_HARDWARE_ERROR device=%04x:%02x:%02x.%x "
947 "address=0x%llx flags=0x%04x]\n",
948 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
949 address, flags);
950 break;
951 case EVENT_TYPE_PAGE_TAB_ERR:
952 dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%04x:%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
953 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
954 pasid, address, flags);
955 break;
956 case EVENT_TYPE_ILL_CMD:
957 dev_err(dev, "Event logged [ILLEGAL_COMMAND_ERROR address=0x%llx]\n", address);
958 dump_command(address);
959 break;
960 case EVENT_TYPE_CMD_HARD_ERR:
961 dev_err(dev, "Event logged [COMMAND_HARDWARE_ERROR address=0x%llx flags=0x%04x]\n",
962 address, flags);
963 break;
964 case EVENT_TYPE_IOTLB_INV_TO:
965 dev_err(dev, "Event logged [IOTLB_INV_TIMEOUT device=%04x:%02x:%02x.%x address=0x%llx]\n",
966 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
967 address);
968 break;
969 case EVENT_TYPE_INV_DEV_REQ:
970 dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
971 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
972 pasid, address, flags);
973 break;
974 case EVENT_TYPE_RMP_FAULT:
975 amd_iommu_report_rmp_fault(iommu, event);
976 break;
977 case EVENT_TYPE_RMP_HW_ERR:
978 amd_iommu_report_rmp_hw_error(iommu, event);
979 break;
980 case EVENT_TYPE_INV_PPR_REQ:
981 pasid = PPR_PASID(*((u64 *)__evt));
982 tag = event[1] & 0x03FF;
983 dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
984 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
985 pasid, address, flags, tag);
986 break;
987 default:
988 dev_err(dev, "Event logged [UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n",
989 event[0], event[1], event[2], event[3]);
990 }
991
992 /*
 993 * To detect hardware erratum 732 we need to clear the
 994 * entry back to zero. This issue does not exist on
 995 * SNP-enabled systems. Also, this buffer is not writable
 996 * on SNP-enabled systems.
997 */
998 if (!amd_iommu_snp_en)
999 memset(__evt, 0, 4 * sizeof(u32));
1000}
1001
1002static void iommu_poll_events(struct amd_iommu *iommu)
1003{
1004 u32 head, tail;
1005
1006 head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
1007 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
1008
1009 while (head != tail) {
1010 iommu_print_event(iommu, iommu->evt_buf + head);
1011
1012 /* Update head pointer of hardware ring-buffer */
1013 head = (head + EVTLOG_ENTRY_SIZE) % amd_iommu_evtlog_size;
1014 writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
1015 }
1016
1017}
1018
1019#ifdef CONFIG_IRQ_REMAP
1020static int (*iommu_ga_log_notifier)(u32);
1021
1022int amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
1023{
1024 iommu_ga_log_notifier = notifier;
1025
1026 /*
1027 * Ensure all in-flight IRQ handlers run to completion before returning
1028 * to the caller, e.g. to ensure module code isn't unloaded while it's
1029 * being executed in the IRQ handler.
1030 */
1031 if (!notifier)
1032 synchronize_rcu();
1033
1034 return 0;
1035}
1036EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);
1037
1038static void iommu_poll_ga_log(struct amd_iommu *iommu)
1039{
1040 u32 head, tail;
1041
1042 if (iommu->ga_log == NULL)
1043 return;
1044
1045 head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
1046 tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
1047
1048 while (head != tail) {
1049 volatile u64 *raw;
1050 u64 log_entry;
1051
1052 raw = (u64 *)(iommu->ga_log + head);
1053
1054 /* Avoid memcpy function-call overhead */
1055 log_entry = *raw;
1056
1057 /* Update head pointer of hardware ring-buffer */
1058 head = (head + GA_ENTRY_SIZE) % GA_LOG_SIZE;
1059 writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
1060
1061 /* Handle GA entry */
1062 switch (GA_REQ_TYPE(log_entry)) {
1063 case GA_GUEST_NR:
1064 if (!iommu_ga_log_notifier)
1065 break;
1066
1067 pr_debug("%s: devid=%#x, ga_tag=%#x\n",
1068 __func__, GA_DEVID(log_entry),
1069 GA_TAG(log_entry));
1070
1071 if (iommu_ga_log_notifier(GA_TAG(log_entry)) != 0)
1072 pr_err("GA log notifier failed.\n");
1073 break;
1074 default:
1075 break;
1076 }
1077 }
1078}
1079
1080static void
1081amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu)
1082{
1083 if (!irq_remapping_enabled || !dev_is_pci(dev) ||
1084 !pci_dev_has_default_msi_parent_domain(to_pci_dev(dev)))
1085 return;
1086
1087 dev_set_msi_domain(dev, iommu->ir_domain);
1088}
1089
1090#else /* CONFIG_IRQ_REMAP */
1091static inline void
1092amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
1093#endif /* !CONFIG_IRQ_REMAP */
1094
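/*
 * Common handler loop for the event, PPR and GA log interrupts: acknowledge the
 * status bits, run the log handler, and re-read the status register to cope
 * with hardware erratum ERBT1312.
 */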
1095static void amd_iommu_handle_irq(void *data, const char *evt_type,
1096 u32 int_mask, u32 overflow_mask,
1097 void (*int_handler)(struct amd_iommu *),
1098 void (*overflow_handler)(struct amd_iommu *))
1099{
1100 struct amd_iommu *iommu = (struct amd_iommu *) data;
1101 u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
1102 u32 mask = int_mask | overflow_mask;
1103
1104 while (status & mask) {
1105 /* Enable interrupt sources again */
1106 writel(mask, iommu->mmio_base + MMIO_STATUS_OFFSET);
1107
1108 if (int_handler) {
1109 pr_devel("Processing IOMMU (ivhd%d) %s Log\n",
1110 iommu->index, evt_type);
1111 int_handler(iommu);
1112 }
1113
1114 if ((status & overflow_mask) && overflow_handler)
1115 overflow_handler(iommu);
1116
1117 /*
1118 * Hardware bug: ERBT1312
 1119 * When re-enabling the interrupt (by writing 1
 1120 * to clear the bit), the hardware might also try to set
 1121 * the interrupt bit in the event status register.
 1122 * In this scenario, the bit will remain set and disable
 1123 * subsequent interrupts.
 1124 *
 1125 * Workaround: The IOMMU driver should read back the
 1126 * status register and check if the interrupt bits are cleared.
 1127 * If not, the driver needs to go through the interrupt handler
 1128 * again and re-clear the bits.
1129 */
1130 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
1131 }
1132}
1133
1134irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data)
1135{
1136 amd_iommu_handle_irq(data, "Evt", MMIO_STATUS_EVT_INT_MASK,
1137 MMIO_STATUS_EVT_OVERFLOW_MASK,
1138 iommu_poll_events, amd_iommu_restart_event_logging);
1139
1140 return IRQ_HANDLED;
1141}
1142
1143irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data)
1144{
1145 amd_iommu_handle_irq(data, "PPR", MMIO_STATUS_PPR_INT_MASK,
1146 MMIO_STATUS_PPR_OVERFLOW_MASK,
1147 amd_iommu_poll_ppr_log, amd_iommu_restart_ppr_log);
1148
1149 return IRQ_HANDLED;
1150}
1151
1152irqreturn_t amd_iommu_int_thread_galog(int irq, void *data)
1153{
1154#ifdef CONFIG_IRQ_REMAP
1155 amd_iommu_handle_irq(data, "GA", MMIO_STATUS_GALOG_INT_MASK,
1156 MMIO_STATUS_GALOG_OVERFLOW_MASK,
1157 iommu_poll_ga_log, amd_iommu_restart_ga_log);
1158#endif
1159
1160 return IRQ_HANDLED;
1161}
1162
1163irqreturn_t amd_iommu_int_thread(int irq, void *data)
1164{
1165 amd_iommu_int_thread_evtlog(irq, data);
1166 amd_iommu_int_thread_pprlog(irq, data);
1167 amd_iommu_int_thread_galog(irq, data);
1168
1169 return IRQ_HANDLED;
1170}
1171
1172/****************************************************************************
1173 *
1174 * IOMMU command queuing functions
1175 *
1176 ****************************************************************************/
1177
1178static void dump_command_buffer(struct amd_iommu *iommu)
1179{
1180 struct iommu_cmd *cmd;
1181 u32 head, tail;
1182 int i;
1183
1184 head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
1185 tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
1186
1187 pr_err("CMD Buffer head=%llu tail=%llu\n", MMIO_CMD_BUFFER_HEAD(head),
1188 MMIO_CMD_BUFFER_TAIL(tail));
1189
1190 for (i = 0; i < CMD_BUFFER_ENTRIES; i++) {
1191 cmd = (struct iommu_cmd *)(iommu->cmd_buf + i * sizeof(*cmd));
1192 pr_err("%3d: %08x %08x %08x %08x\n", i, cmd->data[0], cmd->data[1], cmd->data[2],
1193 cmd->data[3]);
1194 }
1195}
1196
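/* Poll the completion-wait semaphore until it reaches @data or the loop times out. */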
1197static int wait_on_sem(struct amd_iommu *iommu, u64 data)
1198{
1199 int i = 0;
1200
1201 /*
1202 * cmd_sem holds a monotonically non-decreasing completion sequence
1203 * number.
1204 */
1205 while ((__s64)(READ_ONCE(*iommu->cmd_sem) - data) < 0 &&
1206 i < LOOP_TIMEOUT) {
1207 udelay(1);
1208 i += 1;
1209 }
1210
1211 if (i == LOOP_TIMEOUT) {
1212
1213 pr_alert("IOMMU %04x:%02x:%02x.%01x: Completion-Wait loop timed out\n",
1214 iommu->pci_seg->id, PCI_BUS_NUM(iommu->devid),
1215 PCI_SLOT(iommu->devid), PCI_FUNC(iommu->devid));
1216
1217 if (amd_iommu_dump)
1218 DO_ONCE_LITE(dump_command_buffer, iommu);
1219
1220 return -EIO;
1221 }
1222
1223 return 0;
1224}
1225
1226static void copy_cmd_to_buffer(struct amd_iommu *iommu,
1227 struct iommu_cmd *cmd)
1228{
1229 u8 *target;
1230 u32 tail;
1231
1232 /* Copy command to buffer */
1233 tail = iommu->cmd_buf_tail;
1234 target = iommu->cmd_buf + tail;
1235 memcpy(target, cmd, sizeof(*cmd));
1236
1237 tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
1238 iommu->cmd_buf_tail = tail;
1239
1240 /* Tell the IOMMU about it */
1241 writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
1242}
1243
1244static void build_completion_wait(struct iommu_cmd *cmd,
1245 struct amd_iommu *iommu,
1246 u64 data)
1247{
1248 u64 paddr = iommu->cmd_sem_paddr;
1249
1250 memset(cmd, 0, sizeof(*cmd));
1251 cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
1252 cmd->data[1] = upper_32_bits(paddr);
1253 cmd->data[2] = lower_32_bits(data);
1254 cmd->data[3] = upper_32_bits(data);
1255 CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
1256}
1257
1258static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
1259{
1260 memset(cmd, 0, sizeof(*cmd));
1261 cmd->data[0] = devid;
1262 CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
1263}
1264
1265/*
1266 * Builds an invalidation address which is suitable for one page or multiple
 1267 * pages. Sets the size bit (S) if more than one page is flushed.
1268 */
1269static inline u64 build_inv_address(u64 address, size_t size)
1270{
1271 u64 pages, end, msb_diff;
1272
1273 pages = iommu_num_pages(address, size, PAGE_SIZE);
1274
1275 if (pages == 1)
1276 return address & PAGE_MASK;
1277
1278 end = address + size - 1;
1279
1280 /*
 1281 * msb_diff holds the index of the most significant bit that
1282 * flipped between the start and end.
1283 */
1284 msb_diff = fls64(end ^ address) - 1;
1285
1286 /*
1287 * Bits 63:52 are sign extended. If for some reason bit 51 is different
1288 * between the start and the end, invalidate everything.
1289 */
1290 if (unlikely(msb_diff > 51)) {
1291 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
1292 } else {
1293 /*
1294 * The msb-bit must be clear on the address. Just set all the
1295 * lower bits.
1296 */
1297 address |= (1ull << msb_diff) - 1;
1298 }
1299
1300 /* Clear bits 11:0 */
1301 address &= PAGE_MASK;
1302
1303 /* Set the size bit - we flush more than one 4kb page */
1304 return address | CMD_INV_IOMMU_PAGES_SIZE_MASK;
1305}
1306
1307static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
1308 size_t size, u16 domid,
1309 ioasid_t pasid, bool gn)
1310{
1311 u64 inv_address = build_inv_address(address, size);
1312
1313 memset(cmd, 0, sizeof(*cmd));
1314
1315 cmd->data[1] |= domid;
1316 cmd->data[2] = lower_32_bits(inv_address);
1317 cmd->data[3] = upper_32_bits(inv_address);
1318 /* PDE bit - we want to flush everything, not only the PTEs */
1319 cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
1320 if (gn) {
1321 cmd->data[0] |= pasid;
1322 cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
1323 }
1324 CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
1325}
1326
1327static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
1328 u64 address, size_t size,
1329 ioasid_t pasid, bool gn)
1330{
1331 u64 inv_address = build_inv_address(address, size);
1332
1333 memset(cmd, 0, sizeof(*cmd));
1334
1335 cmd->data[0] = devid;
1336 cmd->data[0] |= (qdep & 0xff) << 24;
1337 cmd->data[1] = devid;
1338 cmd->data[2] = lower_32_bits(inv_address);
1339 cmd->data[3] = upper_32_bits(inv_address);
1340 if (gn) {
1341 cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
1342 cmd->data[1] |= (pasid & 0xff) << 16;
1343 cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
1344 }
1345
1346 CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
1347}
1348
1349static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, u32 pasid,
1350 int status, int tag, u8 gn)
1351{
1352 memset(cmd, 0, sizeof(*cmd));
1353
1354 cmd->data[0] = devid;
1355 if (gn) {
1356 cmd->data[1] = pasid;
1357 cmd->data[2] = CMD_INV_IOMMU_PAGES_GN_MASK;
1358 }
1359 cmd->data[3] = tag & 0x1ff;
1360 cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;
1361
1362 CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
1363}
1364
1365static void build_inv_all(struct iommu_cmd *cmd)
1366{
1367 memset(cmd, 0, sizeof(*cmd));
1368 CMD_SET_TYPE(cmd, CMD_INV_ALL);
1369}
1370
1371static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
1372{
1373 memset(cmd, 0, sizeof(*cmd));
1374 cmd->data[0] = devid;
1375 CMD_SET_TYPE(cmd, CMD_INV_IRT);
1376}
1377
1378/*
1379 * Writes the command to the IOMMUs command buffer and informs the
1380 * hardware about the new command.
1381 */
1382static int __iommu_queue_command_sync(struct amd_iommu *iommu,
1383 struct iommu_cmd *cmd,
1384 bool sync)
1385{
1386 unsigned int count = 0;
1387 u32 left, next_tail;
1388
1389 next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
1390again:
1391 left = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE;
1392
1393 if (left <= 0x20) {
1394 /* Skip udelay() the first time around */
1395 if (count++) {
1396 if (count == LOOP_TIMEOUT) {
1397 pr_err("Command buffer timeout\n");
1398 return -EIO;
1399 }
1400
1401 udelay(1);
1402 }
1403
1404 /* Update head and recheck remaining space */
1405 iommu->cmd_buf_head = readl(iommu->mmio_base +
1406 MMIO_CMD_HEAD_OFFSET);
1407
1408 goto again;
1409 }
1410
1411 copy_cmd_to_buffer(iommu, cmd);
1412
1413 /* Do we need to make sure all commands are processed? */
1414 iommu->need_sync = sync;
1415
1416 return 0;
1417}
1418
1419static int iommu_queue_command_sync(struct amd_iommu *iommu,
1420 struct iommu_cmd *cmd,
1421 bool sync)
1422{
1423 unsigned long flags;
1424 int ret;
1425
1426 raw_spin_lock_irqsave(&iommu->lock, flags);
1427 ret = __iommu_queue_command_sync(iommu, cmd, sync);
1428 raw_spin_unlock_irqrestore(&iommu->lock, flags);
1429
1430 return ret;
1431}
1432
1433static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
1434{
1435 return iommu_queue_command_sync(iommu, cmd, true);
1436}
1437
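/* Return the next completion-wait sequence number; iommu->lock must be held. */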
1438static u64 get_cmdsem_val(struct amd_iommu *iommu)
1439{
1440 lockdep_assert_held(&iommu->lock);
1441 return ++iommu->cmd_sem_val;
1442}
1443
1444/*
1445 * This function queues a completion wait command into the command
1446 * buffer of an IOMMU
1447 */
1448static int iommu_completion_wait(struct amd_iommu *iommu)
1449{
1450 struct iommu_cmd cmd;
1451 unsigned long flags;
1452 int ret;
1453 u64 data;
1454
1455 if (!iommu->need_sync)
1456 return 0;
1457
1458 raw_spin_lock_irqsave(&iommu->lock, flags);
1459
1460 data = get_cmdsem_val(iommu);
1461 build_completion_wait(&cmd, iommu, data);
1462
1463 ret = __iommu_queue_command_sync(iommu, &cmd, false);
1464 raw_spin_unlock_irqrestore(&iommu->lock, flags);
1465
1466 if (ret)
1467 return ret;
1468
1469 ret = wait_on_sem(iommu, data);
1470
1471 return ret;
1472}
1473
1474static void domain_flush_complete(struct protection_domain *domain)
1475{
1476 struct pdom_iommu_info *pdom_iommu_info;
1477 unsigned long i;
1478
1479 lockdep_assert_held(&domain->lock);
1480
1481 /*
 1482 * Devices of this domain are behind this IOMMU.
1483 * We need to wait for completion of all commands.
1484 */
1485 xa_for_each(&domain->iommu_array, i, pdom_iommu_info)
1486 iommu_completion_wait(pdom_iommu_info->iommu);
1487}
1488
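/* Queue an INVALIDATE_DEV_ENTRY command so the IOMMU re-reads the DTE for @devid. */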
1489static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
1490{
1491 struct iommu_cmd cmd;
1492
1493 build_inv_dte(&cmd, devid);
1494
1495 return iommu_queue_command(iommu, &cmd);
1496}
1497
1498static void iommu_flush_dte_sync(struct amd_iommu *iommu, u16 devid)
1499{
1500 int ret;
1501
1502 ret = iommu_flush_dte(iommu, devid);
1503 if (!ret)
1504 iommu_completion_wait(iommu);
1505}
1506
1507static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
1508{
1509 u32 devid;
1510 u16 last_bdf = iommu->pci_seg->last_bdf;
1511
1512 for (devid = 0; devid <= last_bdf; ++devid)
1513 iommu_flush_dte(iommu, devid);
1514
1515 iommu_completion_wait(iommu);
1516}
1517
1518/*
1519 * This function uses heavy locking and may disable irqs for some time. But
1520 * this is no issue because it is only called during resume.
1521 */
1522static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
1523{
1524 u32 dom_id;
1525 u16 last_bdf = iommu->pci_seg->last_bdf;
1526
1527 for (dom_id = 0; dom_id <= last_bdf; ++dom_id) {
1528 struct iommu_cmd cmd;
1529 build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
1530 dom_id, IOMMU_NO_PASID, false);
1531 iommu_queue_command(iommu, &cmd);
1532 }
1533
1534 iommu_completion_wait(iommu);
1535}
1536
1537static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
1538{
1539 struct iommu_cmd cmd;
1540
1541 build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
1542 dom_id, IOMMU_NO_PASID, false);
1543 iommu_queue_command(iommu, &cmd);
1544
1545 iommu_completion_wait(iommu);
1546}
1547
1548static int iommu_flush_pages_v1_hdom_ids(struct protection_domain *pdom, u64 address, size_t size)
1549{
1550 int ret = 0;
1551 struct amd_iommu_viommu *aviommu;
1552
1553 list_for_each_entry(aviommu, &pdom->viommu_list, pdom_list) {
1554 unsigned long i;
1555 struct guest_domain_mapping_info *gdom_info;
1556 struct amd_iommu *iommu = container_of(aviommu->core.iommu_dev,
1557 struct amd_iommu, iommu);
1558
1559 xa_lock(&aviommu->gdomid_array);
1560 xa_for_each(&aviommu->gdomid_array, i, gdom_info) {
1561 struct iommu_cmd cmd;
1562
1563 pr_debug("%s: iommu=%#x, hdom_id=%#x\n", __func__,
1564 iommu->devid, gdom_info->hdom_id);
1565 build_inv_iommu_pages(&cmd, address, size, gdom_info->hdom_id,
1566 IOMMU_NO_PASID, false);
1567 ret |= iommu_queue_command(iommu, &cmd);
1568 }
1569 xa_unlock(&aviommu->gdomid_array);
1570 }
1571 return ret;
1572}
1573
1574static void amd_iommu_flush_all(struct amd_iommu *iommu)
1575{
1576 struct iommu_cmd cmd;
1577
1578 build_inv_all(&cmd);
1579
1580 iommu_queue_command(iommu, &cmd);
1581 iommu_completion_wait(iommu);
1582}
1583
1584static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
1585{
1586 struct iommu_cmd cmd;
1587
1588 build_inv_irt(&cmd, devid);
1589
1590 iommu_queue_command(iommu, &cmd);
1591}
1592
1593static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
1594{
1595 u32 devid;
1596 u16 last_bdf = iommu->pci_seg->last_bdf;
1597
1598 if (iommu->irtcachedis_enabled)
1599 return;
1600
1601 for (devid = 0; devid <= last_bdf; devid++)
1602 iommu_flush_irt(iommu, devid);
1603
1604 iommu_completion_wait(iommu);
1605}
1606
1607void amd_iommu_flush_all_caches(struct amd_iommu *iommu)
1608{
1609 if (check_feature(FEATURE_IA)) {
1610 amd_iommu_flush_all(iommu);
1611 } else {
1612 amd_iommu_flush_dte_all(iommu);
1613 amd_iommu_flush_irt_all(iommu);
1614 amd_iommu_flush_tlb_all(iommu);
1615 }
1616}
1617
1618/*
1619 * Command send function for flushing on-device TLB
1620 */
1621static int device_flush_iotlb(struct iommu_dev_data *dev_data, u64 address,
1622 size_t size, ioasid_t pasid, bool gn)
1623{
1624 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
1625 struct iommu_cmd cmd;
1626 int qdep = dev_data->ats_qdep;
1627
1628 build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address,
1629 size, pasid, gn);
1630
1631 return iommu_queue_command(iommu, &cmd);
1632}
1633
1634static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data)
1635{
1636 struct amd_iommu *iommu = data;
1637
1638 return iommu_flush_dte(iommu, alias);
1639}
1640
1641/*
1642 * Command send function for invalidating a device table entry
1643 */
1644static int device_flush_dte(struct iommu_dev_data *dev_data)
1645{
1646 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
1647 struct pci_dev *pdev = NULL;
1648 struct amd_iommu_pci_seg *pci_seg;
1649 u16 alias;
1650 int ret;
1651
1652 if (dev_is_pci(dev_data->dev))
1653 pdev = to_pci_dev(dev_data->dev);
1654
1655 if (pdev)
1656 ret = pci_for_each_dma_alias(pdev,
1657 device_flush_dte_alias, iommu);
1658 else
1659 ret = iommu_flush_dte(iommu, dev_data->devid);
1660 if (ret)
1661 return ret;
1662
1663 pci_seg = iommu->pci_seg;
1664 alias = pci_seg->alias_table[dev_data->devid];
1665 if (alias != dev_data->devid) {
1666 ret = iommu_flush_dte(iommu, alias);
1667 if (ret)
1668 return ret;
1669 }
1670
1671 if (dev_data->ats_enabled) {
1672 /* Invalidate the entire contents of an IOTLB */
1673 ret = device_flush_iotlb(dev_data, 0, ~0UL,
1674 IOMMU_NO_PASID, false);
1675 }
1676
1677 return ret;
1678}
1679
1680static int domain_flush_pages_v2(struct protection_domain *pdom,
1681 u64 address, size_t size)
1682{
1683 struct iommu_dev_data *dev_data;
1684 struct iommu_cmd cmd;
1685 int ret = 0;
1686
1687 lockdep_assert_held(&pdom->lock);
1688 list_for_each_entry(dev_data, &pdom->dev_list, list) {
1689 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
1690 u16 domid = dev_data->gcr3_info.domid;
1691
1692 build_inv_iommu_pages(&cmd, address, size,
1693 domid, IOMMU_NO_PASID, true);
1694
1695 ret |= iommu_queue_command(iommu, &cmd);
1696 }
1697
1698 return ret;
1699}
1700
1701static int domain_flush_pages_v1(struct protection_domain *pdom,
1702 u64 address, size_t size)
1703{
1704 struct pdom_iommu_info *pdom_iommu_info;
1705 struct iommu_cmd cmd;
1706 int ret = 0;
1707 unsigned long i;
1708
1709 lockdep_assert_held(&pdom->lock);
1710
1711 build_inv_iommu_pages(&cmd, address, size,
1712 pdom->id, IOMMU_NO_PASID, false);
1713
1714 xa_for_each(&pdom->iommu_array, i, pdom_iommu_info) {
1715 /*
 1716 * Devices of this domain are behind this IOMMU.
 1717 * We need a TLB flush.
1718 */
1719 ret |= iommu_queue_command(pdom_iommu_info->iommu, &cmd);
1720 }
1721
1722 /*
1723 * A domain w/ v1 table can be a nest parent, which can have
1724 * multiple nested domains. Each nested domain has 1:1 mapping
1725 * between gDomID and hDomID. Therefore, flush every hDomID
1726 * associated to this nest parent domain.
1727 *
1728 * See drivers/iommu/amd/nested.c: amd_iommu_alloc_domain_nested()
1729 */
1730 if (!list_empty(&pdom->viommu_list))
1731 ret |= iommu_flush_pages_v1_hdom_ids(pdom, address, size);
1732
1733 return ret;
1734}
1735
1736/*
1737 * TLB invalidation function which is called from the mapping functions.
 1738 * It flushes a range of PTEs of the domain.
1739 */
1740static void __domain_flush_pages(struct protection_domain *domain,
1741 u64 address, size_t size)
1742{
1743 struct iommu_dev_data *dev_data;
1744 int ret = 0;
1745 ioasid_t pasid = IOMMU_NO_PASID;
1746 bool gn = false;
1747
1748 lockdep_assert_held(&domain->lock);
1749
1750 if (pdom_is_v2_pgtbl_mode(domain)) {
1751 gn = true;
1752 ret = domain_flush_pages_v2(domain, address, size);
1753 } else {
1754 ret = domain_flush_pages_v1(domain, address, size);
1755 }
1756
1757 list_for_each_entry(dev_data, &domain->dev_list, list) {
1758
1759 if (!dev_data->ats_enabled)
1760 continue;
1761
1762 ret |= device_flush_iotlb(dev_data, address, size, pasid, gn);
1763 }
1764
1765 WARN_ON(ret);
1766}
1767
1768void amd_iommu_domain_flush_pages(struct protection_domain *domain,
1769 u64 address, size_t size)
1770{
1771 lockdep_assert_held(&domain->lock);
1772
1773 if (likely(!amd_iommu_np_cache)) {
1774 __domain_flush_pages(domain, address, size);
1775
1776 /* Wait until IOMMU TLB and all device IOTLB flushes are complete */
1777 domain_flush_complete(domain);
1778
1779 return;
1780 }
1781
1782 /*
1783 * When NpCache is on, we infer that we run in a VM and use a vIOMMU.
1784 * In such setups it is best to avoid flushes of ranges which are not
1785 * naturally aligned, since it would lead to flushes of unmodified
1786 * PTEs. Such flushes would require the hypervisor to do more work than
 1787 * necessary. Therefore, perform repeated flushes of aligned ranges
 1788 * until the whole range is covered. Each iteration flushes the smaller
 1789 * of the natural alignment of the address being flushed and the
 1790 * greatest naturally aligned region that fits in the remaining range.
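 * For example, flushing address 0x1000 with size 0x3000 is issued as a 4-KiB
 * flush at 0x1000 followed by an 8-KiB flush at 0x2000.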
1791 */
1792 while (size != 0) {
1793 int addr_alignment = __ffs(address);
1794 int size_alignment = __fls(size);
1795 int min_alignment;
1796 size_t flush_size;
1797
1798 /*
1799 * size is always non-zero, but address might be zero, causing
1800 * addr_alignment to be negative. As the casting of the
1801 * argument in __ffs(address) to long might trim the high bits
1802 * of the address on x86-32, cast to long when doing the check.
1803 */
1804 if (likely((unsigned long)address != 0))
1805 min_alignment = min(addr_alignment, size_alignment);
1806 else
1807 min_alignment = size_alignment;
1808
1809 flush_size = 1ul << min_alignment;
1810
1811 __domain_flush_pages(domain, address, flush_size);
1812 address += flush_size;
1813 size -= flush_size;
1814 }
1815
1816 /* Wait until IOMMU TLB and all device IOTLB flushes are complete */
1817 domain_flush_complete(domain);
1818}
1819
1820/* Flush the whole IO/TLB for a given protection domain - including PDE */
1821static void amd_iommu_domain_flush_all(struct protection_domain *domain)
1822{
1823 amd_iommu_domain_flush_pages(domain, 0,
1824 CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
1825}
1826
1827void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
1828 ioasid_t pasid, u64 address, size_t size)
1829{
1830 struct iommu_cmd cmd;
1831 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
1832
1833 build_inv_iommu_pages(&cmd, address, size,
1834 dev_data->gcr3_info.domid, pasid, true);
1835 iommu_queue_command(iommu, &cmd);
1836
1837 if (dev_data->ats_enabled)
1838 device_flush_iotlb(dev_data, address, size, pasid, true);
1839
1840 iommu_completion_wait(iommu);
1841}
1842
1843static void dev_flush_pasid_all(struct iommu_dev_data *dev_data,
1844 ioasid_t pasid)
1845{
1846 amd_iommu_dev_flush_pasid_pages(dev_data, pasid, 0,
1847 CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
1848}
1849
1850int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag)
1851{
1852 struct iommu_dev_data *dev_data;
1853 struct amd_iommu *iommu;
1854 struct iommu_cmd cmd;
1855
1856 dev_data = dev_iommu_priv_get(dev);
1857 iommu = get_amd_iommu_from_dev(dev);
1858
1859 build_complete_ppr(&cmd, dev_data->devid, pasid, status,
1860 tag, dev_data->pri_tlp);
1861
1862 return iommu_queue_command(iommu, &cmd);
1863}
1864
1865/****************************************************************************
1866 *
1867 * The next functions belong to the domain allocation. A domain is
1868 * allocated for every IOMMU as the default domain. If device isolation
 1869 * is enabled, every device gets its own domain. The most important thing
1870 * about domains is the page table mapping the DMA address space they
1871 * contain.
1872 *
1873 ****************************************************************************/
1874int amd_iommu_pdom_id_alloc(void)
1875{
1876 return ida_alloc_range(&pdom_ids, 1, MAX_DOMAIN_ID - 1, GFP_ATOMIC);
1877}
1878
1879int amd_iommu_pdom_id_reserve(u16 id, gfp_t gfp)
1880{
1881 return ida_alloc_range(&pdom_ids, id, id, gfp);
1882}
1883
1884void amd_iommu_pdom_id_free(int id)
1885{
1886 ida_free(&pdom_ids, id);
1887}
1888
1889void amd_iommu_pdom_id_destroy(void)
1890{
1891 ida_destroy(&pdom_ids);
1892}
1893
1894static void free_gcr3_tbl_level1(u64 *tbl)
1895{
1896 u64 *ptr;
1897 int i;
1898
1899 for (i = 0; i < 512; ++i) {
1900 if (!(tbl[i] & GCR3_VALID))
1901 continue;
1902
1903 ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
1904
1905 iommu_free_pages(ptr);
1906 }
1907}
1908
1909static void free_gcr3_tbl_level2(u64 *tbl)
1910{
1911 u64 *ptr;
1912 int i;
1913
1914 for (i = 0; i < 512; ++i) {
1915 if (!(tbl[i] & GCR3_VALID))
1916 continue;
1917
1918 ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
1919
1920 free_gcr3_tbl_level1(ptr);
1921 }
1922}
1923
1924static void free_gcr3_table(struct gcr3_tbl_info *gcr3_info)
1925{
1926 if (gcr3_info->glx == 2)
1927 free_gcr3_tbl_level2(gcr3_info->gcr3_tbl);
1928 else if (gcr3_info->glx == 1)
1929 free_gcr3_tbl_level1(gcr3_info->gcr3_tbl);
1930 else
1931 WARN_ON_ONCE(gcr3_info->glx != 0);
1932
1933 gcr3_info->glx = 0;
1934
1935 /* Free per device domain ID */
1936 amd_iommu_pdom_id_free(gcr3_info->domid);
1937
1938 iommu_free_pages(gcr3_info->gcr3_tbl);
1939 gcr3_info->gcr3_tbl = NULL;
1940}
1941
1942/*
 1943 * Number of GCR3 table levels required. Each level is a 4-Kbyte
 1944 * page and can contain up to 512 entries.
1945 */
1946static int get_gcr3_levels(int pasids)
1947{
1948 int levels;
1949
1950 if (pasids == -1)
1951 return amd_iommu_max_glx_val;
1952
1953 levels = get_count_order(pasids);
1954
1955 return levels ? (DIV_ROUND_UP(levels, 9) - 1) : levels;
1956}
1957
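/* Allocate the root GCR3 table and a per-device domain ID for PASID support. */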
1958static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info,
1959 struct amd_iommu *iommu, int pasids)
1960{
1961 int levels = get_gcr3_levels(pasids);
1962 int nid = iommu ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE;
1963 int domid;
1964
1965 if (levels > amd_iommu_max_glx_val)
1966 return -EINVAL;
1967
1968 if (gcr3_info->gcr3_tbl)
1969 return -EBUSY;
1970
1971 /* Allocate per device domain ID */
1972 domid = amd_iommu_pdom_id_alloc();
1973 if (domid <= 0)
1974 return -ENOSPC;
1975 gcr3_info->domid = domid;
1976
1977 gcr3_info->gcr3_tbl = iommu_alloc_pages_node_sz(nid, GFP_ATOMIC, SZ_4K);
1978 if (gcr3_info->gcr3_tbl == NULL) {
1979 amd_iommu_pdom_id_free(domid);
1980 return -ENOMEM;
1981 }
1982
1983 gcr3_info->glx = levels;
1984
1985 return 0;
1986}
1987
1988static u64 *__get_gcr3_pte(struct gcr3_tbl_info *gcr3_info,
1989 ioasid_t pasid, bool alloc)
1990{
1991 int index;
1992 u64 *pte;
1993 u64 *root = gcr3_info->gcr3_tbl;
1994 int level = gcr3_info->glx;
1995
1996 while (true) {
1997
1998 index = (pasid >> (9 * level)) & 0x1ff;
1999 pte = &root[index];
2000
2001 if (level == 0)
2002 break;
2003
2004 if (!(*pte & GCR3_VALID)) {
2005 if (!alloc)
2006 return NULL;
2007
2008 root = (void *)get_zeroed_page(GFP_ATOMIC);
2009 if (root == NULL)
2010 return NULL;
2011
2012 *pte = iommu_virt_to_phys(root) | GCR3_VALID;
2013 }
2014
2015 root = iommu_phys_to_virt(*pte & PAGE_MASK);
2016
2017 level -= 1;
2018 }
2019
2020 return pte;
2021}
2022
2023static int update_gcr3(struct iommu_dev_data *dev_data,
2024 ioasid_t pasid, unsigned long gcr3, bool set)
2025{
2026 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
2027 u64 *pte;
2028
2029 pte = __get_gcr3_pte(gcr3_info, pasid, true);
2030 if (pte == NULL)
2031 return -ENOMEM;
2032
2033 if (set)
2034 *pte = (gcr3 & PAGE_MASK) | GCR3_VALID;
2035 else
2036 *pte = 0;
2037
2038 dev_flush_pasid_all(dev_data, pasid);
2039 return 0;
2040}
2041
2042int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid,
2043 unsigned long gcr3)
2044{
2045 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
2046 int ret;
2047
2048 iommu_group_mutex_assert(dev_data->dev);
2049
2050 ret = update_gcr3(dev_data, pasid, gcr3, true);
2051 if (ret)
2052 return ret;
2053
2054 gcr3_info->pasid_cnt++;
2055 return ret;
2056}
2057
2058int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid)
2059{
2060 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
2061 int ret;
2062
2063 iommu_group_mutex_assert(dev_data->dev);
2064
2065 ret = update_gcr3(dev_data, pasid, 0, false);
2066 if (ret)
2067 return ret;
2068
2069 gcr3_info->pasid_cnt--;
2070 return ret;
2071}
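
/*
 * Hypothetical caller sketch (not part of this file): a PASID attach
 * path is expected to install and later remove a guest CR3 while
 * holding the device's group mutex, pairing the two helpers:
 *
 *	ret = amd_iommu_set_gcr3(dev_data, pasid, gcr3_pa);
 *	if (ret)
 *		return ret;
 *	...
 *	amd_iommu_clear_gcr3(dev_data, pasid);
 *
 * (gcr3_pa is a placeholder for the physical address of the guest page
 * table root.) Both helpers flush the PASID's translations through
 * dev_flush_pasid_all() before returning.
 */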
2072
2073/*
2074 * Note:
2075 * The old values for the GCR3 table and GPT have been cleared by the caller.
2076 */
2077static void set_dte_gcr3_table(struct iommu_dev_data *dev_data,
2078 struct dev_table_entry *new)
2079{
2080 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
2081 u64 gcr3 = iommu_virt_to_phys(gcr3_info->gcr3_tbl);
2082
2083 new->data[0] |= DTE_FLAG_TV |
2084 (dev_data->ppr ? DTE_FLAG_PPR : 0) |
2085 (pdom_is_v2_pgtbl_mode(dev_data->domain) ? DTE_FLAG_GIOV : 0) |
2086 DTE_FLAG_GV |
2087 FIELD_PREP(DTE_GLX, gcr3_info->glx) |
2088 FIELD_PREP(DTE_GCR3_14_12, gcr3 >> 12) |
2089 DTE_FLAG_IR | DTE_FLAG_IW;
2090
2091 new->data[1] |= FIELD_PREP(DTE_DOMID_MASK, dev_data->gcr3_info.domid) |
2092 FIELD_PREP(DTE_GCR3_30_15, gcr3 >> 15) |
2093 (dev_data->ats_enabled ? DTE_FLAG_IOTLB : 0) |
2094 FIELD_PREP(DTE_GCR3_51_31, gcr3 >> 31);
2095
2096 /* Guest page table can only support 4 or 5 levels */
2097 if (amd_iommu_gpt_level == PAGE_MODE_5_LEVEL)
2098 new->data[2] |= FIELD_PREP(DTE_GPT_LEVEL_MASK, GUEST_PGTABLE_5_LEVEL);
2099 else
2100 new->data[2] |= FIELD_PREP(DTE_GPT_LEVEL_MASK, GUEST_PGTABLE_4_LEVEL);
2101}
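
/*
 * Illustrative packing example (the address below is hypothetical): for
 * a 4K-aligned GCR3 table at physical address 0x123456000, the root
 * pointer is split across the DTE as
 *
 *	FIELD_PREP(DTE_GCR3_14_12, 0x123456000 >> 12)	bits 14:12
 *	FIELD_PREP(DTE_GCR3_30_15, 0x123456000 >> 15)	bits 30:15
 *	FIELD_PREP(DTE_GCR3_51_31, 0x123456000 >> 31)	bits 51:31
 *
 * FIELD_PREP() masks each shifted value to its field width, so passing
 * the full shifted address, as done above, is sufficient.
 */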
2102
2103void amd_iommu_set_dte_v1(struct iommu_dev_data *dev_data,
2104 struct protection_domain *domain, u16 domid,
2105 struct pt_iommu_amdv1_hw_info *pt_info,
2106 struct dev_table_entry *new)
2107{
2108 u64 host_pt_root = __sme_set(pt_info->host_pt_root);
2109
2110 /* Note Dirty tracking is used for v1 table only for now */
2111 new->data[0] |= DTE_FLAG_TV |
2112 FIELD_PREP(DTE_MODE_MASK, pt_info->mode) |
2113 (domain->dirty_tracking ? DTE_FLAG_HAD : 0) |
2114 FIELD_PREP(DTE_HOST_TRP, host_pt_root >> 12) |
2115 DTE_FLAG_IR | DTE_FLAG_IW;
2116
2117 new->data[1] |= FIELD_PREP(DTE_DOMID_MASK, domid) |
2118 (dev_data->ats_enabled ? DTE_FLAG_IOTLB : 0);
2119}
2120
2121static void set_dte_v1(struct iommu_dev_data *dev_data,
2122 struct protection_domain *domain, u16 domid,
2123 phys_addr_t top_paddr, unsigned int top_level,
2124 struct dev_table_entry *new)
2125{
2126 struct pt_iommu_amdv1_hw_info pt_info;
2127
2128 /*
2129 * When updating the IO pagetable, the new top and level
2130 * are provided as parameters. For other operations i.e.
2131 * device attach, retrieve the current pagetable info
2132 * via the IOMMU PT API.
2133 */
2134 if (top_paddr) {
2135 pt_info.host_pt_root = top_paddr;
2136 pt_info.mode = top_level + 1;
2137 } else {
2138 WARN_ON(top_paddr || top_level);
2139 pt_iommu_amdv1_hw_info(&domain->amdv1, &pt_info);
2140 }
2141
2142 amd_iommu_set_dte_v1(dev_data, domain, domid, &pt_info, new);
2143}
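
/*
 * Illustrative call patterns (sketch only): dev_update_dte() passes
 * top_paddr = 0 and top_level = 0 so the live pagetable is read back
 * via pt_iommu_amdv1_hw_info(), whereas amd_iommu_change_top() supplies
 * the new table top explicitly:
 *
 *	set_dte_entry(iommu, dev_data, 0, 0);			// attach path
 *	set_dte_entry(iommu, dev_data, top_paddr, top_level);	// change_top
 */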
2144
2145static void set_dte_passthrough(struct iommu_dev_data *dev_data,
2146 struct protection_domain *domain,
2147 struct dev_table_entry *new)
2148{
2149 new->data[0] |= DTE_FLAG_TV | DTE_FLAG_IR | DTE_FLAG_IW;
2150
2151 new->data[1] |= FIELD_PREP(DTE_DOMID_MASK, domain->id) |
2152 (dev_data->ats_enabled ? DTE_FLAG_IOTLB : 0);
2154}
2155
2156static void set_dte_entry(struct amd_iommu *iommu,
2157 struct iommu_dev_data *dev_data,
2158 phys_addr_t top_paddr, unsigned int top_level)
2159{
2160 u32 old_domid;
2161 struct dev_table_entry new = {};
2162 struct protection_domain *domain = dev_data->domain;
2163 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
2164 struct dev_table_entry *dte = &get_dev_table(iommu)[dev_data->devid];
2165
2166 amd_iommu_make_clear_dte(dev_data, &new);
2167
2168 old_domid = READ_ONCE(dte->data[1]) & DTE_DOMID_MASK;
2169 if (gcr3_info->gcr3_tbl)
2170 set_dte_gcr3_table(dev_data, &new);
2171 else if (domain->domain.type == IOMMU_DOMAIN_IDENTITY)
2172 set_dte_passthrough(dev_data, domain, &new);
2173 else if ((domain->domain.type & __IOMMU_DOMAIN_PAGING) &&
2174 domain->pd_mode == PD_MODE_V1)
2175 set_dte_v1(dev_data, domain, domain->id, top_paddr, top_level, &new);
2176 else
2177 WARN_ON(true);
2178
2179 amd_iommu_update_dte(iommu, dev_data, &new);
2180
2181 /*
2182 * A kdump kernel might be replacing a domain ID that was copied from
2183 * the previous kernel. If so, it needs to flush the translation cache
2184 * entries for the old domain ID that is being overwritten.
2185 */
2186 if (old_domid)
2187 amd_iommu_flush_tlb_domid(iommu, old_domid);
2189}
2190
2191/*
2192 * Clear DMA-remap related flags to block all DMA (blocked domain)
2193 */
2194static void clear_dte_entry(struct amd_iommu *iommu, struct iommu_dev_data *dev_data)
2195{
2196 struct dev_table_entry new = {};
2197
2198 amd_iommu_make_clear_dte(dev_data, &new);
2199 amd_iommu_update_dte(iommu, dev_data, &new);
2200}
2201
2202/* Update and flush DTE for the given device */
2203static void dev_update_dte(struct iommu_dev_data *dev_data, bool set)
2204{
2205 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
2206
2207 if (set)
2208 set_dte_entry(iommu, dev_data, 0, 0);
2209 else
2210 clear_dte_entry(iommu, dev_data);
2211}
2212
2213/*
2214 * If the domain is SVA capable then initialize the GCR3 table. Also, if the
2215 * domain is in v2 page table mode then update GCR3[0].
2216 */
2217static int init_gcr3_table(struct iommu_dev_data *dev_data,
2218 struct protection_domain *pdom)
2219{
2220 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
2221 int max_pasids = dev_data->max_pasids;
2222 struct pt_iommu_x86_64_hw_info pt_info;
2223 int ret = 0;
2224
2225 /*
2226 * If the domain is in pt mode then set up the GCR3 table only if the
2227 * device is PASID capable.
2228 */
2229 if (pdom_is_in_pt_mode(pdom) && !pdev_pasid_supported(dev_data))
2230 return ret;
2231
2232 /*
2233 * By default, setup GCR3 table to support MAX PASIDs
2234 * supported by the device/IOMMU.
2235 */
2236 ret = setup_gcr3_table(&dev_data->gcr3_info, iommu,
2237 max_pasids > 0 ? max_pasids : 1);
2238 if (ret)
2239 return ret;
2240
2241 /* Setup GCR3[0] only if domain is setup with v2 page table mode */
2242 if (!pdom_is_v2_pgtbl_mode(pdom))
2243 return ret;
2244
2245 pt_iommu_x86_64_hw_info(&pdom->amdv2, &pt_info);
2246 ret = update_gcr3(dev_data, 0, __sme_set(pt_info.gcr3_pt), true);
2247 if (ret)
2248 free_gcr3_table(&dev_data->gcr3_info);
2249
2250 return ret;
2251}
2252
2253static void destroy_gcr3_table(struct iommu_dev_data *dev_data,
2254 struct protection_domain *pdom)
2255{
2256 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
2257
2258 if (pdom_is_v2_pgtbl_mode(pdom))
2259 update_gcr3(dev_data, 0, 0, false);
2260
2261 if (gcr3_info->gcr3_tbl == NULL)
2262 return;
2263
2264 free_gcr3_table(gcr3_info);
2265}
2266
2267static int pdom_attach_iommu(struct amd_iommu *iommu,
2268 struct protection_domain *pdom)
2269{
2270 struct pdom_iommu_info *pdom_iommu_info, *curr;
2271 unsigned long flags;
2272 int ret = 0;
2273
2274 spin_lock_irqsave(&pdom->lock, flags);
2275
2276 pdom_iommu_info = xa_load(&pdom->iommu_array, iommu->index);
2277 if (pdom_iommu_info) {
2278 pdom_iommu_info->refcnt++;
2279 goto out_unlock;
2280 }
2281
2282 pdom_iommu_info = kzalloc_obj(*pdom_iommu_info, GFP_ATOMIC);
2283 if (!pdom_iommu_info) {
2284 ret = -ENOMEM;
2285 goto out_unlock;
2286 }
2287
2288 pdom_iommu_info->iommu = iommu;
2289 pdom_iommu_info->refcnt = 1;
2290
2291 curr = xa_cmpxchg(&pdom->iommu_array, iommu->index,
2292 NULL, pdom_iommu_info, GFP_ATOMIC);
2293 if (curr) {
2294 kfree(pdom_iommu_info);
2295 ret = -ENOSPC;
2296 goto out_unlock;
2297 }
2298
2299out_unlock:
2300 spin_unlock_irqrestore(&pdom->lock, flags);
2301 return ret;
2302}
2303
2304static void pdom_detach_iommu(struct amd_iommu *iommu,
2305 struct protection_domain *pdom)
2306{
2307 struct pdom_iommu_info *pdom_iommu_info;
2308 unsigned long flags;
2309
2310 spin_lock_irqsave(&pdom->lock, flags);
2311
2312 pdom_iommu_info = xa_load(&pdom->iommu_array, iommu->index);
2313 if (!pdom_iommu_info) {
2314 spin_unlock_irqrestore(&pdom->lock, flags);
2315 return;
2316 }
2317
2318 pdom_iommu_info->refcnt--;
2319 if (pdom_iommu_info->refcnt == 0) {
2320 xa_erase(&pdom->iommu_array, iommu->index);
2321 kfree(pdom_iommu_info);
2322 }
2323
2324 spin_unlock_irqrestore(&pdom->lock, flags);
2325}
2326
2327/*
2328 * If a device is not yet associated with a domain, this function makes the
2329 * device visible in the domain
2330 */
2331static int attach_device(struct device *dev,
2332 struct protection_domain *domain)
2333{
2334 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
2335 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
2336 struct pci_dev *pdev;
2337 unsigned long flags;
2338 int ret = 0;
2339
2340 mutex_lock(&dev_data->mutex);
2341
2342 if (dev_data->domain != NULL) {
2343 ret = -EBUSY;
2344 goto out;
2345 }
2346
2347 /* Do reference counting */
2348 ret = pdom_attach_iommu(iommu, domain);
2349 if (ret)
2350 goto out;
2351
2352 /* Setup GCR3 table */
2353 if (pdom_is_sva_capable(domain)) {
2354 ret = init_gcr3_table(dev_data, domain);
2355 if (ret) {
2356 pdom_detach_iommu(iommu, domain);
2357 goto out;
2358 }
2359 }
2360
2361 pdev = dev_is_pci(dev_data->dev) ? to_pci_dev(dev_data->dev) : NULL;
2362 if (pdev && pdom_is_sva_capable(domain)) {
2363 pdev_enable_caps(pdev);
2364
2365 /*
2366 * Device can continue to function even if IOPF
2367 * enablement failed. Hence in error path just
2368 * disable device PRI support.
2369 */
2370 if (amd_iommu_iopf_add_device(iommu, dev_data))
2371 pdev_disable_cap_pri(pdev);
2372 } else if (pdev) {
2373 pdev_enable_cap_ats(pdev);
2374 }
2375
2376 /* Update data structures */
2377 dev_data->domain = domain;
2378 spin_lock_irqsave(&domain->lock, flags);
2379 list_add(&dev_data->list, &domain->dev_list);
2380 spin_unlock_irqrestore(&domain->lock, flags);
2381
2382 /* Update device table */
2383 dev_update_dte(dev_data, true);
2384
2385out:
2386 mutex_unlock(&dev_data->mutex);
2387
2388 return ret;
2389}
2390
2391/*
2392 * Removes a device from a protection domain (with devtable_lock held)
2393 */
2394static void detach_device(struct device *dev)
2395{
2396 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
2397 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
2398 struct protection_domain *domain = dev_data->domain;
2399 unsigned long flags;
2400
2401 mutex_lock(&dev_data->mutex);
2402
2403 /*
2404 * First check if the device is still attached. It might already
2405 * be detached from its domain because the generic
2406 * iommu_detach_group code detached it and we try again here in
2407 * our alias handling.
2408 */
2409 if (WARN_ON(!dev_data->domain))
2410 goto out;
2411
2412 /* Remove IOPF handler */
2413 if (dev_data->ppr) {
2414 iopf_queue_flush_dev(dev);
2415 amd_iommu_iopf_remove_device(iommu, dev_data);
2416 }
2417
2418 if (dev_is_pci(dev))
2419 pdev_disable_caps(to_pci_dev(dev));
2420
2421 /* Clear DTE and flush the entry */
2422 dev_update_dte(dev_data, false);
2423
2424 /* Flush IOTLB and wait for the flushes to finish */
2425 spin_lock_irqsave(&domain->lock, flags);
2426 amd_iommu_domain_flush_all(domain);
2427 list_del(&dev_data->list);
2428 spin_unlock_irqrestore(&domain->lock, flags);
2429
2430 /* Clear GCR3 table */
2431 if (pdom_is_sva_capable(domain))
2432 destroy_gcr3_table(dev_data, domain);
2433
2434 /* Update data structures */
2435 dev_data->domain = NULL;
2436
2437 /* decrease reference counters - needs to happen after the flushes */
2438 pdom_detach_iommu(iommu, domain);
2439
2440out:
2441 mutex_unlock(&dev_data->mutex);
2442}
2443
2444static struct iommu_device *amd_iommu_probe_device(struct device *dev)
2445{
2446 struct iommu_device *iommu_dev;
2447 struct amd_iommu *iommu;
2448 struct iommu_dev_data *dev_data;
2449 int ret;
2450
2451 if (!check_device(dev))
2452 return ERR_PTR(-ENODEV);
2453
2454 iommu = rlookup_amd_iommu(dev);
2455 if (!iommu)
2456 return ERR_PTR(-ENODEV);
2457
2458 /* Not registered yet? */
2459 if (!iommu->iommu.ops)
2460 return ERR_PTR(-ENODEV);
2461
2462 if (dev_iommu_priv_get(dev))
2463 return &iommu->iommu;
2464
2465 ret = iommu_init_device(iommu, dev);
2466 if (ret) {
2467 dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
2468 iommu_dev = ERR_PTR(ret);
2469 iommu_ignore_device(iommu, dev);
2470 goto out_err;
2471 }
2472
2473 amd_iommu_set_pci_msi_domain(dev, iommu);
2474 iommu_dev = &iommu->iommu;
2475
2476 /*
2477 * If both the IOMMU and the device support PASID, then dev_data->max_pasids
2478 * will contain the maximum number of supported PASIDs, else it will be zero.
2479 */
2480 dev_data = dev_iommu_priv_get(dev);
2481 if (amd_iommu_pasid_supported() && dev_is_pci(dev) &&
2482 pdev_pasid_supported(dev_data)) {
2483 dev_data->max_pasids = min_t(u32, iommu->iommu.max_pasids,
2484 pci_max_pasids(to_pci_dev(dev)));
2485 }
2486
2487 if (amd_iommu_pgtable == PD_MODE_NONE) {
2488 pr_warn_once("%s: DMA translation not supported by iommu.\n",
2489 __func__);
2490 iommu_dev = ERR_PTR(-ENODEV);
2491 goto out_err;
2492 }
2493
2494 iommu_completion_wait(iommu);
2495
2496 if (FEATURE_NUM_INT_REMAP_SUP_2K(amd_iommu_efr2))
2497 dev_data->max_irqs = MAX_IRQS_PER_TABLE_2K;
2498 else
2499 dev_data->max_irqs = MAX_IRQS_PER_TABLE_512;
2500
2501 if (dev_is_pci(dev))
2502 pci_prepare_ats(to_pci_dev(dev), PAGE_SHIFT);
2503
2504out_err:
2505 return iommu_dev;
2506}
2507
2508static void amd_iommu_release_device(struct device *dev)
2509{
2510 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
2511
2512 WARN_ON(dev_data->domain);
2513
2514 /*
2515 * We keep dev_data around for unplugged devices and reuse it when the
2516 * device is re-plugged - not doing so would introduce a ton of races.
2517 */
2518}
2519
2520static struct iommu_group *amd_iommu_device_group(struct device *dev)
2521{
2522 if (dev_is_pci(dev))
2523 return pci_device_group(dev);
2524
2525 return acpihid_device_group(dev);
2526}
2527
2528/*****************************************************************************
2529 *
2530 * The following functions belong to the exported interface of AMD IOMMU
2531 *
2532 * This interface allows access to lower level functions of the IOMMU
2533 * like protection domain handling and assignement of devices to domains
2534 * which is not possible with the dma_ops interface.
2535 *
2536 *****************************************************************************/
2537
2538static void protection_domain_init(struct protection_domain *domain)
2539{
2540 spin_lock_init(&domain->lock);
2541 INIT_LIST_HEAD(&domain->dev_list);
2542 INIT_LIST_HEAD(&domain->dev_data_list);
2543 INIT_LIST_HEAD(&domain->viommu_list);
2544 xa_init(&domain->iommu_array);
2545}
2546
2547struct protection_domain *protection_domain_alloc(void)
2548{
2549 struct protection_domain *domain;
2550 int domid;
2551
2552 domain = kzalloc_obj(*domain);
2553 if (!domain)
2554 return NULL;
2555
2556 domid = amd_iommu_pdom_id_alloc();
2557 if (domid <= 0) {
2558 kfree(domain);
2559 return NULL;
2560 }
2561 domain->id = domid;
2562
2563 protection_domain_init(domain);
2564
2565 return domain;
2566}
2567
2568static bool amd_iommu_hd_support(struct amd_iommu *iommu)
2569{
2570 if (amd_iommu_hatdis)
2571 return false;
2572
2573 return iommu && (iommu->features & FEATURE_HDSUP);
2574}
2575
2576static spinlock_t *amd_iommu_get_top_lock(struct pt_iommu *iommupt)
2577{
2578 struct protection_domain *pdom =
2579 container_of(iommupt, struct protection_domain, iommu);
2580
2581 return &pdom->lock;
2582}
2583
2584/*
2585 * Update all HW references to the domain with a new pgtable configuration.
2586 */
2587static void amd_iommu_change_top(struct pt_iommu *iommu_table,
2588 phys_addr_t top_paddr, unsigned int top_level)
2589{
2590 struct protection_domain *pdom =
2591 container_of(iommu_table, struct protection_domain, iommu);
2592 struct iommu_dev_data *dev_data;
2593
2594 lockdep_assert_held(&pdom->lock);
2595
2596 /* Update the DTE for all devices attached to this domain */
2597 list_for_each_entry(dev_data, &pdom->dev_list, list) {
2598 struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);
2599
2600 /* Update the HW references with the new level and top ptr */
2601 set_dte_entry(iommu, dev_data, top_paddr, top_level);
2602 clone_aliases(iommu, dev_data->dev);
2603 }
2604
2605 list_for_each_entry(dev_data, &pdom->dev_list, list)
2606 device_flush_dte(dev_data);
2607
2608 domain_flush_complete(pdom);
2609}
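
/*
 * Expected invocation (a sketch based on the driver_ops wiring below,
 * not additional driver code): when the generic page-table code grows
 * the v1 table to a new top level, it is expected to take the lock
 * returned by amd_iommu_get_top_lock() (i.e. pdom->lock) and then call
 *
 *	amd_iommu_change_top(&pdom->iommu, new_top_paddr, new_top_level);
 *
 * which rewrites and flushes the DTE of every attached device, hence
 * the lockdep_assert_held() above.
 */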
2610
2611/*
2612 * amd_iommu_iotlb_sync_map() is used to generate flushes for non-present to
2613 * present (i.e. mapping) operations. It is a NOP if the IOMMU doesn't have
2614 * non-present caching (like hypervisor shadowing).
2615 */
2616static int amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
2617 unsigned long iova, size_t size)
2618{
2619 struct protection_domain *domain = to_pdomain(dom);
2620 unsigned long flags;
2621
2622 if (likely(!amd_iommu_np_cache))
2623 return 0;
2624
2625 spin_lock_irqsave(&domain->lock, flags);
2626 amd_iommu_domain_flush_pages(domain, iova, size);
2627 spin_unlock_irqrestore(&domain->lock, flags);
2628 return 0;
2629}
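
/*
 * Call-flow sketch (core IOMMU behaviour, shown here only for
 * illustration): the IOMMU core invokes this callback after a
 * successful map operation, roughly
 *
 *	iommu_map(dom, iova, paddr, size, prot, GFP_KERNEL)
 *		-> ops->iotlb_sync_map(dom, iova, size)
 *
 * so on hardware without non-present caching the amd_iommu_np_cache
 * check above turns the whole callback into a cheap no-op.
 */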
2630
2631static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
2632{
2633 struct protection_domain *dom = to_pdomain(domain);
2634 unsigned long flags;
2635
2636 spin_lock_irqsave(&dom->lock, flags);
2637 amd_iommu_domain_flush_all(dom);
2638 spin_unlock_irqrestore(&dom->lock, flags);
2639}
2640
2641static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
2642 struct iommu_iotlb_gather *gather)
2643{
2644 struct protection_domain *dom = to_pdomain(domain);
2645 unsigned long flags;
2646
2647 spin_lock_irqsave(&dom->lock, flags);
2648 amd_iommu_domain_flush_pages(dom, gather->start,
2649 gather->end - gather->start + 1);
2650 spin_unlock_irqrestore(&dom->lock, flags);
2651 iommu_put_pages_list(&gather->freelist);
2652}
2653
2654static const struct pt_iommu_driver_ops amd_hw_driver_ops_v1 = {
2655 .get_top_lock = amd_iommu_get_top_lock,
2656 .change_top = amd_iommu_change_top,
2657};
2658
2659static const struct iommu_domain_ops amdv1_ops = {
2660 IOMMU_PT_DOMAIN_OPS(amdv1),
2661 .iotlb_sync_map = amd_iommu_iotlb_sync_map,
2662 .flush_iotlb_all = amd_iommu_flush_iotlb_all,
2663 .iotlb_sync = amd_iommu_iotlb_sync,
2664 .attach_dev = amd_iommu_attach_device,
2665 .free = amd_iommu_domain_free,
2666 .enforce_cache_coherency = amd_iommu_enforce_cache_coherency,
2667};
2668
2669static const struct iommu_dirty_ops amdv1_dirty_ops = {
2670 IOMMU_PT_DIRTY_OPS(amdv1),
2671 .set_dirty_tracking = amd_iommu_set_dirty_tracking,
2672};
2673
2674static struct iommu_domain *amd_iommu_domain_alloc_paging_v1(struct device *dev,
2675 u32 flags)
2676{
2677 struct pt_iommu_amdv1_cfg cfg = {};
2678 struct protection_domain *domain;
2679 int ret;
2680
2681 if (amd_iommu_hatdis)
2682 return ERR_PTR(-EOPNOTSUPP);
2683
2684 domain = protection_domain_alloc();
2685 if (!domain)
2686 return ERR_PTR(-ENOMEM);
2687
2688 domain->pd_mode = PD_MODE_V1;
2689 domain->iommu.driver_ops = &amd_hw_driver_ops_v1;
2690 domain->iommu.nid = dev_to_node(dev);
2691 if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
2692 domain->domain.dirty_ops = &amdv1_dirty_ops;
2693
2694 /*
2695 * Someday FORCE_COHERENCE should be set by
2696 * amd_iommu_enforce_cache_coherency() like VT-d does.
2697 */
2698 cfg.common.features = BIT(PT_FEAT_DYNAMIC_TOP) |
2699 BIT(PT_FEAT_AMDV1_ENCRYPT_TABLES) |
2700 BIT(PT_FEAT_AMDV1_FORCE_COHERENCE);
2701
2702 /*
2703 * AMD's IOMMU can flush as many pages as necessary in a single flush.
2704 * Unless we run in a virtual machine, which can be inferred according
2705 * to whether "non-present cache" is on, it is probably best to prefer
2706 * (potentially) too extensive TLB flushing (i.e., more misses) over
2707 * multiple TLB flushes (i.e., more flushes). For virtual machines the
2708 * hypervisor needs to synchronize the host IOMMU PTEs with those of
2709 * the guest, and the trade-off is different: unnecessary TLB flushes
2710 * should be avoided.
2711 */
2712 if (amd_iommu_np_cache)
2713 cfg.common.features |= BIT(PT_FEAT_FLUSH_RANGE_NO_GAPS);
2714 else
2715 cfg.common.features |= BIT(PT_FEAT_FLUSH_RANGE);
2716
2717 cfg.common.hw_max_vasz_lg2 =
2718 min(64, (amd_iommu_hpt_level - 1) * 9 + 21);
2719 cfg.common.hw_max_oasz_lg2 = 52;
2720 cfg.starting_level = 2;
2721 domain->domain.ops = &amdv1_ops;
2722
2723 ret = pt_iommu_amdv1_init(&domain->amdv1, &cfg, GFP_KERNEL);
2724 if (ret) {
2725 amd_iommu_domain_free(&domain->domain);
2726 return ERR_PTR(ret);
2727 }
2728
2729 /*
2730 * Narrow the supported page sizes to those selected by the kernel
2731 * command line.
2732 */
2733 domain->domain.pgsize_bitmap &= amd_iommu_pgsize_bitmap;
2734 return &domain->domain;
2735}
2736
2737static const struct iommu_domain_ops amdv2_ops = {
2738 IOMMU_PT_DOMAIN_OPS(x86_64),
2739 .iotlb_sync_map = amd_iommu_iotlb_sync_map,
2740 .flush_iotlb_all = amd_iommu_flush_iotlb_all,
2741 .iotlb_sync = amd_iommu_iotlb_sync,
2742 .attach_dev = amd_iommu_attach_device,
2743 .free = amd_iommu_domain_free,
2744 /*
2745 * Note the AMDv2 page table format does not support a Force Coherency
2746 * bit, so enforce_cache_coherency should not be set. However VFIO is
2747 * not prepared to handle a case where some domains will support
2748 * enforcement and others do not. VFIO and iommufd will have to be fixed
2749 * before it can fully use the V2 page table. See the comment in
2750 * iommufd_hwpt_paging_alloc(). For now leave things as they have
2751 * historically been and lie about enforce_cache_coherency.
2752 */
2753 .enforce_cache_coherency = amd_iommu_enforce_cache_coherency,
2754};
2755
2756static struct iommu_domain *amd_iommu_domain_alloc_paging_v2(struct device *dev,
2757 u32 flags)
2758{
2759 struct pt_iommu_x86_64_cfg cfg = {};
2760 struct protection_domain *domain;
2761 int ret;
2762
2763 if (!amd_iommu_v2_pgtbl_supported())
2764 return ERR_PTR(-EOPNOTSUPP);
2765
2766 domain = protection_domain_alloc();
2767 if (!domain)
2768 return ERR_PTR(-ENOMEM);
2769
2770 domain->pd_mode = PD_MODE_V2;
2771 domain->iommu.nid = dev_to_node(dev);
2772
2773 cfg.common.features = BIT(PT_FEAT_X86_64_AMD_ENCRYPT_TABLES);
2774 if (amd_iommu_np_cache)
2775 cfg.common.features |= BIT(PT_FEAT_FLUSH_RANGE_NO_GAPS);
2776 else
2777 cfg.common.features |= BIT(PT_FEAT_FLUSH_RANGE);
2778
2779 /*
2780 * The v2 table behaves differently if it is attached to PASID 0 vs a
2781 * non-zero PASID. On PASID 0 it has no sign extension and the full
2782 * 57/48 bits decode the lower addresses. Otherwise it behaves like a
2783 * normal sign extended x86 page table. Since we want the domain to work
2784 * in both modes the top bit is removed and PT_FEAT_SIGN_EXTEND is not
2785 * set which creates a table that is compatible in both modes.
2786 */
2787 if (amd_iommu_gpt_level == PAGE_MODE_5_LEVEL) {
2788 cfg.common.hw_max_vasz_lg2 = 56;
2789 cfg.top_level = 4;
2790 } else {
2791 cfg.common.hw_max_vasz_lg2 = 47;
2792 cfg.top_level = 3;
2793 }
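	/*
	 * Worked example (illustrative): 5-level guest paging decodes a
	 * 57-bit virtual address; dropping the top/sign bit for PASID-0
	 * compatibility leaves hw_max_vasz_lg2 = 56. With 4-level paging
	 * 48 bits decode, giving 47 usable bits after the same adjustment.
	 */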
2794 cfg.common.hw_max_oasz_lg2 = 52;
2795 domain->domain.ops = &amdv2_ops;
2796
2797 ret = pt_iommu_x86_64_init(&domain->amdv2, &cfg, GFP_KERNEL);
2798 if (ret) {
2799 amd_iommu_domain_free(&domain->domain);
2800 return ERR_PTR(ret);
2801 }
2802 return &domain->domain;
2803}
2804
2805static inline bool is_nest_parent_supported(u32 flags)
2806{
2807 /* Only allow nest parent when these features are supported */
2808 return check_feature(FEATURE_GT) &&
2809 check_feature(FEATURE_GIOSUP) &&
2810 check_feature2(FEATURE_GCR3TRPMODE);
2811}
2812
2813static struct iommu_domain *
2814amd_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
2815 const struct iommu_user_data *user_data)
2816
2817{
2818 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
2819 const u32 supported_flags = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
2820 IOMMU_HWPT_ALLOC_PASID |
2821 IOMMU_HWPT_ALLOC_NEST_PARENT;
2822
2823 if ((flags & ~supported_flags) || user_data)
2824 return ERR_PTR(-EOPNOTSUPP);
2825
2826 switch (flags & supported_flags) {
2827 case IOMMU_HWPT_ALLOC_DIRTY_TRACKING:
2828 case IOMMU_HWPT_ALLOC_NEST_PARENT:
2829 case IOMMU_HWPT_ALLOC_DIRTY_TRACKING | IOMMU_HWPT_ALLOC_NEST_PARENT:
2830 /*
2831 * Allocate domain with v1 page table for dirty tracking
2832 * and/or Nest parent.
2833 */
2834 if ((flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) &&
2835 !amd_iommu_hd_support(iommu))
2836 break;
2837
2838 if ((flags & IOMMU_HWPT_ALLOC_NEST_PARENT) &&
2839 !is_nest_parent_supported(flags))
2840 break;
2841
2842 return amd_iommu_domain_alloc_paging_v1(dev, flags);
2843 case IOMMU_HWPT_ALLOC_PASID:
2844 /* Allocate domain with v2 page table if IOMMU supports PASID. */
2845 if (!amd_iommu_pasid_supported())
2846 break;
2847 return amd_iommu_domain_alloc_paging_v2(dev, flags);
2848 case 0: {
2849 struct iommu_domain *ret;
2850
2851 /* If nothing specific is required use the kernel commandline default */
2852 if (amd_iommu_pgtable == PD_MODE_V1) {
2853 ret = amd_iommu_domain_alloc_paging_v1(dev, flags);
2854 if (ret != ERR_PTR(-EOPNOTSUPP))
2855 return ret;
2856 return amd_iommu_domain_alloc_paging_v2(dev, flags);
2857 }
2858 ret = amd_iommu_domain_alloc_paging_v2(dev, flags);
2859 if (ret != ERR_PTR(-EOPNOTSUPP))
2860 return ret;
2861 return amd_iommu_domain_alloc_paging_v1(dev, flags);
2862 }
2863 default:
2864 break;
2865 }
2866 return ERR_PTR(-EOPNOTSUPP);
2867}
2868
2869void amd_iommu_domain_free(struct iommu_domain *dom)
2870{
2871 struct protection_domain *domain = to_pdomain(dom);
2872
2873 WARN_ON(!list_empty(&domain->dev_list));
2874 pt_iommu_deinit(&domain->iommu);
2875 amd_iommu_pdom_id_free(domain->id);
2876 kfree(domain);
2877}
2878
2879static int blocked_domain_attach_device(struct iommu_domain *domain,
2880 struct device *dev,
2881 struct iommu_domain *old)
2882{
2883 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
2884
2885 if (dev_data->domain)
2886 detach_device(dev);
2887
2888 /* Clear DTE and flush the entry */
2889 mutex_lock(&dev_data->mutex);
2890 dev_update_dte(dev_data, false);
2891 mutex_unlock(&dev_data->mutex);
2892
2893 return 0;
2894}
2895
2896static int blocked_domain_set_dev_pasid(struct iommu_domain *domain,
2897 struct device *dev, ioasid_t pasid,
2898 struct iommu_domain *old)
2899{
2900 amd_iommu_remove_dev_pasid(dev, pasid, old);
2901 return 0;
2902}
2903
2904static struct iommu_domain blocked_domain = {
2905 .type = IOMMU_DOMAIN_BLOCKED,
2906 .ops = &(const struct iommu_domain_ops) {
2907 .attach_dev = blocked_domain_attach_device,
2908 .set_dev_pasid = blocked_domain_set_dev_pasid,
2909 }
2910};
2911
2912static struct protection_domain identity_domain;
2913
2914static int amd_iommu_identity_attach(struct iommu_domain *dom, struct device *dev,
2915 struct iommu_domain *old)
2916{
2917 /*
2918 * Don't allow attaching a device to the identity domain if SNP is
2919 * enabled.
2920 */
2921 if (amd_iommu_snp_en)
2922 return -EINVAL;
2923
2924 return amd_iommu_attach_device(dom, dev, old);
2925}
2926
2927static const struct iommu_domain_ops identity_domain_ops = {
2928 .attach_dev = amd_iommu_identity_attach,
2929};
2930
2931void amd_iommu_init_identity_domain(void)
2932{
2933 struct iommu_domain *domain = &identity_domain.domain;
2934
2935 domain->type = IOMMU_DOMAIN_IDENTITY;
2936 domain->ops = &identity_domain_ops;
2937 domain->owner = &amd_iommu_ops;
2938
2939 identity_domain.id = amd_iommu_pdom_id_alloc();
2940
2941 protection_domain_init(&identity_domain);
2942}
2943
2944static int amd_iommu_attach_device(struct iommu_domain *dom, struct device *dev,
2945 struct iommu_domain *old)
2946{
2947 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
2948 struct protection_domain *domain = to_pdomain(dom);
2949 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
2950 int ret;
2951
2952 /*
2953 * Skip attaching the device to the domain if the new domain is the same
2954 * as the device's current domain.
2955 */
2956 if (dev_data->domain == domain)
2957 return 0;
2958
2959 dev_data->defer_attach = false;
2960
2961 /*
2962 * Restrict to devices with compatible IOMMU hardware support
2963 * when enforcement of dirty tracking is enabled.
2964 */
2965 if (dom->dirty_ops && !amd_iommu_hd_support(iommu))
2966 return -EINVAL;
2967
2968 if (dev_data->domain)
2969 detach_device(dev);
2970
2971 ret = attach_device(dev, domain);
2972
2973#ifdef CONFIG_IRQ_REMAP
2974 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
2975 if (dom->type == IOMMU_DOMAIN_UNMANAGED)
2976 dev_data->use_vapic = 1;
2977 else
2978 dev_data->use_vapic = 0;
2979 }
2980#endif
2981
2982 return ret;
2983}
2984
2985static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap)
2986{
2987 switch (cap) {
2988 case IOMMU_CAP_CACHE_COHERENCY:
2989 return true;
2990 case IOMMU_CAP_NOEXEC:
2991 return false;
2992 case IOMMU_CAP_PRE_BOOT_PROTECTION:
2993 return amdr_ivrs_remap_support;
2994 case IOMMU_CAP_ENFORCE_CACHE_COHERENCY:
2995 return true;
2996 case IOMMU_CAP_DIRTY_TRACKING: {
2997 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
2998
2999 return amd_iommu_hd_support(iommu);
3000 }
3001 case IOMMU_CAP_PCI_ATS_SUPPORTED: {
3002 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
3003
3004 return amd_iommu_iotlb_sup &&
3005 (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP);
3006 }
3007 default:
3008 break;
3009 }
3010
3011 return false;
3012}
3013
3014static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
3015 bool enable)
3016{
3017 struct protection_domain *pdomain = to_pdomain(domain);
3018 struct dev_table_entry *dte;
3019 struct iommu_dev_data *dev_data;
3020 bool domain_flush = false;
3021 struct amd_iommu *iommu;
3022 unsigned long flags;
3023 u64 new;
3024
3025 spin_lock_irqsave(&pdomain->lock, flags);
3026 if (!(pdomain->dirty_tracking ^ enable)) {
3027 spin_unlock_irqrestore(&pdomain->lock, flags);
3028 return 0;
3029 }
3030
3031 list_for_each_entry(dev_data, &pdomain->dev_list, list) {
3032 spin_lock(&dev_data->dte_lock);
3033 iommu = get_amd_iommu_from_dev_data(dev_data);
3034 dte = &get_dev_table(iommu)[dev_data->devid];
3035 new = dte->data[0];
3036 new = (enable ? new | DTE_FLAG_HAD : new & ~DTE_FLAG_HAD);
3037 dte->data[0] = new;
3038 spin_unlock(&dev_data->dte_lock);
3039
3040 /* Flush device DTE */
3041 device_flush_dte(dev_data);
3042 domain_flush = true;
3043 }
3044
3045 /* Flush IOTLB to mark IOPTE dirty on the next translation(s) */
3046 if (domain_flush)
3047 amd_iommu_domain_flush_all(pdomain);
3048
3049 pdomain->dirty_tracking = enable;
3050 spin_unlock_irqrestore(&pdomain->lock, flags);
3051
3052 return 0;
3053}
3054
3055static void amd_iommu_get_resv_regions(struct device *dev,
3056 struct list_head *head)
3057{
3058 struct iommu_resv_region *region;
3059 struct unity_map_entry *entry;
3060 struct amd_iommu *iommu;
3061 struct amd_iommu_pci_seg *pci_seg;
3062 int devid, sbdf;
3063
3064 sbdf = get_device_sbdf_id(dev);
3065 if (sbdf < 0)
3066 return;
3067
3068 devid = PCI_SBDF_TO_DEVID(sbdf);
3069 iommu = get_amd_iommu_from_dev(dev);
3070 pci_seg = iommu->pci_seg;
3071
3072 list_for_each_entry(entry, &pci_seg->unity_map, list) {
3073 int type, prot = 0;
3074 size_t length;
3075
3076 if (devid < entry->devid_start || devid > entry->devid_end)
3077 continue;
3078
3079 type = IOMMU_RESV_DIRECT;
3080 length = entry->address_end - entry->address_start;
3081 if (entry->prot & IOMMU_PROT_IR)
3082 prot |= IOMMU_READ;
3083 if (entry->prot & IOMMU_PROT_IW)
3084 prot |= IOMMU_WRITE;
3085 if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE)
3086 /* Exclusion range */
3087 type = IOMMU_RESV_RESERVED;
3088
3089 region = iommu_alloc_resv_region(entry->address_start,
3090 length, prot, type,
3091 GFP_KERNEL);
3092 if (!region) {
3093 dev_err(dev, "Out of memory allocating dm-regions\n");
3094 return;
3095 }
3096 list_add_tail(&region->list, head);
3097 }
3098
3099 region = iommu_alloc_resv_region(MSI_RANGE_START,
3100 MSI_RANGE_END - MSI_RANGE_START + 1,
3101 0, IOMMU_RESV_MSI, GFP_KERNEL);
3102 if (!region)
3103 return;
3105 list_add_tail(&region->list, head);
3105
3106 if (amd_iommu_ht_range_ignore())
3107 return;
3108
3109 region = iommu_alloc_resv_region(HT_RANGE_START,
3110 HT_RANGE_END - HT_RANGE_START + 1,
3111 0, IOMMU_RESV_RESERVED, GFP_KERNEL);
3112 if (!region)
3113 return;
3115 list_add_tail(&region->list, head);
3115}
3116
3117static bool amd_iommu_is_attach_deferred(struct device *dev)
3118{
3119 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
3120
3121 return dev_data->defer_attach;
3122}
3123
3124static int amd_iommu_def_domain_type(struct device *dev)
3125{
3126 struct iommu_dev_data *dev_data;
3127
3128 dev_data = dev_iommu_priv_get(dev);
3129 if (!dev_data)
3130 return 0;
3131
3132 /* Always use DMA domain for untrusted device */
3133 if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
3134 return IOMMU_DOMAIN_DMA;
3135
3136 /*
3137 * Do not identity map IOMMUv2 capable devices when:
3138 * - memory encryption is active, because some of those devices
3139 * (AMD GPUs) don't have the encryption bit in their DMA-mask
3140 * and require remapping.
3141 * - SNP is enabled, because it prohibits DTE[Mode]=0.
3142 */
3143 if (pdev_pasid_supported(dev_data) &&
3144 !cc_platform_has(CC_ATTR_MEM_ENCRYPT) &&
3145 !amd_iommu_snp_en) {
3146 return IOMMU_DOMAIN_IDENTITY;
3147 }
3148
3149 return 0;
3150}
3151
3152static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain)
3153{
3154 /* IOMMU_PTE_FC is always set */
3155 return true;
3156}
3157
3158const struct iommu_ops amd_iommu_ops = {
3159 .capable = amd_iommu_capable,
3160 .hw_info = amd_iommufd_hw_info,
3161 .blocked_domain = &blocked_domain,
3162 .release_domain = &blocked_domain,
3163 .identity_domain = &identity_domain.domain,
3164 .domain_alloc_paging_flags = amd_iommu_domain_alloc_paging_flags,
3165 .domain_alloc_sva = amd_iommu_domain_alloc_sva,
3166 .probe_device = amd_iommu_probe_device,
3167 .release_device = amd_iommu_release_device,
3168 .device_group = amd_iommu_device_group,
3169 .get_resv_regions = amd_iommu_get_resv_regions,
3170 .is_attach_deferred = amd_iommu_is_attach_deferred,
3171 .def_domain_type = amd_iommu_def_domain_type,
3172 .page_response = amd_iommu_page_response,
3173 .get_viommu_size = amd_iommufd_get_viommu_size,
3174 .viommu_init = amd_iommufd_viommu_init,
3175};
3176
3177#ifdef CONFIG_IRQ_REMAP
3178
3179/*****************************************************************************
3180 *
3181 * Interrupt Remapping Implementation
3182 *
3183 *****************************************************************************/
3184
3185static struct irq_chip amd_ir_chip;
3186static DEFINE_SPINLOCK(iommu_table_lock);
3187
3188static int iommu_flush_dev_irt(struct pci_dev *unused, u16 devid, void *data)
3189{
3190 int ret;
3191 struct iommu_cmd cmd;
3192 struct amd_iommu *iommu = data;
3193
3194 build_inv_irt(&cmd, devid);
3195 ret = __iommu_queue_command_sync(iommu, &cmd, true);
3196 return ret;
3197}
3198
3199static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
3200{
3201 int ret;
3202 u64 data;
3203 unsigned long flags;
3204 struct iommu_cmd cmd;
3205 struct pci_dev *pdev = NULL;
3206 struct iommu_dev_data *dev_data = search_dev_data(iommu, devid);
3207
3208 if (iommu->irtcachedis_enabled)
3209 return;
3210
3211 if (dev_data && dev_data->dev && dev_is_pci(dev_data->dev))
3212 pdev = to_pci_dev(dev_data->dev);
3213
3214 raw_spin_lock_irqsave(&iommu->lock, flags);
3215 data = get_cmdsem_val(iommu);
3216 build_completion_wait(&cmd, iommu, data);
3217
3218 if (pdev)
3219 ret = pci_for_each_dma_alias(pdev, iommu_flush_dev_irt, iommu);
3220 else
3221 ret = iommu_flush_dev_irt(NULL, devid, iommu);
3222 if (ret)
3223 goto out_err;
3224
3225 ret = __iommu_queue_command_sync(iommu, &cmd, false);
3226 if (ret)
3227 goto out_err;
3228 raw_spin_unlock_irqrestore(&iommu->lock, flags);
3229
3230 wait_on_sem(iommu, data);
3231 return;
3232
3233out_err:
3234 raw_spin_unlock_irqrestore(&iommu->lock, flags);
3235}
3236
3237static inline u8 iommu_get_int_tablen(struct iommu_dev_data *dev_data)
3238{
3239 if (dev_data && dev_data->max_irqs == MAX_IRQS_PER_TABLE_2K)
3240 return DTE_INTTABLEN_2K;
3241 return DTE_INTTABLEN_512;
3242}
3243
3244static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
3245 struct irq_remap_table *table)
3246{
3247 u64 new;
3248 struct dev_table_entry *dte = &get_dev_table(iommu)[devid];
3249 struct iommu_dev_data *dev_data = search_dev_data(iommu, devid);
3250
3251 if (dev_data)
3252 spin_lock(&dev_data->dte_lock);
3253
3254 new = READ_ONCE(dte->data[2]);
3255 new &= ~DTE_IRQ_PHYS_ADDR_MASK;
3256 new |= iommu_virt_to_phys(table->table);
3257 new |= DTE_IRQ_REMAP_INTCTL;
3258 new |= iommu_get_int_tablen(dev_data);
3259 new |= DTE_IRQ_REMAP_ENABLE;
3260 WRITE_ONCE(dte->data[2], new);
3261
3262 if (dev_data)
3263 spin_unlock(&dev_data->dte_lock);
3264}
3265
3266static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid)
3267{
3268 struct irq_remap_table *table;
3269 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
3270
3271 if (WARN_ONCE(!pci_seg->rlookup_table[devid],
3272 "%s: no iommu for devid %x:%x\n",
3273 __func__, pci_seg->id, devid))
3274 return NULL;
3275
3276 table = pci_seg->irq_lookup_table[devid];
3277 if (WARN_ONCE(!table, "%s: no table for devid %x:%x\n",
3278 __func__, pci_seg->id, devid))
3279 return NULL;
3280
3281 return table;
3282}
3283
3284static struct irq_remap_table *__alloc_irq_table(int nid, size_t size)
3285{
3286 struct irq_remap_table *table;
3287
3288 table = kzalloc_obj(*table);
3289 if (!table)
3290 return NULL;
3291
3292 table->table = iommu_alloc_pages_node_sz(
3293 nid, GFP_KERNEL, max(DTE_INTTAB_ALIGNMENT, size));
3294 if (!table->table) {
3295 kfree(table);
3296 return NULL;
3297 }
3298 raw_spin_lock_init(&table->lock);
3299
3300 return table;
3301}
3302
3303static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
3304 struct irq_remap_table *table)
3305{
3306 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
3307
3308 pci_seg->irq_lookup_table[devid] = table;
3309 set_dte_irq_entry(iommu, devid, table);
3310 iommu_flush_dte(iommu, devid);
3311}
3312
3313static int set_remap_table_entry_alias(struct pci_dev *pdev, u16 alias,
3314 void *data)
3315{
3316 struct irq_remap_table *table = data;
3317 struct amd_iommu_pci_seg *pci_seg;
3318 struct amd_iommu *iommu = rlookup_amd_iommu(&pdev->dev);
3319
3320 if (!iommu)
3321 return -EINVAL;
3322
3323 pci_seg = iommu->pci_seg;
3324 pci_seg->irq_lookup_table[alias] = table;
3325 set_dte_irq_entry(iommu, alias, table);
3326 iommu_flush_dte(pci_seg->rlookup_table[alias], alias);
3327
3328 return 0;
3329}
3330
3331static inline size_t get_irq_table_size(unsigned int max_irqs)
3332{
3333 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
3334 return max_irqs * sizeof(u32);
3335
3336 return max_irqs * (sizeof(u64) * 2);
3337}
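
/*
 * Size example (illustrative): with max_irqs = 512 the legacy 32-bit
 * IRTE format needs 512 * sizeof(u32) = 2 KiB, while the 128-bit GA
 * format needs 512 * 16 bytes = 8 KiB. The 2K-entry variant scales the
 * same way (8 KiB and 32 KiB respectively).
 */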
3338
3339static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu,
3340 u16 devid, struct pci_dev *pdev,
3341 unsigned int max_irqs)
3342{
3343 struct irq_remap_table *table = NULL;
3344 struct irq_remap_table *new_table = NULL;
3345 struct amd_iommu_pci_seg *pci_seg;
3346 unsigned long flags;
3347 int nid = iommu->dev ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE;
3348 u16 alias;
3349
3350 spin_lock_irqsave(&iommu_table_lock, flags);
3351
3352 pci_seg = iommu->pci_seg;
3353 table = pci_seg->irq_lookup_table[devid];
3354 if (table)
3355 goto out_unlock;
3356
3357 alias = pci_seg->alias_table[devid];
3358 table = pci_seg->irq_lookup_table[alias];
3359 if (table) {
3360 set_remap_table_entry(iommu, devid, table);
3361 goto out_wait;
3362 }
3363 spin_unlock_irqrestore(&iommu_table_lock, flags);
3364
3365 /* Nothing there yet, allocate new irq remapping table */
3366 new_table = __alloc_irq_table(nid, get_irq_table_size(max_irqs));
3367 if (!new_table)
3368 return NULL;
3369
3370 spin_lock_irqsave(&iommu_table_lock, flags);
3371
3372 table = pci_seg->irq_lookup_table[devid];
3373 if (table)
3374 goto out_unlock;
3375
3376 table = pci_seg->irq_lookup_table[alias];
3377 if (table) {
3378 set_remap_table_entry(iommu, devid, table);
3379 goto out_wait;
3380 }
3381
3382 table = new_table;
3383 new_table = NULL;
3384
3385 if (pdev)
3386 pci_for_each_dma_alias(pdev, set_remap_table_entry_alias,
3387 table);
3388 else
3389 set_remap_table_entry(iommu, devid, table);
3390
3391 if (devid != alias)
3392 set_remap_table_entry(iommu, alias, table);
3393
3394out_wait:
3395 iommu_completion_wait(iommu);
3396
3397out_unlock:
3398 spin_unlock_irqrestore(&iommu_table_lock, flags);
3399
3400 if (new_table) {
3401 iommu_free_pages(new_table->table);
3402 kfree(new_table);
3403 }
3404 return table;
3405}
3406
3407static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count,
3408 bool align, struct pci_dev *pdev,
3409 unsigned long max_irqs)
3410{
3411 struct irq_remap_table *table;
3412 int index, c, alignment = 1;
3413 unsigned long flags;
3414
3415 table = alloc_irq_table(iommu, devid, pdev, max_irqs);
3416 if (!table)
3417 return -ENODEV;
3418
3419 if (align)
3420 alignment = roundup_pow_of_two(count);
3421
3422 raw_spin_lock_irqsave(&table->lock, flags);
3423
3424 /* Scan table for free entries */
3425 for (index = ALIGN(table->min_index, alignment), c = 0;
3426 index < max_irqs;) {
3427 if (!iommu->irte_ops->is_allocated(table, index)) {
3428 c += 1;
3429 } else {
3430 c = 0;
3431 index = ALIGN(index + 1, alignment);
3432 continue;
3433 }
3434
3435 if (c == count) {
3436 for (; c != 0; --c)
3437 iommu->irte_ops->set_allocated(table, index - c + 1);
3438
3439 index -= count - 1;
3440 goto out;
3441 }
3442
3443 index++;
3444 }
3445
3446 index = -ENOSPC;
3447
3448out:
3449 raw_spin_unlock_irqrestore(&table->lock, flags);
3450
3451 return index;
3452}
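
/*
 * Scan example (illustrative only): a 4-vector multi-MSI allocation
 * requests count = 4 with align = true, so alignment becomes
 * roundup_pow_of_two(4) = 4. The scan starts at
 * ALIGN(table->min_index, 4) and jumps to the next aligned index
 * whenever an allocated entry is hit; the first run of four
 * consecutive free IRTEs is marked allocated and its first index is
 * returned, or -ENOSPC if no such run exists below max_irqs.
 */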
3453
3454static int __modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
3455 struct irte_ga *irte)
3456{
3457 struct irq_remap_table *table;
3458 struct irte_ga *entry;
3459 unsigned long flags;
3460 u128 old;
3461
3462 table = get_irq_table(iommu, devid);
3463 if (!table)
3464 return -ENOMEM;
3465
3466 raw_spin_lock_irqsave(&table->lock, flags);
3467
3468 entry = (struct irte_ga *)table->table;
3469 entry = &entry[index];
3470
3471 /*
3472 * We use cmpxchg16 to atomically update the 128-bit IRTE,
3473 * and it cannot be updated by the hardware or other processors
3474 * behind us, so the return value of cmpxchg16 should be the
3475 * same as the old value.
3476 */
3477 old = entry->irte;
3478 WARN_ON(!try_cmpxchg128(&entry->irte, &old, irte->irte));
3479
3480 raw_spin_unlock_irqrestore(&table->lock, flags);
3481
3482 return 0;
3483}
3484
3485static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
3486 struct irte_ga *irte)
3487{
3488 int ret;
3489
3490 ret = __modify_irte_ga(iommu, devid, index, irte);
3491 if (ret)
3492 return ret;
3493
3494 iommu_flush_irt_and_complete(iommu, devid);
3495
3496 return 0;
3497}
3498
3499static int modify_irte(struct amd_iommu *iommu,
3500 u16 devid, int index, union irte *irte)
3501{
3502 struct irq_remap_table *table;
3503 unsigned long flags;
3504
3505 table = get_irq_table(iommu, devid);
3506 if (!table)
3507 return -ENOMEM;
3508
3509 raw_spin_lock_irqsave(&table->lock, flags);
3510 table->table[index] = irte->val;
3511 raw_spin_unlock_irqrestore(&table->lock, flags);
3512
3513 iommu_flush_irt_and_complete(iommu, devid);
3514
3515 return 0;
3516}
3517
3518static void free_irte(struct amd_iommu *iommu, u16 devid, int index)
3519{
3520 struct irq_remap_table *table;
3521 unsigned long flags;
3522
3523 table = get_irq_table(iommu, devid);
3524 if (!table)
3525 return;
3526
3527 raw_spin_lock_irqsave(&table->lock, flags);
3528 iommu->irte_ops->clear_allocated(table, index);
3529 raw_spin_unlock_irqrestore(&table->lock, flags);
3530
3531 iommu_flush_irt_and_complete(iommu, devid);
3532}
3533
3534static void irte_prepare(void *entry,
3535 u32 delivery_mode, bool dest_mode,
3536 u8 vector, u32 dest_apicid, int devid)
3537{
3538 union irte *irte = (union irte *) entry;
3539
3540 irte->val = 0;
3541 irte->fields.vector = vector;
3542 irte->fields.int_type = delivery_mode;
3543 irte->fields.destination = dest_apicid;
3544 irte->fields.dm = dest_mode;
3545 irte->fields.valid = 1;
3546}
3547
3548static void irte_ga_prepare(void *entry,
3549 u32 delivery_mode, bool dest_mode,
3550 u8 vector, u32 dest_apicid, int devid)
3551{
3552 struct irte_ga *irte = (struct irte_ga *) entry;
3553
3554 irte->lo.val = 0;
3555 irte->hi.val = 0;
3556 irte->lo.fields_remap.int_type = delivery_mode;
3557 irte->lo.fields_remap.dm = dest_mode;
3558 irte->hi.fields.vector = vector;
3559 irte->lo.fields_remap.destination = APICID_TO_IRTE_DEST_LO(dest_apicid);
3560 irte->hi.fields.destination = APICID_TO_IRTE_DEST_HI(dest_apicid);
3561 irte->lo.fields_remap.valid = 1;
3562}
3563
3564static void irte_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
3565{
3566 union irte *irte = (union irte *) entry;
3567
3568 irte->fields.valid = 1;
3569 modify_irte(iommu, devid, index, irte);
3570}
3571
3572static void irte_ga_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
3573{
3574 struct irte_ga *irte = (struct irte_ga *) entry;
3575
3576 irte->lo.fields_remap.valid = 1;
3577 modify_irte_ga(iommu, devid, index, irte);
3578}
3579
3580static void irte_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
3581{
3582 union irte *irte = (union irte *) entry;
3583
3584 irte->fields.valid = 0;
3585 modify_irte(iommu, devid, index, irte);
3586}
3587
3588static void irte_ga_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
3589{
3590 struct irte_ga *irte = (struct irte_ga *) entry;
3591
3592 irte->lo.fields_remap.valid = 0;
3593 modify_irte_ga(iommu, devid, index, irte);
3594}
3595
3596static void irte_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
3597 u8 vector, u32 dest_apicid)
3598{
3599 union irte *irte = (union irte *) entry;
3600
3601 irte->fields.vector = vector;
3602 irte->fields.destination = dest_apicid;
3603 modify_irte(iommu, devid, index, irte);
3604}
3605
3606static void irte_ga_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
3607 u8 vector, u32 dest_apicid)
3608{
3609 struct irte_ga *irte = (struct irte_ga *) entry;
3610
3611 if (!irte->lo.fields_remap.guest_mode) {
3612 irte->hi.fields.vector = vector;
3613 irte->lo.fields_remap.destination =
3614 APICID_TO_IRTE_DEST_LO(dest_apicid);
3615 irte->hi.fields.destination =
3616 APICID_TO_IRTE_DEST_HI(dest_apicid);
3617 modify_irte_ga(iommu, devid, index, irte);
3618 }
3619}
3620
3621#define IRTE_ALLOCATED (~1U)
3622static void irte_set_allocated(struct irq_remap_table *table, int index)
3623{
3624 table->table[index] = IRTE_ALLOCATED;
3625}
3626
3627static void irte_ga_set_allocated(struct irq_remap_table *table, int index)
3628{
3629 struct irte_ga *ptr = (struct irte_ga *)table->table;
3630 struct irte_ga *irte = &ptr[index];
3631
3632 memset(&irte->lo.val, 0, sizeof(u64));
3633 memset(&irte->hi.val, 0, sizeof(u64));
3634 irte->hi.fields.vector = 0xff;
3635}
3636
3637static bool irte_is_allocated(struct irq_remap_table *table, int index)
3638{
3639 union irte *ptr = (union irte *)table->table;
3640 union irte *irte = &ptr[index];
3641
3642 return irte->val != 0;
3643}
3644
3645static bool irte_ga_is_allocated(struct irq_remap_table *table, int index)
3646{
3647 struct irte_ga *ptr = (struct irte_ga *)table->table;
3648 struct irte_ga *irte = &ptr[index];
3649
3650 return irte->hi.fields.vector != 0;
3651}
3652
3653static void irte_clear_allocated(struct irq_remap_table *table, int index)
3654{
3655 table->table[index] = 0;
3656}
3657
3658static void irte_ga_clear_allocated(struct irq_remap_table *table, int index)
3659{
3660 struct irte_ga *ptr = (struct irte_ga *)table->table;
3661 struct irte_ga *irte = &ptr[index];
3662
3663 memset(&irte->lo.val, 0, sizeof(u64));
3664 memset(&irte->hi.val, 0, sizeof(u64));
3665}
3666
3667static int get_devid(struct irq_alloc_info *info)
3668{
3669 switch (info->type) {
3670 case X86_IRQ_ALLOC_TYPE_IOAPIC:
3671 return get_ioapic_devid(info->devid);
3672 case X86_IRQ_ALLOC_TYPE_HPET:
3673 return get_hpet_devid(info->devid);
3674 case X86_IRQ_ALLOC_TYPE_PCI_MSI:
3675 case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
3676 return get_device_sbdf_id(msi_desc_to_dev(info->desc));
3677 default:
3678 WARN_ON_ONCE(1);
3679 return -1;
3680 }
3681}
3682
3683struct irq_remap_ops amd_iommu_irq_ops = {
3684 .prepare = amd_iommu_prepare,
3685 .enable = amd_iommu_enable,
3686 .disable = amd_iommu_disable,
3687 .reenable = amd_iommu_reenable,
3688 .enable_faulting = amd_iommu_enable_faulting,
3689};
3690
3691static void fill_msi_msg(struct msi_msg *msg, u32 index)
3692{
3693 msg->data = index;
3694 msg->address_lo = 0;
3695 msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW;
3696 /*
3697 * The struct msi_msg.dest_mode_logical is used to set the DM bit
3698 * in the MSI Message Address Register. For devices with 2K int-remap support,
3699 * this bit must be set to 1 regardless of the actual destination
3700 * mode, which is signified by IRTE[DM].
3701 */
3702 if (FEATURE_NUM_INT_REMAP_SUP_2K(amd_iommu_efr2))
3703 msg->arch_addr_lo.dest_mode_logical = true;
3704 msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;
3705}
3706
3707static void irq_remapping_prepare_irte(struct amd_ir_data *data,
3708 struct irq_cfg *irq_cfg,
3709 struct irq_alloc_info *info,
3710 int devid, int index, int sub_handle)
3711{
3712 struct irq_2_irte *irte_info = &data->irq_2_irte;
3713 struct amd_iommu *iommu = data->iommu;
3714
3715 if (!iommu)
3716 return;
3717
3718 data->irq_2_irte.devid = devid;
3719 data->irq_2_irte.index = index + sub_handle;
3720 iommu->irte_ops->prepare(data->entry, APIC_DELIVERY_MODE_FIXED,
3721 apic->dest_mode_logical, irq_cfg->vector,
3722 irq_cfg->dest_apicid, devid);
3723
3724 switch (info->type) {
3725 case X86_IRQ_ALLOC_TYPE_IOAPIC:
3726 case X86_IRQ_ALLOC_TYPE_HPET:
3727 case X86_IRQ_ALLOC_TYPE_PCI_MSI:
3728 case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
3729 fill_msi_msg(&data->msi_entry, irte_info->index);
3730 break;
3731
3732 default:
3733 BUG_ON(1);
3734 break;
3735 }
3736}
3737
3738struct amd_irte_ops irte_32_ops = {
3739 .prepare = irte_prepare,
3740 .activate = irte_activate,
3741 .deactivate = irte_deactivate,
3742 .set_affinity = irte_set_affinity,
3743 .set_allocated = irte_set_allocated,
3744 .is_allocated = irte_is_allocated,
3745 .clear_allocated = irte_clear_allocated,
3746};
3747
3748struct amd_irte_ops irte_128_ops = {
3749 .prepare = irte_ga_prepare,
3750 .activate = irte_ga_activate,
3751 .deactivate = irte_ga_deactivate,
3752 .set_affinity = irte_ga_set_affinity,
3753 .set_allocated = irte_ga_set_allocated,
3754 .is_allocated = irte_ga_is_allocated,
3755 .clear_allocated = irte_ga_clear_allocated,
3756};
3757
3758static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
3759 unsigned int nr_irqs, void *arg)
3760{
3761 struct irq_alloc_info *info = arg;
3762 struct irq_data *irq_data;
3763 struct amd_ir_data *data = NULL;
3764 struct amd_iommu *iommu;
3765 struct irq_cfg *cfg;
3766 struct iommu_dev_data *dev_data;
3767 unsigned long max_irqs;
3768 int i, ret, devid, seg, sbdf;
3769 int index;
3770
3771 if (!info)
3772 return -EINVAL;
3773 if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI)
3774 return -EINVAL;
3775
3776 sbdf = get_devid(info);
3777 if (sbdf < 0)
3778 return -EINVAL;
3779
3780 seg = PCI_SBDF_TO_SEGID(sbdf);
3781 devid = PCI_SBDF_TO_DEVID(sbdf);
3782 iommu = __rlookup_amd_iommu(seg, devid);
3783 if (!iommu)
3784 return -EINVAL;
3785
3786 dev_data = search_dev_data(iommu, devid);
3787 max_irqs = dev_data ? dev_data->max_irqs : MAX_IRQS_PER_TABLE_512;
3788
3789 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
3790 if (ret < 0)
3791 return ret;
3792
3793 if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
3794 struct irq_remap_table *table;
3795
3796 table = alloc_irq_table(iommu, devid, NULL, max_irqs);
3797 if (table) {
3798 if (!table->min_index) {
3799 /*
3800 * Keep the first 32 indexes free for IOAPIC
3801 * interrupts.
3802 */
3803 table->min_index = 32;
3804 for (i = 0; i < 32; ++i)
3805 iommu->irte_ops->set_allocated(table, i);
3806 }
3807 WARN_ON(table->min_index != 32);
3808 index = info->ioapic.pin;
3809 } else {
3810 index = -ENOMEM;
3811 }
3812 } else if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI ||
3813 info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX) {
3814 bool align = (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI);
3815
3816 index = alloc_irq_index(iommu, devid, nr_irqs, align,
3817 msi_desc_to_pci_dev(info->desc),
3818 max_irqs);
3819 } else {
3820 index = alloc_irq_index(iommu, devid, nr_irqs, false, NULL,
3821 max_irqs);
3822 }
3823
3824 if (index < 0) {
3825 pr_warn("Failed to allocate IRTE\n");
3826 ret = index;
3827 goto out_free_parent;
3828 }
3829
3830 for (i = 0; i < nr_irqs; i++) {
3831 irq_data = irq_domain_get_irq_data(domain, virq + i);
3832 cfg = irq_data ? irqd_cfg(irq_data) : NULL;
3833 if (!cfg) {
3834 ret = -EINVAL;
3835 goto out_free_data;
3836 }
3837
3838 ret = -ENOMEM;
3839 data = kzalloc_obj(*data);
3840 if (!data)
3841 goto out_free_data;
3842
3843 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
3844 data->entry = kzalloc_obj(union irte);
3845 else
3846 data->entry = kzalloc_obj(struct irte_ga);
3847 if (!data->entry) {
3848 kfree(data);
3849 goto out_free_data;
3850 }
3851
3852 data->iommu = iommu;
3853 irq_data->hwirq = (devid << 16) + i;
3854 irq_data->chip_data = data;
3855 irq_data->chip = &amd_ir_chip;
3856 irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
3857 }
3858
3859 return 0;
3860
3861out_free_data:
3862 for (i--; i >= 0; i--) {
3863 irq_data = irq_domain_get_irq_data(domain, virq + i);
3864 if (irq_data)
3865 kfree(irq_data->chip_data);
3866 }
3867 for (i = 0; i < nr_irqs; i++)
3868 free_irte(iommu, devid, index + i);
3869out_free_parent:
3870 irq_domain_free_irqs_common(domain, virq, nr_irqs);
3871 return ret;
3872}
3873
3874static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
3875 unsigned int nr_irqs)
3876{
3877 struct irq_2_irte *irte_info;
3878 struct irq_data *irq_data;
3879 struct amd_ir_data *data;
3880 int i;
3881
3882 for (i = 0; i < nr_irqs; i++) {
3883 irq_data = irq_domain_get_irq_data(domain, virq + i);
3884 if (irq_data && irq_data->chip_data) {
3885 data = irq_data->chip_data;
3886 irte_info = &data->irq_2_irte;
3887 free_irte(data->iommu, irte_info->devid, irte_info->index);
3888 kfree(data->entry);
3889 kfree(data);
3890 }
3891 }
3892 irq_domain_free_irqs_common(domain, virq, nr_irqs);
3893}
3894
3895static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
3896 struct amd_ir_data *ir_data,
3897 struct irq_2_irte *irte_info,
3898 struct irq_cfg *cfg);
3899
static int irq_remapping_activate(struct irq_domain *domain,
				  struct irq_data *irq_data, bool reserve)
{
	struct amd_ir_data *data = irq_data->chip_data;
	struct irq_2_irte *irte_info = &data->irq_2_irte;
	struct amd_iommu *iommu = data->iommu;
	struct irq_cfg *cfg = irqd_cfg(irq_data);

	if (!iommu)
		return 0;

	iommu->irte_ops->activate(iommu, data->entry, irte_info->devid,
				  irte_info->index);
	amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg);
	return 0;
}

static void irq_remapping_deactivate(struct irq_domain *domain,
				     struct irq_data *irq_data)
{
	struct amd_ir_data *data = irq_data->chip_data;
	struct irq_2_irte *irte_info = &data->irq_2_irte;
	struct amd_iommu *iommu = data->iommu;

	if (iommu)
		iommu->irte_ops->deactivate(iommu, data->entry, irte_info->devid,
					    irte_info->index);
}

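/*
 * Match IOAPIC and HPET fwspecs to this IRQ domain only when the device
 * behind them is remapped by this IOMMU.
 */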
static int irq_remapping_select(struct irq_domain *d, struct irq_fwspec *fwspec,
				enum irq_domain_bus_token bus_token)
{
	struct amd_iommu *iommu;
	int devid = -1;

	if (!amd_iommu_irq_remap)
		return 0;

	if (x86_fwspec_is_ioapic(fwspec))
		devid = get_ioapic_devid(fwspec->param[0]);
	else if (x86_fwspec_is_hpet(fwspec))
		devid = get_hpet_devid(fwspec->param[0]);

	if (devid < 0)
		return 0;
	iommu = __rlookup_amd_iommu((devid >> 16), (devid & 0xffff));

	return iommu && iommu->ir_domain == d;
}

static const struct irq_domain_ops amd_ir_domain_ops = {
	.select = irq_remapping_select,
	.alloc = irq_remapping_alloc,
	.free = irq_remapping_free,
	.activate = irq_remapping_activate,
	.deactivate = irq_remapping_deactivate,
};

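/* Helper to fill the pCPU and run-state fields of a guest-mode IRTE. */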
static void __amd_iommu_update_ga(struct irte_ga *entry, int cpu,
				  bool ga_log_intr)
{
	if (cpu >= 0) {
		entry->lo.fields_vapic.destination =
					APICID_TO_IRTE_DEST_LO(cpu);
		entry->hi.fields.destination =
					APICID_TO_IRTE_DEST_HI(cpu);
		entry->lo.fields_vapic.is_run = true;
		entry->lo.fields_vapic.ga_log_intr = false;
	} else {
		entry->lo.fields_vapic.is_run = false;
		entry->lo.fields_vapic.ga_log_intr = ga_log_intr;
	}
}

/*
 * Update the pCPU information for an IRTE that is configured to post IRQs to
 * a vCPU, without issuing an IOMMU invalidation for the IRTE.
 *
 * If the vCPU is associated with a pCPU (@cpu >= 0), configure the Destination
 * with the pCPU's APIC ID, set IsRun, and clear GALogIntr. If the vCPU isn't
 * associated with a pCPU (@cpu < 0), clear IsRun and set/clear GALogIntr based
 * on input from the caller (e.g. KVM only requests GALogIntr when the vCPU is
 * blocking and requires a notification wake event). I.e. treat vCPUs that are
 * associated with a pCPU as running. This API is intended to be used when a
 * vCPU is scheduled in/out (or stops running for any reason), to do a fast
 * update of IsRun, GALogIntr, and (conditionally) Destination.
 *
 * Per the IOMMU spec, the Destination, IsRun, and GATag fields are not cached
 * and thus don't require an invalidation to ensure the IOMMU consumes fresh
 * information.
 */
int amd_iommu_update_ga(void *data, int cpu, bool ga_log_intr)
{
	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;

	if (WARN_ON_ONCE(!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)))
		return -EINVAL;

	if (!entry || !entry->lo.fields_vapic.guest_mode)
		return 0;

	if (!ir_data->iommu)
		return -ENODEV;

	__amd_iommu_update_ga(entry, cpu, ga_log_intr);

	return __modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
				ir_data->irq_2_irte.index, entry);
}
EXPORT_SYMBOL(amd_iommu_update_ga);

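/*
 * Switch the IRTE to guest (virtual APIC) mode so that the interrupt is
 * posted to the vCPU described by ga_root_ptr, ga_vector and ga_tag.
 */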
int amd_iommu_activate_guest_mode(void *data, int cpu, bool ga_log_intr)
{
	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
	u64 valid;

	if (WARN_ON_ONCE(!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)))
		return -EINVAL;

	if (!entry)
		return 0;

	valid = entry->lo.fields_vapic.valid;

	entry->lo.val = 0;
	entry->hi.val = 0;

	entry->lo.fields_vapic.valid = valid;
	entry->lo.fields_vapic.guest_mode = 1;
	entry->hi.fields.ga_root_ptr = ir_data->ga_root_ptr;
	entry->hi.fields.vector = ir_data->ga_vector;
	entry->lo.fields_vapic.ga_tag = ir_data->ga_tag;

	__amd_iommu_update_ga(entry, cpu, ga_log_intr);

	return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
			      ir_data->irq_2_irte.index, entry);
}
EXPORT_SYMBOL(amd_iommu_activate_guest_mode);

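/*
 * Switch the IRTE back to legacy remapped mode, re-targeting it at the
 * host vector and destination cached in ir_data->cfg.
 */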
int amd_iommu_deactivate_guest_mode(void *data)
{
	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
	struct irq_cfg *cfg = ir_data->cfg;
	u64 valid;

	if (WARN_ON_ONCE(!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)))
		return -EINVAL;

	if (!entry || !entry->lo.fields_vapic.guest_mode)
		return 0;

	valid = entry->lo.fields_remap.valid;

	entry->lo.val = 0;
	entry->hi.val = 0;

	entry->lo.fields_remap.valid = valid;
	entry->lo.fields_remap.dm = apic->dest_mode_logical;
	entry->lo.fields_remap.int_type = APIC_DELIVERY_MODE_FIXED;
	entry->hi.fields.vector = cfg->vector;
	entry->lo.fields_remap.destination =
				APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
	entry->hi.fields.destination =
				APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);

	return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
			      ir_data->irq_2_irte.index, entry);
}
EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);

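/*
 * irq_set_vcpu_affinity() callback, used by KVM to switch an IRTE between
 * posted (guest mode) and host remapped delivery.
 */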
static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *info)
{
	int ret;
	struct amd_iommu_pi_data *pi_data = info;
	struct amd_ir_data *ir_data = data->chip_data;
	struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
	struct iommu_dev_data *dev_data;

	if (WARN_ON_ONCE(!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)))
		return -EINVAL;

	if (ir_data->iommu == NULL)
		return -EINVAL;

	dev_data = search_dev_data(ir_data->iommu, irte_info->devid);

	/*
	 * This device has never been set up for guest mode, so the IRTE
	 * must not be modified.
	 */
	if (!dev_data || !dev_data->use_vapic)
		return -EINVAL;

	ir_data->cfg = irqd_cfg(data);

	if (pi_data) {
		pi_data->ir_data = ir_data;

		ir_data->ga_root_ptr = (pi_data->vapic_addr >> 12);
		ir_data->ga_vector = pi_data->vector;
		ir_data->ga_tag = pi_data->ga_tag;
		if (pi_data->is_guest_mode)
			ret = amd_iommu_activate_guest_mode(ir_data, pi_data->cpu,
							    pi_data->ga_log_intr);
		else
			ret = amd_iommu_deactivate_guest_mode(ir_data);
	} else {
		ret = amd_iommu_deactivate_guest_mode(ir_data);
	}

	return ret;
}

static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
			       struct amd_ir_data *ir_data,
			       struct irq_2_irte *irte_info,
			       struct irq_cfg *cfg)
{
	/*
	 * Atomically updates the IRTE with the new destination, vector
	 * and flushes the interrupt entry cache.
	 */
	iommu->irte_ops->set_affinity(iommu, ir_data->entry, irte_info->devid,
				      irte_info->index, cfg->vector,
				      cfg->dest_apicid);
}

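/*
 * Set the affinity in the parent (x86 vector) domain first, then mirror
 * the new vector and destination into the IRTE.
 */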
static int amd_ir_set_affinity(struct irq_data *data,
			       const struct cpumask *mask, bool force)
{
	struct amd_ir_data *ir_data = data->chip_data;
	struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
	struct irq_cfg *cfg = irqd_cfg(data);
	struct irq_data *parent = data->parent_data;
	struct amd_iommu *iommu = ir_data->iommu;
	int ret;

	if (!iommu)
		return -ENODEV;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
		return ret;

	amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg);
	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to cleanup the previous
	 * vector allocation.
	 */
	vector_schedule_cleanup(cfg);

	return IRQ_SET_MASK_OK_DONE;
}

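/* Hand out the MSI message cached in amd_ir_data for this IRQ. */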
static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
{
	struct amd_ir_data *ir_data = irq_data->chip_data;

	*msg = ir_data->msi_entry;
}

static struct irq_chip amd_ir_chip = {
	.name			= "AMD-IR",
	.irq_ack		= apic_ack_irq,
	.irq_set_affinity	= amd_ir_set_affinity,
	.irq_set_vcpu_affinity	= amd_ir_set_vcpu_affinity,
	.irq_compose_msi_msg	= ir_compose_msi_msg,
};

static const struct msi_parent_ops amdvi_msi_parent_ops = {
	.supported_flags	= X86_VECTOR_MSI_FLAGS_SUPPORTED | MSI_FLAG_MULTI_PCI_MSI,
	.bus_select_token	= DOMAIN_BUS_AMDVI,
	.bus_select_mask	= MATCH_PCI_MSI,
	.prefix			= "IR-",
	.init_dev_msi_info	= msi_parent_init_dev_msi_info,
};

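/*
 * Create the per-IOMMU interrupt remapping domain that sits between the
 * x86 vector domain and the MSI/IOAPIC/HPET consumers.
 */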
int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
{
	struct irq_domain_info info = {
		.fwnode		= irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index),
		.ops		= &amd_ir_domain_ops,
		.domain_flags	= IRQ_DOMAIN_FLAG_ISOLATED_MSI,
		.host_data	= iommu,
		.parent		= arch_get_ir_parent_domain(),
	};

	if (!info.fwnode)
		return -ENOMEM;

	iommu->ir_domain = msi_create_parent_irq_domain(&info, &amdvi_msi_parent_ops);
	if (!iommu->ir_domain) {
		irq_domain_free_fwnode(info.fwnode);
		return -ENOMEM;
	}
	return 0;
}
#endif

MODULE_IMPORT_NS("GENERIC_PT_IOMMU");