// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>

#include "iommu-pages.h"

struct tegra_smmu_group {
	struct list_head list;
	struct tegra_smmu *smmu;
	const struct tegra_smmu_group_soc *soc;
	struct iommu_group *group;
	unsigned int swgroup;
};

struct tegra_smmu {
	void __iomem *regs;
	struct device *dev;

	struct tegra_mc *mc;
	const struct tegra_smmu_soc *soc;

	struct list_head groups;

	unsigned long pfn_mask;
	unsigned long tlb_mask;

	unsigned long *asids;
	struct mutex lock;

	struct list_head list;

	struct dentry *debugfs;

	struct iommu_device iommu;	/* IOMMU Core code handle */
};

struct tegra_pd;
struct tegra_pt;

struct tegra_smmu_as {
	struct iommu_domain domain;
	struct tegra_smmu *smmu;
	unsigned int use_count;
	spinlock_t lock;
	u32 *count;
	struct tegra_pt **pts;
	struct tegra_pd *pd;
	dma_addr_t pd_dma;
	unsigned id;
	u32 attr;
};

static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
	return container_of(dom, struct tegra_smmu_as, domain);
}

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
			       unsigned long offset)
{
	writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
	return readl(smmu->regs + offset);
}

#define SMMU_CONFIG 0x010
#define SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
	((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)

#define SMMU_PTC_CONFIG 0x18
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))

#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
					 SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
				       SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define SMMU_TLB_FLUSH_ASID_MATCH (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PAGE_MASK		(~(SMMU_SIZE_PT-1))
#define SMMU_OFFSET_IN_PAGE(x)	((unsigned long)(x) & ~SMMU_PAGE_MASK)
#define SMMU_PFN_PHYS(x)	((phys_addr_t)(x) << SMMU_PTE_SHIFT)
#define SMMU_PHYS_PFN(x)	((unsigned long)((x) >> SMMU_PTE_SHIFT))

#define SMMU_PD_READABLE	(1 << 31)
#define SMMU_PD_WRITABLE	(1 << 30)
#define SMMU_PD_NONSECURE	(1 << 29)

#define SMMU_PDE_READABLE	(1 << 31)
#define SMMU_PDE_WRITABLE	(1 << 30)
#define SMMU_PDE_NONSECURE	(1 << 29)
#define SMMU_PDE_NEXT		(1 << 28)

#define SMMU_PTE_READABLE	(1 << 31)
#define SMMU_PTE_WRITABLE	(1 << 30)
#define SMMU_PTE_NONSECURE	(1 << 29)

#define SMMU_PDE_ATTR		(SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
				 SMMU_PDE_NONSECURE)

struct tegra_pd {
	u32 val[SMMU_NUM_PDE];
};

struct tegra_pt {
	u32 val[SMMU_NUM_PTE];
};

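/*
 * The SMMU translates 32-bit IOVAs using a two-level page table: a 10-bit
 * page directory index, a 10-bit page table index and a 12-bit offset into
 * a 4 KiB page.
 */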
static unsigned int iova_pd_index(unsigned long iova)
{
	return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
}

static unsigned int iova_pt_index(unsigned long iova)
{
	return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
}

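/*
 * Page directories and page tables are referenced by physical (DMA)
 * address in hardware, so their addresses must be representable within
 * the pfn_mask derived from the SoC's number of address bits.
 */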
static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
{
	addr >>= 12;
	return (addr & smmu->pfn_mask) == addr;
}

static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
	return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}

static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
}

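/*
 * Flush the page table cache entry that holds the page table data at
 * dma + offset. The address is aligned down to the memory controller's
 * atom size before it is written to the flush register.
 */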
static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
				  unsigned long offset)
{
	u32 value;

	offset &= ~(smmu->mc->soc->atom_size - 1);

	if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
		value = 0;
#endif
		smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
	}

	value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
	smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
				       unsigned long asid)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
					  unsigned long asid,
					  unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
					unsigned long asid,
					unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

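/*
 * Read back an SMMU register so that all preceding register writes are
 * guaranteed to have reached the hardware before continuing.
 */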
static inline void smmu_flush(struct tegra_smmu *smmu)
{
	smmu_readl(smmu, SMMU_PTB_ASID);
}

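/*
 * Allocate the lowest unused ASID from the bitmap. Callers are expected
 * to hold smmu->lock.
 */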
static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
	unsigned long id;

	id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
	if (id >= smmu->soc->num_asids)
		return -ENOSPC;

	set_bit(id, smmu->asids);
	*idp = id;

	return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
	clear_bit(id, smmu->asids);
}

static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
{
	struct tegra_smmu_as *as;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

	as->pd = iommu_alloc_pages_sz(GFP_KERNEL | __GFP_DMA, SMMU_SIZE_PD);
	if (!as->pd) {
		kfree(as);
		return NULL;
	}

	as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
	if (!as->count) {
		iommu_free_pages(as->pd);
		kfree(as);
		return NULL;
	}

	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
	if (!as->pts) {
		kfree(as->count);
		iommu_free_pages(as->pd);
		kfree(as);
		return NULL;
	}

	spin_lock_init(&as->lock);

	as->domain.pgsize_bitmap = SZ_4K;

	/* setup aperture */
	as->domain.geometry.aperture_start = 0;
	as->domain.geometry.aperture_end = 0xffffffff;
	as->domain.geometry.force_aperture = true;

	return &as->domain;
}

static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);

	/* TODO: free page directory and page tables */

	WARN_ON_ONCE(as->use_count);
	kfree(as->count);
	kfree(as->pts);
	kfree(as);
}

static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
	const struct tegra_smmu_swgroup *group = NULL;
	unsigned int i;

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		if (smmu->soc->swgroups[i].swgroup == swgroup) {
			group = &smmu->soc->swgroups[i];
			break;
		}
	}

	return group;
}

static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
			      unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value |= SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	} else {
		pr_warn("%s group from swgroup %u not found\n", __func__,
			swgroup);
		/* No point moving ahead if group was not found */
		return;
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->regs.smmu.reg);
		value |= BIT(client->regs.smmu.bit);
		smmu_writel(smmu, value, client->regs.smmu.reg);
	}
}

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
			       unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value &= ~SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->regs.smmu.reg);
		value &= ~BIT(client->regs.smmu.bit);
		smmu_writel(smmu, value, client->regs.smmu.reg);
	}
}

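/*
 * Set up an address space for use on this SMMU: map its page directory
 * for DMA, allocate an ASID and program the page directory base into the
 * page table base registers. A use count ensures that an address space
 * shared by multiple devices is only set up and torn down once.
 */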
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
				 struct tegra_smmu_as *as)
{
	u32 value;
	int err = 0;

	mutex_lock(&smmu->lock);

	if (as->use_count > 0) {
		as->use_count++;
		goto unlock;
	}

	as->pd_dma = dma_map_single(smmu->dev, as->pd, SMMU_SIZE_PD,
				    DMA_TO_DEVICE);
	if (dma_mapping_error(smmu->dev, as->pd_dma)) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We can't handle 64-bit DMA addresses */
	if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
		err = -ENOMEM;
		goto err_unmap;
	}

	err = tegra_smmu_alloc_asid(smmu, &as->id);
	if (err < 0)
		goto err_unmap;

	smmu_flush_ptc(smmu, as->pd_dma, 0);
	smmu_flush_tlb_asid(smmu, as->id);

	smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
	value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
	smmu_writel(smmu, value, SMMU_PTB_DATA);
	smmu_flush(smmu);

	as->smmu = smmu;
	as->use_count++;

	mutex_unlock(&smmu->lock);

	return 0;

err_unmap:
	dma_unmap_single(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
unlock:
	mutex_unlock(&smmu->lock);

	return err;
}

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
				    struct tegra_smmu_as *as)
{
	mutex_lock(&smmu->lock);

	if (--as->use_count > 0) {
		mutex_unlock(&smmu->lock);
		return;
	}

	tegra_smmu_free_asid(smmu, as->id);

	dma_unmap_single(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);

	as->smmu = NULL;

	mutex_unlock(&smmu->lock);
}

static int tegra_smmu_attach_dev(struct iommu_domain *domain,
				 struct device *dev, struct iommu_domain *old)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned int index;
	int err;

	if (!fwspec)
		return -ENOENT;

	for (index = 0; index < fwspec->num_ids; index++) {
		err = tegra_smmu_as_prepare(smmu, as);
		if (err)
			goto disable;

		tegra_smmu_enable(smmu, fwspec->ids[index], as->id);
	}

	if (index == 0)
		return -ENODEV;

	return 0;

disable:
	while (index--) {
		tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
		tegra_smmu_as_unprepare(smmu, as);
	}

	return err;
}

static int tegra_smmu_identity_attach(struct iommu_domain *identity_domain,
				      struct device *dev,
				      struct iommu_domain *old)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct tegra_smmu_as *as;
	struct tegra_smmu *smmu;
	unsigned int index;

	if (!fwspec)
		return -ENODEV;

	if (old == identity_domain || !old)
		return 0;

	as = to_smmu_as(old);
	smmu = as->smmu;
	for (index = 0; index < fwspec->num_ids; index++) {
		tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
		tegra_smmu_as_unprepare(smmu, as);
	}
	return 0;
}

static struct iommu_domain_ops tegra_smmu_identity_ops = {
	.attach_dev = tegra_smmu_identity_attach,
};

static struct iommu_domain tegra_smmu_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &tegra_smmu_identity_ops,
};

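/*
 * Update a page directory entry and make the update visible to the
 * hardware: sync the CPU write to memory, flush the stale copy from the
 * page table cache and invalidate the covering section in the TLB.
 */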
static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
			       u32 value)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	u32 *pd = &as->pd->val[pd_index];
	unsigned long offset = pd_index * sizeof(*pd);

	/* Set the page directory entry first */
	*pd = value;

	/* Then flush the page directory entry from caches */
	dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
					 sizeof(*pd), DMA_TO_DEVICE);

	/* And flush the iommu */
	smmu_flush_ptc(smmu, as->pd_dma, offset);
	smmu_flush_tlb_section(smmu, as->id, iova);
	smmu_flush(smmu);
}

static u32 *tegra_smmu_pte_offset(struct tegra_pt *pt, unsigned long iova)
{
	return &pt->val[iova_pt_index(iova)];
}

static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
				  dma_addr_t *dmap)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	struct tegra_pt *pt;

	pt = as->pts[pd_index];
	if (!pt)
		return NULL;

	*dmap = smmu_pde_to_dma(smmu, as->pd->val[pd_index]);

	return tegra_smmu_pte_offset(pt, iova);
}

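/*
 * Return a pointer to the PTE for iova, installing pt as the backing
 * page table if the corresponding page directory entry is still empty.
 * pt must have been obtained via as_get_pde_page() beforehand.
 */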
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
		       dma_addr_t *dmap, struct tegra_pt *pt)
{
	unsigned int pde = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;

	if (!as->pts[pde]) {
		dma_addr_t dma;

		dma = dma_map_single(smmu->dev, pt, SMMU_SIZE_PT,
				     DMA_TO_DEVICE);
		if (dma_mapping_error(smmu->dev, dma)) {
			iommu_free_pages(pt);
			return NULL;
		}

		if (!smmu_dma_addr_valid(smmu, dma)) {
			dma_unmap_single(smmu->dev, dma, SMMU_SIZE_PT,
					 DMA_TO_DEVICE);
			iommu_free_pages(pt);
			return NULL;
		}

		as->pts[pde] = pt;

		tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
							      SMMU_PDE_NEXT));

		*dmap = dma;
	} else {
		*dmap = smmu_pde_to_dma(smmu, as->pd->val[pde]);
	}

	return tegra_smmu_pte_offset(as->pts[pde], iova);
}

static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pd_index = iova_pd_index(iova);

	as->count[pd_index]++;
}

static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pde = iova_pd_index(iova);
	struct tegra_pt *pt = as->pts[pde];

	/*
	 * When no entries in this page table are used anymore, return the
	 * memory page to the system.
	 */
	if (--as->count[pde] == 0) {
		struct tegra_smmu *smmu = as->smmu;
		dma_addr_t pte_dma = smmu_pde_to_dma(smmu, as->pd->val[pde]);

		tegra_smmu_set_pde(as, iova, 0);

		dma_unmap_single(smmu->dev, pte_dma, SMMU_SIZE_PT,
				 DMA_TO_DEVICE);
		iommu_free_pages(pt);
		as->pts[pde] = NULL;
	}
}

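/*
 * Write a single page table entry, sync it for the device and flush the
 * affected page table cache and TLB entries.
 */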
static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
			       u32 *pte, dma_addr_t pte_dma, u32 val)
{
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset = SMMU_OFFSET_IN_PAGE(pte);

	*pte = val;

	dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
					 4, DMA_TO_DEVICE);
	smmu_flush_ptc(smmu, pte_dma, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);
}

static struct tegra_pt *as_get_pde_page(struct tegra_smmu_as *as,
					unsigned long iova, gfp_t gfp,
					unsigned long *flags)
{
	unsigned int pde = iova_pd_index(iova);
	struct tegra_pt *pt = as->pts[pde];

	/* at first check whether allocation needs to be done at all */
	if (pt)
		return pt;

	/*
	 * In order to prevent exhaustion of the atomic memory pool, we
	 * allocate the page in a sleeping context if the GFP flags permit.
	 * Hence the spinlock needs to be unlocked and re-locked around the
	 * allocation.
	 */
	if (gfpflags_allow_blocking(gfp))
		spin_unlock_irqrestore(&as->lock, *flags);

	pt = iommu_alloc_pages_sz(gfp | __GFP_DMA, SMMU_SIZE_PT);

	if (gfpflags_allow_blocking(gfp))
		spin_lock_irqsave(&as->lock, *flags);

	/*
	 * In the case of a blocking allocation, a concurrent mapping may
	 * have won the race for the PDE. If so, the page allocated here
	 * isn't needed, and an allocation failure isn't fatal because the
	 * winner's page table can be used instead.
	 */
	if (as->pts[pde]) {
		if (pt)
			iommu_free_pages(pt);

		pt = as->pts[pde];
	}

	return pt;
}

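/*
 * Map a single 4 KiB page. Called with as->lock held; the lock may be
 * dropped temporarily inside as_get_pde_page() for blocking allocations,
 * which is why the saved IRQ flags are passed down.
 */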
static int
__tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
		 phys_addr_t paddr, size_t size, int prot, gfp_t gfp,
		 unsigned long *flags)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	struct tegra_pt *pt;
	u32 pte_attrs;
	u32 *pte;

	pt = as_get_pde_page(as, iova, gfp, flags);
	if (!pt)
		return -ENOMEM;

	pte = as_get_pte(as, iova, &pte_dma, pt);
	if (!pte)
		return -ENOMEM;

	/* If we aren't overwriting a pre-existing entry, increment use */
	if (*pte == 0)
		tegra_smmu_pte_get_use(as, iova);

	pte_attrs = SMMU_PTE_NONSECURE;

	if (prot & IOMMU_READ)
		pte_attrs |= SMMU_PTE_READABLE;

	if (prot & IOMMU_WRITE)
		pte_attrs |= SMMU_PTE_WRITABLE;

	tegra_smmu_set_pte(as, iova, pte, pte_dma,
			   SMMU_PHYS_PFN(paddr) | pte_attrs);

	return 0;
}

static size_t
__tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
		   size_t size, struct iommu_iotlb_gather *gather)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
	tegra_smmu_pte_put_use(as, iova);

	return size;
}

static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, size_t count,
			  int prot, gfp_t gfp, size_t *mapped)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&as->lock, flags);
	ret = __tegra_smmu_map(domain, iova, paddr, size, prot, gfp, &flags);
	spin_unlock_irqrestore(&as->lock, flags);

	if (!ret)
		*mapped = size;

	return ret;
}

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size, size_t count,
			       struct iommu_iotlb_gather *gather)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long flags;

	spin_lock_irqsave(&as->lock, flags);
	size = __tegra_smmu_unmap(domain, iova, size, gather);
	spin_unlock_irqrestore(&as->lock, flags);

	return size;
}

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long pfn;
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	pfn = *pte & as->smmu->pfn_mask;

	return SMMU_PFN_PHYS(pfn) + SMMU_OFFSET_IN_PAGE(iova);
}

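/*
 * Given a memory controller device tree node, return the SMMU instance
 * that belongs to it, provided the memory controller has been probed.
 */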
static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
	struct platform_device *pdev;
	struct tegra_mc *mc;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return NULL;

	mc = platform_get_drvdata(pdev);
	put_device(&pdev->dev);
	if (!mc)
		return NULL;

	return mc->smmu;
}

static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
				const struct of_phandle_args *args)
{
	const struct iommu_ops *ops = smmu->iommu.ops;
	int err;

	err = iommu_fwspec_init(dev, dev_fwnode(smmu->dev));
	if (err < 0) {
		dev_err(dev, "failed to initialize fwspec: %d\n", err);
		return err;
	}

	err = ops->of_xlate(dev, args);
	if (err < 0) {
		dev_err(dev, "failed to parse SW group ID: %d\n", err);
		return err;
	}

	return 0;
}

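/*
 * Walk the "iommus" phandles of the device's DT node and initialize the
 * fwspec for every entry that resolves to this SMMU. The per-device SMMU
 * pointer is set as a side effect of the ->of_xlate() callback.
 */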
static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = NULL;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err;

	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					  &args) == 0) {
		smmu = tegra_smmu_find(args.np);
		if (smmu) {
			err = tegra_smmu_configure(smmu, dev, &args);

			if (err < 0) {
				of_node_put(args.np);
				return ERR_PTR(err);
			}
		}

		of_node_put(args.np);
		index++;
	}

	smmu = dev_iommu_priv_get(dev);
	if (!smmu)
		return ERR_PTR(-ENODEV);

	return &smmu->iommu;
}

static const struct tegra_smmu_group_soc *
tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
{
	unsigned int i, j;

	for (i = 0; i < smmu->soc->num_groups; i++)
		for (j = 0; j < smmu->soc->groups[i].num_swgroups; j++)
			if (smmu->soc->groups[i].swgroups[j] == swgroup)
				return &smmu->soc->groups[i];

	return NULL;
}

static void tegra_smmu_group_release(void *iommu_data)
{
	struct tegra_smmu_group *group = iommu_data;
	struct tegra_smmu *smmu = group->smmu;

	mutex_lock(&smmu->lock);
	list_del(&group->list);
	mutex_unlock(&smmu->lock);
}

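/*
 * Devices that belong to the same SoC-defined group must share an
 * iommu_group, so look for an existing group matching the device's
 * swgroup before creating a new one.
 */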
static struct iommu_group *tegra_smmu_device_group(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
	const struct tegra_smmu_group_soc *soc;
	unsigned int swgroup = fwspec->ids[0];
	struct tegra_smmu_group *group;
	struct iommu_group *grp;

	/* Find the group_soc associated with this swgroup */
	soc = tegra_smmu_find_group(smmu, swgroup);

	mutex_lock(&smmu->lock);

	/* Find an existing iommu_group associated with this swgroup or group_soc */
	list_for_each_entry(group, &smmu->groups, list)
		if ((group->swgroup == swgroup) || (soc && group->soc == soc)) {
			grp = iommu_group_ref_get(group->group);
			mutex_unlock(&smmu->lock);
			return grp;
		}

	group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
	if (!group) {
		mutex_unlock(&smmu->lock);
		return NULL;
	}

	INIT_LIST_HEAD(&group->list);
	group->swgroup = swgroup;
	group->smmu = smmu;
	group->soc = soc;

	if (dev_is_pci(dev))
		group->group = pci_device_group(dev);
	else
		group->group = generic_device_group(dev);

	if (IS_ERR(group->group)) {
		devm_kfree(smmu->dev, group);
		mutex_unlock(&smmu->lock);
		return NULL;
	}

	iommu_group_set_iommudata(group->group, group, tegra_smmu_group_release);
	if (soc)
		iommu_group_set_name(group->group, soc->name);
	list_add_tail(&group->list, &smmu->groups);
	mutex_unlock(&smmu->lock);

	return group->group;
}

static int tegra_smmu_of_xlate(struct device *dev,
			       const struct of_phandle_args *args)
{
	struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
	struct tegra_mc *mc = platform_get_drvdata(iommu_pdev);
	u32 id = args->args[0];

	/*
	 * Note: we are releasing the reference to &iommu_pdev->dev here,
	 * which is mc->dev. Although some functions in tegra_smmu_ops may
	 * keep using its private data beyond this point, that is still safe
	 * because the SMMU parent device is the same as the MC, so the
	 * reference count isn't strictly necessary.
	 */
	put_device(&iommu_pdev->dev);

	dev_iommu_priv_set(dev, mc->smmu);

	return iommu_fwspec_add_ids(dev, &id, 1);
}

static int tegra_smmu_def_domain_type(struct device *dev)
{
	/*
	 * FIXME: For now we want to run all translation in IDENTITY mode,
	 * due to some device quirks. Better would be to just quirk the
	 * troubled devices.
	 */
	return IOMMU_DOMAIN_IDENTITY;
}

static const struct iommu_ops tegra_smmu_ops = {
	.identity_domain = &tegra_smmu_identity_domain,
	.def_domain_type = &tegra_smmu_def_domain_type,
	.domain_alloc_paging = tegra_smmu_domain_alloc_paging,
	.probe_device = tegra_smmu_probe_device,
	.device_group = tegra_smmu_device_group,
	.of_xlate = tegra_smmu_of_xlate,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = tegra_smmu_attach_dev,
		.map_pages = tegra_smmu_map,
		.unmap_pages = tegra_smmu_unmap,
		.iova_to_phys = tegra_smmu_iova_to_phys,
		.free = tegra_smmu_domain_free,
	}
};

static void tegra_smmu_ahb_enable(void)
{
	static const struct of_device_id ahb_match[] = {
		{ .compatible = "nvidia,tegra30-ahb", },
		{ }
	};
	struct device_node *ahb;

	ahb = of_find_matching_node(NULL, ahb_match);
	if (ahb) {
		tegra_ahb_enable_smmu(ahb);
		of_node_put(ahb);
	}
}

static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "swgroup    enabled  ASID\n");
	seq_printf(s, "------------------------\n");

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
		const char *status;
		unsigned int asid;

		value = smmu_readl(smmu, group->reg);

		if (value & SMMU_ASID_ENABLE)
			status = "yes";
		else
			status = "no";

		asid = value & SMMU_ASID_MASK;

		seq_printf(s, "%-9s  %-7s  %#04x\n", group->name, status,
			   asid);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_swgroups);

static int tegra_smmu_clients_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "client       enabled\n");
	seq_printf(s, "--------------------\n");

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];
		const char *status;

		value = smmu_readl(smmu, client->regs.smmu.reg);

		if (value & BIT(client->regs.smmu.bit))
			status = "yes";
		else
			status = "no";

		seq_printf(s, "%-12s %s\n", client->name, status);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_clients);

static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
	smmu->debugfs = debugfs_create_dir("smmu", NULL);

	debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_swgroups_fops);
	debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_clients_fops);
}

static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
	debugfs_remove_recursive(smmu->debugfs);
}

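/*
 * Probe the SMMU embedded in the Tegra memory controller: program the
 * page table cache and TLB configuration, enable translation and register
 * the instance with the IOMMU core.
 */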
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
				    const struct tegra_smmu_soc *soc,
				    struct tegra_mc *mc)
{
	struct tegra_smmu *smmu;
	u32 value;
	int err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	/*
	 * This is a bit of a hack. Ideally we'd want to simply return this
	 * value. However iommu_device_register() will attempt to add
	 * all devices to the IOMMU before we get that far. In order
	 * not to rely on global variables to track the IOMMU instance, we
	 * set it here so that it can be looked up from the .probe_device()
	 * callback via the IOMMU device's .drvdata field.
	 */
	mc->smmu = smmu;

	smmu->asids = devm_bitmap_zalloc(dev, soc->num_asids, GFP_KERNEL);
	if (!smmu->asids)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&smmu->groups);
	mutex_init(&smmu->lock);

	smmu->regs = mc->regs;
	smmu->soc = soc;
	smmu->dev = dev;
	smmu->mc = mc;

	smmu->pfn_mask =
		BIT_MASK(mc->soc->num_address_bits - SMMU_PTE_SHIFT) - 1;
	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
		mc->soc->num_address_bits, smmu->pfn_mask);
	smmu->tlb_mask = (1 << fls(smmu->soc->num_tlb_lines)) - 1;
	dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
		smmu->tlb_mask);

	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

	if (soc->supports_request_limit)
		value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

	smmu_writel(smmu, value, SMMU_PTC_CONFIG);

	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
		SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);

	if (soc->supports_round_robin_arbitration)
		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

	smmu_writel(smmu, value, SMMU_TLB_CONFIG);

	smmu_flush_ptc_all(smmu);
	smmu_flush_tlb(smmu);
	smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	smmu_flush(smmu);

	tegra_smmu_ahb_enable();

	err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev));
	if (err)
		return ERR_PTR(err);

	err = iommu_device_register(&smmu->iommu, &tegra_smmu_ops, dev);
	if (err) {
		iommu_device_sysfs_remove(&smmu->iommu);
		return ERR_PTR(err);
	}

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_init(smmu);

	return smmu;
}

void tegra_smmu_remove(struct tegra_smmu *smmu)
{
	iommu_device_unregister(&smmu->iommu);
	iommu_device_sysfs_remove(&smmu->iommu);

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_exit(smmu);
}