Merge branch 'add-second-qdma-support-for-en7581-eth-controller'

Lorenzo Bianconi says:

====================
Add second QDMA support for EN7581 eth controller

EN7581 SoC supports two independent QDMA controllers to connect the
Ethernet Frame Engine (FE) to the CPU. Introduce support for the second
QDMA controller. This is a preliminary series to support multiple FE ports
(e.g. connected to a second PHY controller).
====================

Link: https://patch.msgid.link/cover.1722522582.git.lorenzo@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
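
For orientation before the diff: the series moves all per-controller state (register window, IRQ line, irqmask and its lock, TX/RX queues, hfwd buffers) out of the top-level struct airoha_eth into a dedicated struct airoha_qdma, and the probe path loops over the instances, mapping a "qdmaN" register resource and requesting a per-controller interrupt for each. Below is a minimal sketch of that pattern; the demo_* names are illustrative stand-ins for the airoha_* symbols in the diff, and error handling plus queue setup are trimmed.

/*
 * Illustrative sketch only (demo_* names are hypothetical); the diff
 * below applies the same per-controller pattern to airoha_eth.c.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>

#define DEMO_MAX_NUM_QDMA	2

struct demo_eth;

struct demo_qdma {
	struct demo_eth *eth;
	void __iomem *regs;	/* per-controller register window */
	spinlock_t irq_lock;	/* protects this controller's irqmask */
	int irq;		/* per-controller interrupt line */
};

struct demo_eth {
	struct device *dev;
	struct demo_qdma qdma[DEMO_MAX_NUM_QDMA];	/* one entry per controller */
};

static int demo_qdma_init(struct platform_device *pdev,
			  struct demo_eth *eth, struct demo_qdma *qdma)
{
	int id = qdma - &eth->qdma[0];
	const char *res;

	spin_lock_init(&qdma->irq_lock);
	qdma->eth = eth;

	/* each controller exposes its own "qdmaN" register resource... */
	res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
	if (!res)
		return -ENOMEM;

	qdma->regs = devm_platform_ioremap_resource_byname(pdev, res);
	if (IS_ERR(qdma->regs))
		return PTR_ERR(qdma->regs);

	/* ...and its own interrupt line */
	qdma->irq = platform_get_irq(pdev, 4 * id);
	return qdma->irq < 0 ? qdma->irq : 0;
}

static int demo_hw_init(struct platform_device *pdev, struct demo_eth *eth)
{
	int i, err;

	/* probe now iterates over all controllers instead of hardcoding one */
	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
		err = demo_qdma_init(pdev, eth, &eth->qdma[i]);
		if (err)
			return err;
	}

	return 0;
}

Passing the per-controller struct (rather than the top-level eth pointer) as the dev_instance cookie to devm_request_irq is what lets the shared interrupt handler in the diff touch only its own controller's irqmask and queues.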

+264 -226
drivers/net/ethernet/mediatek/airoha_eth.c
···
 #include <uapi/linux/ppp_defs.h>

 #define AIROHA_MAX_NUM_GDM_PORTS	1
+#define AIROHA_MAX_NUM_QDMA		2
 #define AIROHA_MAX_NUM_RSTS		3
 #define AIROHA_MAX_NUM_XSI_RSTS	5
 #define AIROHA_MAX_MTU			2000
···
 };

 struct airoha_queue {
-	struct airoha_eth *eth;
+	struct airoha_qdma *qdma;

 	/* protect concurrent queue accesses */
 	spinlock_t lock;
···
 };

 struct airoha_tx_irq_queue {
-	struct airoha_eth *eth;
+	struct airoha_qdma *qdma;

 	struct napi_struct napi;
 	u32 *q;
···
 	u64 rx_len[7];
 };

-struct airoha_gdm_port {
-	struct net_device *dev;
+struct airoha_qdma {
 	struct airoha_eth *eth;
+	void __iomem *regs;
+
+	/* protect concurrent irqmask accesses */
+	spinlock_t irq_lock;
+	u32 irqmask[QDMA_INT_REG_MAX];
+	int irq;
+
+	struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
+
+	struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
+	struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
+
+	/* descriptor and packet buffers for qdma hw forward */
+	struct {
+		void *desc;
+		void *q;
+	} hfwd;
+};
+
+struct airoha_gdm_port {
+	struct airoha_qdma *qdma;
+	struct net_device *dev;
 	int id;

 	struct airoha_hw_stats stats;
···
 	struct device *dev;

 	unsigned long state;
-
-	void __iomem *qdma_regs;
 	void __iomem *fe_regs;
-
-	/* protect concurrent irqmask accesses */
-	spinlock_t irq_lock;
-	u32 irqmask[QDMA_INT_REG_MAX];
-	int irq;

 	struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
 	struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];

-	struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
-
 	struct net_device *napi_dev;
-	struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
-	struct airoha_queue q_rx[AIROHA_NUM_RX_RING];

-	struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
-
-	/* descriptor and packet buffers for qdma hw forward */
-	struct {
-		void *desc;
-		void *q;
-	} hfwd;
+	struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
+	struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
 };

 static u32 airoha_rr(void __iomem *base, u32 offset)
···
 #define airoha_fe_clear(eth, offset, val) \
 	airoha_rmw((eth)->fe_regs, (offset), (val), 0)

-#define airoha_qdma_rr(eth, offset) \
-	airoha_rr((eth)->qdma_regs, (offset))
-#define airoha_qdma_wr(eth, offset, val) \
-	airoha_wr((eth)->qdma_regs, (offset), (val))
-#define airoha_qdma_rmw(eth, offset, mask, val) \
-	airoha_rmw((eth)->qdma_regs, (offset), (mask), (val))
-#define airoha_qdma_set(eth, offset, val) \
-	airoha_rmw((eth)->qdma_regs, (offset), 0, (val))
-#define airoha_qdma_clear(eth, offset, val) \
-	airoha_rmw((eth)->qdma_regs, (offset), (val), 0)
+#define airoha_qdma_rr(qdma, offset) \
+	airoha_rr((qdma)->regs, (offset))
+#define airoha_qdma_wr(qdma, offset, val) \
+	airoha_wr((qdma)->regs, (offset), (val))
+#define airoha_qdma_rmw(qdma, offset, mask, val) \
+	airoha_rmw((qdma)->regs, (offset), (mask), (val))
+#define airoha_qdma_set(qdma, offset, val) \
+	airoha_rmw((qdma)->regs, (offset), 0, (val))
+#define airoha_qdma_clear(qdma, offset, val) \
+	airoha_rmw((qdma)->regs, (offset), (val), 0)

-static void airoha_qdma_set_irqmask(struct airoha_eth *eth, int index,
+static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index,
 				    u32 clear, u32 set)
 {
 	unsigned long flags;

-	if (WARN_ON_ONCE(index >= ARRAY_SIZE(eth->irqmask)))
+	if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask)))
 		return;

-	spin_lock_irqsave(&eth->irq_lock, flags);
+	spin_lock_irqsave(&qdma->irq_lock, flags);

-	eth->irqmask[index] &= ~clear;
-	eth->irqmask[index] |= set;
-	airoha_qdma_wr(eth, REG_INT_ENABLE(index), eth->irqmask[index]);
+	qdma->irqmask[index] &= ~clear;
+	qdma->irqmask[index] |= set;
+	airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]);
 	/* Read irq_enable register in order to guarantee the update above
 	 * completes in the spinlock critical section.
 	 */
-	airoha_qdma_rr(eth, REG_INT_ENABLE(index));
+	airoha_qdma_rr(qdma, REG_INT_ENABLE(index));

-	spin_unlock_irqrestore(&eth->irq_lock, flags);
+	spin_unlock_irqrestore(&qdma->irq_lock, flags);
 }

-static void airoha_qdma_irq_enable(struct airoha_eth *eth, int index,
+static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index,
 				   u32 mask)
 {
-	airoha_qdma_set_irqmask(eth, index, 0, mask);
+	airoha_qdma_set_irqmask(qdma, index, 0, mask);
 }

-static void airoha_qdma_irq_disable(struct airoha_eth *eth, int index,
+static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index,
 				    u32 mask)
 {
-	airoha_qdma_set_irqmask(eth, index, mask, 0);
+	airoha_qdma_set_irqmask(qdma, index, mask, 0);
 }

 static void airoha_set_macaddr(struct airoha_eth *eth, const u8 *addr)
···
 static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
 {
 	enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
-	struct airoha_eth *eth = q->eth;
-	int qid = q - &eth->q_rx[0];
+	struct airoha_qdma *qdma = q->qdma;
+	struct airoha_eth *eth = qdma->eth;
+	int qid = q - &qdma->q_rx[0];
 	int nframes = 0;

 	while (q->queued < q->ndesc - 1) {
···
 		WRITE_ONCE(desc->msg2, 0);
 		WRITE_ONCE(desc->msg3, 0);

-		airoha_qdma_rmw(eth, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
+		airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid),
+				RX_RING_CPU_IDX_MASK,
 				FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
 	}
···
 static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
 {
 	enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
-	struct airoha_eth *eth = q->eth;
-	int qid = q - &eth->q_rx[0];
+	struct airoha_qdma *qdma = q->qdma;
+	struct airoha_eth *eth = qdma->eth;
+	int qid = q - &qdma->q_rx[0];
 	int done = 0;

 	while (done < budget) {
···
 static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
 {
 	struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
-	struct airoha_eth *eth = q->eth;
 	int cur, done = 0;

 	do {
···
 	} while (cur && done < budget);

 	if (done < budget && napi_complete(napi))
-		airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX1,
+		airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1,
 				       RX_DONE_INT_MASK);

 	return done;
 }

-static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,
-				     struct airoha_queue *q, int ndesc)
+static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
+				     struct airoha_qdma *qdma, int ndesc)
 {
 	const struct page_pool_params pp_params = {
 		.order = 0,
···
 		.dma_dir = DMA_FROM_DEVICE,
 		.max_len = PAGE_SIZE,
 		.nid = NUMA_NO_NODE,
-		.dev = eth->dev,
+		.dev = qdma->eth->dev,
 		.napi = &q->napi,
 	};
-	int qid = q - &eth->q_rx[0], thr;
+	struct airoha_eth *eth = qdma->eth;
+	int qid = q - &qdma->q_rx[0], thr;
 	dma_addr_t dma_addr;

 	q->buf_size = PAGE_SIZE / 2;
 	q->ndesc = ndesc;
-	q->eth = eth;
+	q->qdma = qdma;

 	q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
 				GFP_KERNEL);
···
 	netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll);

-	airoha_qdma_wr(eth, REG_RX_RING_BASE(qid), dma_addr);
-	airoha_qdma_rmw(eth, REG_RX_RING_SIZE(qid), RX_RING_SIZE_MASK,
+	airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr);
+	airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid),
+			RX_RING_SIZE_MASK,
 			FIELD_PREP(RX_RING_SIZE_MASK, ndesc));

 	thr = clamp(ndesc >> 3, 1, 32);
-	airoha_qdma_rmw(eth, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
+	airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
 			FIELD_PREP(RX_RING_THR_MASK, thr));
-	airoha_qdma_rmw(eth, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
+	airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
 			FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));

 	airoha_qdma_fill_rx_queue(q);
···
 static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
 {
-	struct airoha_eth *eth = q->eth;
+	struct airoha_eth *eth = q->qdma->eth;

 	while (q->queued) {
 		struct airoha_queue_entry *e = &q->entry[q->tail];
···
 	}
 }

-static int airoha_qdma_init_rx(struct airoha_eth *eth)
+static int airoha_qdma_init_rx(struct airoha_qdma *qdma)
 {
 	int i;

-	for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
+	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
 		int err;

 		if (!(RX_DONE_INT_MASK & BIT(i))) {
···
 			continue;
 		}

-		err = airoha_qdma_init_rx_queue(eth, &eth->q_rx[i],
+		err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma,
 						RX_DSCP_NUM(i));
 		if (err)
 			return err;
···
 static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
 {
 	struct airoha_tx_irq_queue *irq_q;
+	struct airoha_qdma *qdma;
 	struct airoha_eth *eth;
 	int id, done = 0;

 	irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
-	eth = irq_q->eth;
-	id = irq_q - &eth->q_tx_irq[0];
+	qdma = irq_q->qdma;
+	id = irq_q - &qdma->q_tx_irq[0];
+	eth = qdma->eth;

 	while (irq_q->queued > 0 && done < budget) {
 		u32 qid, last, val = irq_q->q[irq_q->head];
···
 		last = FIELD_GET(IRQ_DESC_IDX_MASK, val);
 		qid = FIELD_GET(IRQ_RING_IDX_MASK, val);

-		if (qid >= ARRAY_SIZE(eth->q_tx))
+		if (qid >= ARRAY_SIZE(qdma->q_tx))
 			continue;

-		q = &eth->q_tx[qid];
+		q = &qdma->q_tx[qid];
 		if (!q->ndesc)
 			continue;
···
 		int i, len = done >> 7;

 		for (i = 0; i < len; i++)
-			airoha_qdma_rmw(eth, REG_IRQ_CLEAR_LEN(id),
+			airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
 					IRQ_CLEAR_LEN_MASK, 0x80);
-		airoha_qdma_rmw(eth, REG_IRQ_CLEAR_LEN(id),
+		airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
 				IRQ_CLEAR_LEN_MASK, (done & 0x7f));
 	}

 	if (done < budget && napi_complete(napi))
-		airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0,
+		airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0,
 				       TX_DONE_INT_MASK(id));

 	return done;
 }

-static int airoha_qdma_init_tx_queue(struct airoha_eth *eth,
-				     struct airoha_queue *q, int size)
+static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
+				     struct airoha_qdma *qdma, int size)
 {
-	int i, qid = q - &eth->q_tx[0];
+	struct airoha_eth *eth = qdma->eth;
+	int i, qid = q - &qdma->q_tx[0];
 	dma_addr_t dma_addr;

 	spin_lock_init(&q->lock);
 	q->ndesc = size;
-	q->eth = eth;
+	q->qdma = qdma;
 	q->free_thr = 1 + MAX_SKB_FRAGS;

 	q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
···
 		WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
 	}

-	airoha_qdma_wr(eth, REG_TX_RING_BASE(qid), dma_addr);
-	airoha_qdma_rmw(eth, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
+	airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
+	airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
 			FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
-	airoha_qdma_rmw(eth, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
+	airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
 			FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head));

 	return 0;
 }

-static int airoha_qdma_tx_irq_init(struct airoha_eth *eth,
-				   struct airoha_tx_irq_queue *irq_q,
-				   int size)
+static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q,
+				   struct airoha_qdma *qdma, int size)
 {
-	int id = irq_q - &eth->q_tx_irq[0];
+	int id = irq_q - &qdma->q_tx_irq[0];
+	struct airoha_eth *eth = qdma->eth;
 	dma_addr_t dma_addr;

 	netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
···
 	memset(irq_q->q, 0xff, size * sizeof(u32));
 	irq_q->size = size;
-	irq_q->eth = eth;
+	irq_q->qdma = qdma;

-	airoha_qdma_wr(eth, REG_TX_IRQ_BASE(id), dma_addr);
-	airoha_qdma_rmw(eth, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
+	airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr);
+	airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
 			FIELD_PREP(TX_IRQ_DEPTH_MASK, size));
-	airoha_qdma_rmw(eth, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
+	airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
 			FIELD_PREP(TX_IRQ_THR_MASK, 1));

 	return 0;
 }

-static int airoha_qdma_init_tx(struct airoha_eth *eth)
+static int airoha_qdma_init_tx(struct airoha_qdma *qdma)
 {
 	int i, err;

-	for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
-		err = airoha_qdma_tx_irq_init(eth, &eth->q_tx_irq[i],
+	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
+		err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma,
 					      IRQ_QUEUE_LEN(i));
 		if (err)
 			return err;
 	}

-	for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
-		err = airoha_qdma_init_tx_queue(eth, &eth->q_tx[i],
+	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+		err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma,
 						TX_DSCP_NUM);
 		if (err)
 			return err;
···
 static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
 {
-	struct airoha_eth *eth = q->eth;
+	struct airoha_eth *eth = q->qdma->eth;

 	spin_lock_bh(&q->lock);
 	while (q->queued) {
···
 	spin_unlock_bh(&q->lock);
 }

-static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth)
+static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
 {
+	struct airoha_eth *eth = qdma->eth;
 	dma_addr_t dma_addr;
 	u32 status;
 	int size;

 	size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
-	eth->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
-					     GFP_KERNEL);
-	if (!eth->hfwd.desc)
+	qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
+					      GFP_KERNEL);
+	if (!qdma->hfwd.desc)
 		return -ENOMEM;

-	airoha_qdma_wr(eth, REG_FWD_DSCP_BASE, dma_addr);
+	airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);

 	size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
-	eth->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
-					  GFP_KERNEL);
-	if (!eth->hfwd.q)
+	qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
+					   GFP_KERNEL);
+	if (!qdma->hfwd.q)
 		return -ENOMEM;

-	airoha_qdma_wr(eth, REG_FWD_BUF_BASE, dma_addr);
+	airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);

-	airoha_qdma_rmw(eth, REG_HW_FWD_DSCP_CFG,
+	airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
 			HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
 			FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
-	airoha_qdma_rmw(eth, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
+	airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
 			FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
-	airoha_qdma_rmw(eth, REG_LMGR_INIT_CFG,
+	airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
 			LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
 			HW_FWD_DESC_NUM_MASK,
 			FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
···
 	return read_poll_timeout(airoha_qdma_rr, status,
 				 !(status & LMGR_INIT_START), USEC_PER_MSEC,
-				 30 * USEC_PER_MSEC, true, eth,
+				 30 * USEC_PER_MSEC, true, qdma,
 				 REG_LMGR_INIT_CFG);
 }

-static void airoha_qdma_init_qos(struct airoha_eth *eth)
+static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
 {
-	airoha_qdma_clear(eth, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
-	airoha_qdma_set(eth, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
+	airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
+	airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);

-	airoha_qdma_clear(eth, REG_PSE_BUF_USAGE_CFG,
+	airoha_qdma_clear(qdma, REG_PSE_BUF_USAGE_CFG,
 			  PSE_BUF_ESTIMATE_EN_MASK);

-	airoha_qdma_set(eth, REG_EGRESS_RATE_METER_CFG,
+	airoha_qdma_set(qdma, REG_EGRESS_RATE_METER_CFG,
 			EGRESS_RATE_METER_EN_MASK |
 			EGRESS_RATE_METER_EQ_RATE_EN_MASK);
 	/* 2047us x 31 = 63.457ms */
-	airoha_qdma_rmw(eth, REG_EGRESS_RATE_METER_CFG,
+	airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
 			EGRESS_RATE_METER_WINDOW_SZ_MASK,
 			FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f));
-	airoha_qdma_rmw(eth, REG_EGRESS_RATE_METER_CFG,
+	airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
 			EGRESS_RATE_METER_TIMESLICE_MASK,
 			FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff));

 	/* ratelimit init */
-	airoha_qdma_set(eth, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
+	airoha_qdma_set(qdma, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
 	/* fast-tick 25us */
-	airoha_qdma_rmw(eth, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
+	airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
 			FIELD_PREP(GLB_FAST_TICK_MASK, 25));
-	airoha_qdma_rmw(eth, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
+	airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
 			FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40));

-	airoha_qdma_set(eth, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
-	airoha_qdma_rmw(eth, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
+	airoha_qdma_set(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
+	airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
 			FIELD_PREP(EGRESS_FAST_TICK_MASK, 25));
-	airoha_qdma_rmw(eth, REG_EGRESS_TRTCM_CFG,
+	airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG,
 			EGRESS_SLOW_TICK_RATIO_MASK,
 			FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40));

-	airoha_qdma_set(eth, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
-	airoha_qdma_clear(eth, REG_INGRESS_TRTCM_CFG,
+	airoha_qdma_set(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
+	airoha_qdma_clear(qdma, REG_INGRESS_TRTCM_CFG,
 			  INGRESS_TRTCM_MODE_MASK);
-	airoha_qdma_rmw(eth, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
+	airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
 			FIELD_PREP(INGRESS_FAST_TICK_MASK, 125));
-	airoha_qdma_rmw(eth, REG_INGRESS_TRTCM_CFG,
+	airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG,
 			INGRESS_SLOW_TICK_RATIO_MASK,
 			FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8));

-	airoha_qdma_set(eth, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
-	airoha_qdma_rmw(eth, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
+	airoha_qdma_set(qdma, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
+	airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
 			FIELD_PREP(SLA_FAST_TICK_MASK, 25));
-	airoha_qdma_rmw(eth, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
+	airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
 			FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
 }

-static int airoha_qdma_hw_init(struct airoha_eth *eth)
+static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
 {
 	int i;

 	/* clear pending irqs */
-	for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++)
-		airoha_qdma_wr(eth, REG_INT_STATUS(i), 0xffffffff);
+	for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++)
+		airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);

 	/* setup irqs */
-	airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
-	airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
-	airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
+	airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
+	airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
+	airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX4, INT_IDX4_MASK);

 	/* setup irq binding */
-	for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
-		if (!eth->q_tx[i].ndesc)
+	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+		if (!qdma->q_tx[i].ndesc)
 			continue;

 		if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
-			airoha_qdma_set(eth, REG_TX_RING_BLOCKING(i),
+			airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(i),
 					TX_RING_IRQ_BLOCKING_CFG_MASK);
 		else
-			airoha_qdma_clear(eth, REG_TX_RING_BLOCKING(i),
+			airoha_qdma_clear(qdma, REG_TX_RING_BLOCKING(i),
 					  TX_RING_IRQ_BLOCKING_CFG_MASK);
 	}

-	airoha_qdma_wr(eth, REG_QDMA_GLOBAL_CFG,
+	airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
 		       GLOBAL_CFG_RX_2B_OFFSET_MASK |
 		       FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
 		       GLOBAL_CFG_CPU_TXR_RR_MASK |
···
 		       GLOBAL_CFG_TX_WB_DONE_MASK |
 		       FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));

-	airoha_qdma_init_qos(eth);
+	airoha_qdma_init_qos(qdma);

 	/* disable qdma rx delay interrupt */
-	for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
-		if (!eth->q_rx[i].ndesc)
+	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+		if (!qdma->q_rx[i].ndesc)
 			continue;

-		airoha_qdma_clear(eth, REG_RX_DELAY_INT_IDX(i),
+		airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i),
 				  RX_DELAY_INT_MASK);
 	}

-	airoha_qdma_set(eth, REG_TXQ_CNGST_CFG,
+	airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
 			TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);

 	return 0;
···
 static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
 {
-	struct airoha_eth *eth = dev_instance;
-	u32 intr[ARRAY_SIZE(eth->irqmask)];
+	struct airoha_qdma *qdma = dev_instance;
+	u32 intr[ARRAY_SIZE(qdma->irqmask)];
 	int i;

-	for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++) {
-		intr[i] = airoha_qdma_rr(eth, REG_INT_STATUS(i));
-		intr[i] &= eth->irqmask[i];
-		airoha_qdma_wr(eth, REG_INT_STATUS(i), intr[i]);
+	for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) {
+		intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
+		intr[i] &= qdma->irqmask[i];
+		airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
 	}

-	if (!test_bit(DEV_STATE_INITIALIZED, &eth->state))
+	if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
 		return IRQ_NONE;

 	if (intr[1] & RX_DONE_INT_MASK) {
-		airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX1,
+		airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX1,
 					RX_DONE_INT_MASK);

-		for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
-			if (!eth->q_rx[i].ndesc)
+		for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+			if (!qdma->q_rx[i].ndesc)
 				continue;

 			if (intr[1] & BIT(i))
-				napi_schedule(&eth->q_rx[i].napi);
+				napi_schedule(&qdma->q_rx[i].napi);
 		}
 	}

 	if (intr[0] & INT_TX_MASK) {
-		for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
-			struct airoha_tx_irq_queue *irq_q = &eth->q_tx_irq[i];
+		for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
+			struct airoha_tx_irq_queue *irq_q = &qdma->q_tx_irq[i];
 			u32 status, head;

 			if (!(intr[0] & TX_DONE_INT_MASK(i)))
 				continue;

-			airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX0,
+			airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0,
 						TX_DONE_INT_MASK(i));

-			status = airoha_qdma_rr(eth, REG_IRQ_STATUS(i));
+			status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(i));
 			head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
 			irq_q->head = head % irq_q->size;
 			irq_q->queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);

-			napi_schedule(&eth->q_tx_irq[i].napi);
+			napi_schedule(&qdma->q_tx_irq[i].napi);
 		}
 	}

 	return IRQ_HANDLED;
 }

-static int airoha_qdma_init(struct airoha_eth *eth)
+static int airoha_qdma_init(struct platform_device *pdev,
+			    struct airoha_eth *eth,
+			    struct airoha_qdma *qdma)
 {
-	int err;
+	int err, id = qdma - &eth->qdma[0];
+	const char *res;

-	err = devm_request_irq(eth->dev, eth->irq, airoha_irq_handler,
-			       IRQF_SHARED, KBUILD_MODNAME, eth);
+	spin_lock_init(&qdma->irq_lock);
+	qdma->eth = eth;
+
+	res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
+	if (!res)
+		return -ENOMEM;
+
+	qdma->regs = devm_platform_ioremap_resource_byname(pdev, res);
+	if (IS_ERR(qdma->regs))
+		return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
+				     "failed to iomap qdma%d regs\n", id);
+
+	qdma->irq = platform_get_irq(pdev, 4 * id);
+	if (qdma->irq < 0)
+		return qdma->irq;
+
+	err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler,
+			       IRQF_SHARED, KBUILD_MODNAME, qdma);
 	if (err)
 		return err;

-	err = airoha_qdma_init_rx(eth);
+	err = airoha_qdma_init_rx(qdma);
 	if (err)
 		return err;

-	err = airoha_qdma_init_tx(eth);
+	err = airoha_qdma_init_tx(qdma);
 	if (err)
 		return err;

-	err = airoha_qdma_init_hfwd_queues(eth);
+	err = airoha_qdma_init_hfwd_queues(qdma);
 	if (err)
 		return err;

-	err = airoha_qdma_hw_init(eth);
-	if (err)
-		return err;
-
-	set_bit(DEV_STATE_INITIALIZED, &eth->state);
-
-	return 0;
+	return airoha_qdma_hw_init(qdma);
 }

-static int airoha_hw_init(struct airoha_eth *eth)
+static int airoha_hw_init(struct platform_device *pdev,
+			  struct airoha_eth *eth)
 {
-	int err;
+	int err, i;

 	/* disable xsi */
 	reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts), eth->xsi_rsts);
···
 	if (err)
 		return err;

-	return airoha_qdma_init(eth);
+	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
+		err = airoha_qdma_init(pdev, eth, &eth->qdma[i]);
+		if (err)
+			return err;
+	}
+
+	set_bit(DEV_STATE_INITIALIZED, &eth->state);
+
+	return 0;
 }

-static void airoha_hw_cleanup(struct airoha_eth *eth)
+static void airoha_hw_cleanup(struct airoha_qdma *qdma)
 {
 	int i;

-	for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
-		if (!eth->q_rx[i].ndesc)
+	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+		if (!qdma->q_rx[i].ndesc)
 			continue;

-		napi_disable(&eth->q_rx[i].napi);
-		netif_napi_del(&eth->q_rx[i].napi);
-		airoha_qdma_cleanup_rx_queue(&eth->q_rx[i]);
-		if (eth->q_rx[i].page_pool)
-			page_pool_destroy(eth->q_rx[i].page_pool);
+		napi_disable(&qdma->q_rx[i].napi);
+		netif_napi_del(&qdma->q_rx[i].napi);
+		airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
+		if (qdma->q_rx[i].page_pool)
+			page_pool_destroy(qdma->q_rx[i].page_pool);
 	}

-	for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
-		napi_disable(&eth->q_tx_irq[i].napi);
-		netif_napi_del(&eth->q_tx_irq[i].napi);
+	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
+		napi_disable(&qdma->q_tx_irq[i].napi);
+		netif_napi_del(&qdma->q_tx_irq[i].napi);
 	}

-	for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
-		if (!eth->q_tx[i].ndesc)
+	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+		if (!qdma->q_tx[i].ndesc)
 			continue;

-		airoha_qdma_cleanup_tx_queue(&eth->q_tx[i]);
+		airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
 	}
 }

-static void airoha_qdma_start_napi(struct airoha_eth *eth)
+static void airoha_qdma_start_napi(struct airoha_qdma *qdma)
 {
 	int i;

-	for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++)
-		napi_enable(&eth->q_tx_irq[i].napi);
+	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
+		napi_enable(&qdma->q_tx_irq[i].napi);

-	for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
-		if (!eth->q_rx[i].ndesc)
+	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+		if (!qdma->q_rx[i].ndesc)
 			continue;

-		napi_enable(&eth->q_rx[i].napi);
+		napi_enable(&qdma->q_rx[i].napi);
 	}
 }

 static void airoha_update_hw_stats(struct airoha_gdm_port *port)
 {
-	struct airoha_eth *eth = port->eth;
+	struct airoha_eth *eth = port->qdma->eth;
 	u32 val, i = 0;

 	spin_lock(&port->stats.lock);
···
 static int airoha_dev_open(struct net_device *dev)
 {
 	struct airoha_gdm_port *port = netdev_priv(dev);
-	struct airoha_eth *eth = port->eth;
+	struct airoha_qdma *qdma = port->qdma;
 	int err;

 	netif_tx_start_all_queues(dev);
-	err = airoha_set_gdm_ports(eth, true);
+	err = airoha_set_gdm_ports(qdma->eth, true);
 	if (err)
 		return err;

 	if (netdev_uses_dsa(dev))
-		airoha_fe_set(eth, REG_GDM_INGRESS_CFG(port->id),
+		airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
 			      GDM_STAG_EN_MASK);
 	else
-		airoha_fe_clear(eth, REG_GDM_INGRESS_CFG(port->id),
+		airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
 				GDM_STAG_EN_MASK);

-	airoha_qdma_set(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_TX_DMA_EN_MASK);
-	airoha_qdma_set(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_RX_DMA_EN_MASK);
+	airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
+			GLOBAL_CFG_TX_DMA_EN_MASK |
+			GLOBAL_CFG_RX_DMA_EN_MASK);

 	return 0;
 }
···
 static int airoha_dev_stop(struct net_device *dev)
 {
 	struct airoha_gdm_port *port = netdev_priv(dev);
-	struct airoha_eth *eth = port->eth;
+	struct airoha_qdma *qdma = port->qdma;
 	int err;

 	netif_tx_disable(dev);
-	err = airoha_set_gdm_ports(eth, false);
+	err = airoha_set_gdm_ports(qdma->eth, false);
 	if (err)
 		return err;

-	airoha_qdma_clear(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_TX_DMA_EN_MASK);
-	airoha_qdma_clear(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_RX_DMA_EN_MASK);
+	airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
+			  GLOBAL_CFG_TX_DMA_EN_MASK |
+			  GLOBAL_CFG_RX_DMA_EN_MASK);

 	return 0;
 }
···
 	if (err)
 		return err;

-	airoha_set_macaddr(port->eth, dev->dev_addr);
+	airoha_set_macaddr(port->qdma->eth, dev->dev_addr);

 	return 0;
 }
···
 {
 	struct airoha_gdm_port *port = netdev_priv(dev);

-	airoha_set_macaddr(port->eth, dev->dev_addr);
+	airoha_set_macaddr(port->qdma->eth, dev->dev_addr);

 	return 0;
 }
···
 	struct airoha_gdm_port *port = netdev_priv(dev);
 	u32 msg0 = 0, msg1, len = skb_headlen(skb);
 	int i, qid = skb_get_queue_mapping(skb);
-	struct airoha_eth *eth = port->eth;
+	struct airoha_qdma *qdma = port->qdma;
 	u32 nr_frags = 1 + sinfo->nr_frags;
 	struct netdev_queue *txq;
 	struct airoha_queue *q;
···
 	msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
 	       FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);

-	q = &eth->q_tx[qid];
+	q = &qdma->q_tx[qid];
 	if (WARN_ON_ONCE(!q->ndesc))
 		goto error;
···
 		e->dma_addr = addr;
 		e->dma_len = len;

-		airoha_qdma_rmw(eth, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
+		airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
+				TX_RING_CPU_IDX_MASK,
 				FIELD_PREP(TX_RING_CPU_IDX_MASK, index));

 		data = skb_frag_address(frag);
···
 			     struct ethtool_drvinfo *info)
 {
 	struct airoha_gdm_port *port = netdev_priv(dev);
-	struct airoha_eth *eth = port->eth;
+	struct airoha_eth *eth = port->qdma->eth;

 	strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver));
 	strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info));
···
 {
 	const __be32 *id_ptr = of_get_property(np, "reg", NULL);
 	struct airoha_gdm_port *port;
+	struct airoha_qdma *qdma;
 	struct net_device *dev;
 	int err, index;
 	u32 id;
···
 		return -ENOMEM;
 	}

+	qdma = &eth->qdma[index % AIROHA_MAX_NUM_QDMA];
 	dev->netdev_ops = &airoha_netdev_ops;
 	dev->ethtool_ops = &airoha_ethtool_ops;
 	dev->max_mtu = AIROHA_MAX_MTU;
···
 			    NETIF_F_SG | NETIF_F_TSO;
 	dev->features |= dev->hw_features;
 	dev->dev.of_node = np;
+	dev->irq = qdma->irq;
 	SET_NETDEV_DEV(dev, eth->dev);

 	err = of_get_ethdev_address(np, dev);
···
 	port = netdev_priv(dev);
 	u64_stats_init(&port->stats.syncp);
 	spin_lock_init(&port->stats.lock);
+	port->qdma = qdma;
 	port->dev = dev;
-	port->eth = eth;
 	port->id = id;
 	eth->ports[index] = port;
···
 		return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs),
 				     "failed to iomap fe regs\n");

-	eth->qdma_regs = devm_platform_ioremap_resource_byname(pdev, "qdma0");
-	if (IS_ERR(eth->qdma_regs))
-		return dev_err_probe(eth->dev, PTR_ERR(eth->qdma_regs),
-				     "failed to iomap qdma regs\n");
-
 	eth->rsts[0].id = "fe";
 	eth->rsts[1].id = "pdma";
 	eth->rsts[2].id = "qdma";
···
 		return err;
 	}

-	spin_lock_init(&eth->irq_lock);
-	eth->irq = platform_get_irq(pdev, 0);
-	if (eth->irq < 0)
-		return eth->irq;
-
 	eth->napi_dev = alloc_netdev_dummy(0);
 	if (!eth->napi_dev)
 		return -ENOMEM;
···
 	strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name));
 	platform_set_drvdata(pdev, eth);

-	err = airoha_hw_init(eth);
+	err = airoha_hw_init(pdev, eth);
 	if (err)
 		goto error;

-	airoha_qdma_start_napi(eth);
+	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+		airoha_qdma_start_napi(&eth->qdma[i]);
+
 	for_each_child_of_node(pdev->dev.of_node, np) {
 		if (!of_device_is_compatible(np, "airoha,eth-mac"))
 			continue;
···
 	return 0;

 error:
-	airoha_hw_cleanup(eth);
+	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+		airoha_hw_cleanup(&eth->qdma[i]);
+
 	for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
 		struct airoha_gdm_port *port = eth->ports[i];
···
 	struct airoha_eth *eth = platform_get_drvdata(pdev);
 	int i;

-	airoha_hw_cleanup(eth);
+	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+		airoha_hw_cleanup(&eth->qdma[i]);
+
 	for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
 		struct airoha_gdm_port *port = eth->ports[i];