Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'add-devm_clk_bulk_get_optional_enable-helper-and-use-in-axi-ethernet-driver'

Suraj Gupta says:

====================
Add devm_clk_bulk_get_optional_enable() helper and use in AXI Ethernet driver

This patch series introduces a new managed clock framework helper function
and demonstrates its usage in the AXI Ethernet driver.

Device drivers frequently need to get optional bulk clocks, prepare them,
and enable them during probe, while ensuring automatic cleanup on device
unbind. Currently, this requires three separate operations with manual
cleanup handling.

The new devm_clk_bulk_get_optional_enable() helper combines these
operations into a single managed call, eliminating boilerplate code and
following the established pattern of devm_clk_bulk_get_all_enabled().
====================

Link: https://patch.msgid.link/20260116192725.972966-1-suraj.gupta2@amd.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+100 -56
+50
drivers/clk/clk-devres.c
··· 179 179 } 180 180 EXPORT_SYMBOL_GPL(devm_clk_bulk_get_optional); 181 181 182 + static void devm_clk_bulk_release_enable(struct device *dev, void *res) 183 + { 184 + struct clk_bulk_devres *devres = res; 185 + 186 + clk_bulk_disable_unprepare(devres->num_clks, devres->clks); 187 + clk_bulk_put(devres->num_clks, devres->clks); 188 + } 189 + 190 + static int __devm_clk_bulk_get_enable(struct device *dev, int num_clks, 191 + struct clk_bulk_data *clks, bool optional) 192 + { 193 + struct clk_bulk_devres *devres; 194 + int ret; 195 + 196 + devres = devres_alloc(devm_clk_bulk_release_enable, 197 + sizeof(*devres), GFP_KERNEL); 198 + if (!devres) 199 + return -ENOMEM; 200 + 201 + if (optional) 202 + ret = clk_bulk_get_optional(dev, num_clks, clks); 203 + else 204 + ret = clk_bulk_get(dev, num_clks, clks); 205 + if (ret) 206 + goto err_clk_get; 207 + 208 + ret = clk_bulk_prepare_enable(num_clks, clks); 209 + if (ret) 210 + goto err_clk_prepare; 211 + 212 + devres->clks = clks; 213 + devres->num_clks = num_clks; 214 + devres_add(dev, devres); 215 + 216 + return 0; 217 + 218 + err_clk_prepare: 219 + clk_bulk_put(num_clks, clks); 220 + err_clk_get: 221 + devres_free(devres); 222 + return ret; 223 + } 224 + 225 + int __must_check devm_clk_bulk_get_optional_enable(struct device *dev, int num_clks, 226 + struct clk_bulk_data *clks) 227 + { 228 + return __devm_clk_bulk_get_enable(dev, num_clks, clks, true); 229 + } 230 + EXPORT_SYMBOL_GPL(devm_clk_bulk_get_optional_enable); 231 + 182 232 static void devm_clk_bulk_release_all(struct device *dev, void *res) 183 233 { 184 234 struct clk_bulk_devres *devres = res;
+27 -56
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
··· 2787 2787 int addr_width = 32; 2788 2788 u32 value; 2789 2789 2790 - ndev = alloc_etherdev(sizeof(*lp)); 2790 + ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*lp)); 2791 2791 if (!ndev) 2792 2792 return -ENOMEM; 2793 2793 ··· 2815 2815 seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock); 2816 2816 INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats); 2817 2817 2818 - lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk"); 2818 + lp->axi_clk = devm_clk_get_optional_enabled(&pdev->dev, 2819 + "s_axi_lite_clk"); 2819 2820 if (!lp->axi_clk) { 2820 2821 /* For backward compatibility, if named AXI clock is not present, 2821 2822 * treat the first clock specified as the AXI clock. 2822 2823 */ 2823 - lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL); 2824 + lp->axi_clk = devm_clk_get_optional_enabled(&pdev->dev, NULL); 2824 2825 } 2825 - if (IS_ERR(lp->axi_clk)) { 2826 - ret = PTR_ERR(lp->axi_clk); 2827 - goto free_netdev; 2828 - } 2829 - ret = clk_prepare_enable(lp->axi_clk); 2830 - if (ret) { 2831 - dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret); 2832 - goto free_netdev; 2833 - } 2826 + if (IS_ERR(lp->axi_clk)) 2827 + return dev_err_probe(&pdev->dev, PTR_ERR(lp->axi_clk), 2828 + "could not get AXI clock\n"); 2834 2829 2835 2830 lp->misc_clks[0].id = "axis_clk"; 2836 2831 lp->misc_clks[1].id = "ref_clk"; 2837 2832 lp->misc_clks[2].id = "mgt_clk"; 2838 2833 2839 - ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks); 2834 + ret = devm_clk_bulk_get_optional_enable(&pdev->dev, XAE_NUM_MISC_CLOCKS, 2835 + lp->misc_clks); 2840 2836 if (ret) 2841 - goto cleanup_clk; 2842 - 2843 - ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks); 2844 - if (ret) 2845 - goto cleanup_clk; 2837 + return dev_err_probe(&pdev->dev, ret, 2838 + "could not get/enable misc. 
clocks\n"); 2846 2839 2847 2840 /* Map device registers */ 2848 2841 lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres); 2849 - if (IS_ERR(lp->regs)) { 2850 - ret = PTR_ERR(lp->regs); 2851 - goto cleanup_clk; 2852 - } 2842 + if (IS_ERR(lp->regs)) 2843 + return PTR_ERR(lp->regs); 2853 2844 lp->regs_start = ethres->start; 2854 2845 2855 2846 /* Setup checksum offload, but default to off if not specified */ ··· 2909 2918 lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX; 2910 2919 break; 2911 2920 default: 2912 - ret = -EINVAL; 2913 - goto cleanup_clk; 2921 + return -EINVAL; 2914 2922 } 2915 2923 } else { 2916 2924 ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode); 2917 2925 if (ret) 2918 - goto cleanup_clk; 2926 + return ret; 2919 2927 } 2920 2928 if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII && 2921 2929 lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) { 2922 2930 dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n"); 2923 - ret = -EINVAL; 2924 - goto cleanup_clk; 2931 + return -EINVAL; 2925 2932 } 2926 2933 2927 2934 if (!of_property_present(pdev->dev.of_node, "dmas")) { ··· 2934 2945 dev_err(&pdev->dev, 2935 2946 "unable to get DMA resource\n"); 2936 2947 of_node_put(np); 2937 - goto cleanup_clk; 2948 + return ret; 2938 2949 } 2939 2950 lp->dma_regs = devm_ioremap_resource(&pdev->dev, 2940 2951 &dmares); ··· 2951 2962 } 2952 2963 if (IS_ERR(lp->dma_regs)) { 2953 2964 dev_err(&pdev->dev, "could not map DMA regs\n"); 2954 - ret = PTR_ERR(lp->dma_regs); 2955 - goto cleanup_clk; 2965 + return PTR_ERR(lp->dma_regs); 2956 2966 } 2957 2967 if (lp->rx_irq <= 0 || lp->tx_irq <= 0) { 2958 2968 dev_err(&pdev->dev, "could not determine irqs\n"); 2959 - ret = -ENOMEM; 2960 - goto cleanup_clk; 2969 + return -ENOMEM; 2961 2970 } 2962 2971 2963 2972 /* Reset core now that clocks are enabled, prior to accessing MDIO */ 2964 2973 ret = __axienet_device_reset(lp); 2965 2974 if (ret) 2966 - goto cleanup_clk; 2975 + 
return ret; 2967 2976 2968 2977 /* Autodetect the need for 64-bit DMA pointers. 2969 2978 * When the IP is configured for a bus width bigger than 32 bits, ··· 2988 3001 } 2989 3002 if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) { 2990 3003 dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n"); 2991 - ret = -EINVAL; 2992 - goto cleanup_clk; 3004 + return -EINVAL; 2993 3005 } 2994 3006 2995 3007 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width)); 2996 3008 if (ret) { 2997 3009 dev_err(&pdev->dev, "No suitable DMA available\n"); 2998 - goto cleanup_clk; 3010 + return ret; 2999 3011 } 3000 3012 netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll); 3001 3013 netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll); ··· 3004 3018 3005 3019 lp->eth_irq = platform_get_irq_optional(pdev, 0); 3006 3020 if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) { 3007 - ret = lp->eth_irq; 3008 - goto cleanup_clk; 3021 + return lp->eth_irq; 3009 3022 } 3010 3023 tx_chan = dma_request_chan(lp->dev, "tx_chan0"); 3011 - if (IS_ERR(tx_chan)) { 3012 - ret = PTR_ERR(tx_chan); 3013 - dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n"); 3014 - goto cleanup_clk; 3015 - } 3024 + if (IS_ERR(tx_chan)) 3025 + return dev_err_probe(lp->dev, PTR_ERR(tx_chan), 3026 + "No Ethernet DMA (TX) channel found\n"); 3016 3027 3017 3028 cfg.reset = 1; 3018 3029 /* As name says VDMA but it has support for DMA channel reset */ ··· 3017 3034 if (ret < 0) { 3018 3035 dev_err(&pdev->dev, "Reset channel failed\n"); 3019 3036 dma_release_channel(tx_chan); 3020 - goto cleanup_clk; 3037 + return ret; 3021 3038 } 3022 3039 3023 3040 dma_release_channel(tx_chan); ··· 3122 3139 put_device(&lp->pcs_phy->dev); 3123 3140 if (lp->mii_bus) 3124 3141 axienet_mdio_teardown(lp); 3125 - cleanup_clk: 3126 - clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); 3127 - clk_disable_unprepare(lp->axi_clk); 3128 - 3129 - free_netdev: 3130 - 
free_netdev(ndev); 3131 - 3132 3142 return ret; 3133 3143 } 3134 3144 ··· 3139 3163 put_device(&lp->pcs_phy->dev); 3140 3164 3141 3165 axienet_mdio_teardown(lp); 3142 - 3143 - clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); 3144 - clk_disable_unprepare(lp->axi_clk); 3145 - 3146 - free_netdev(ndev); 3147 3166 } 3148 3167 3149 3168 static void axienet_shutdown(struct platform_device *pdev)
+23
include/linux/clk.h
··· 479 479 int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks, 480 480 struct clk_bulk_data *clks); 481 481 /** 482 + * devm_clk_bulk_get_optional_enable - Get and enable optional bulk clocks (managed) 483 + * @dev: device for clock "consumer" 484 + * @num_clks: the number of clk_bulk_data 485 + * @clks: pointer to the clk_bulk_data table of consumer 486 + * 487 + * Behaves the same as devm_clk_bulk_get_optional() but also prepares and enables 488 + * the clocks in one operation with management. The clks will automatically be 489 + * disabled, unprepared and freed when the device is unbound. 490 + * 491 + * Return: 0 if all clocks specified in clk_bulk_data table are obtained 492 + * and enabled successfully, or for any clk there was no clk provider available. 493 + * Otherwise returns valid IS_ERR() condition containing errno. 494 + */ 495 + int __must_check devm_clk_bulk_get_optional_enable(struct device *dev, int num_clks, 496 + struct clk_bulk_data *clks); 497 + /** 482 498 * devm_clk_bulk_get_all - managed get multiple clk consumers 483 499 * @dev: device for clock "consumer" 484 500 * @clks: pointer to the clk_bulk_data table of consumer ··· 1041 1025 1042 1026 static inline int __must_check devm_clk_bulk_get_optional(struct device *dev, 1043 1027 int num_clks, struct clk_bulk_data *clks) 1028 + { 1029 + return 0; 1030 + } 1031 + 1032 + static inline int __must_check devm_clk_bulk_get_optional_enable(struct device *dev, 1033 + int num_clks, 1034 + struct clk_bulk_data *clks) 1044 1035 { 1045 1036 return 0; 1046 1037 }