Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * linux/include/linux/clk.h
4 *
5 * Copyright (C) 2004 ARM Limited.
6 * Written by Deep Blue Solutions Limited.
7 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
8 */
9#ifndef __LINUX_CLK_H
10#define __LINUX_CLK_H
11
12#include <linux/err.h>
13#include <linux/kernel.h>
14#include <linux/notifier.h>
15
16struct device;
17struct clk;
18struct device_node;
19struct of_phandle_args;
20
/**
 * DOC: clk notifier callback types
 *
 * PRE_RATE_CHANGE - called immediately before the clk rate is changed,
 *     to indicate that the rate change will proceed.  Drivers must
 *     immediately terminate any operations that will be affected by the
 *     rate change.  Callbacks may either return NOTIFY_DONE, NOTIFY_OK,
 *     NOTIFY_STOP or NOTIFY_BAD.
 *
 * ABORT_RATE_CHANGE - called if the rate change failed for some reason
 *     after PRE_RATE_CHANGE.  In this case, all registered notifiers on
 *     the clk will be called with ABORT_RATE_CHANGE.  Callbacks must
 *     always return NOTIFY_DONE or NOTIFY_OK.
 *
 * POST_RATE_CHANGE - called after the clk rate change has successfully
 *     completed.  Callbacks must always return NOTIFY_DONE or NOTIFY_OK.
 */
#define PRE_RATE_CHANGE BIT(0)
#define POST_RATE_CHANGE BIT(1)
#define ABORT_RATE_CHANGE BIT(2)
42
/**
 * struct clk_notifier - associate a clk with a notifier
 * @clk: struct clk * to associate the notifier with
 * @notifier_head: an srcu_notifier_head for this clk
 * @node: linked list pointers
 *
 * A list of struct clk_notifier is maintained by the notifier code.
 * An entry is created whenever code registers the first notifier on a
 * particular @clk.  Future notifiers on that @clk are added to the
 * @notifier_head.
 */
struct clk_notifier {
        struct clk *clk;
        struct srcu_notifier_head notifier_head;
        struct list_head node;
};
59
/**
 * struct clk_notifier_data - rate data to pass to the notifier callback
 * @clk: struct clk * being changed
 * @old_rate: previous rate of this clk
 * @new_rate: new rate of this clk
 *
 * For a pre-notifier, old_rate is the clk's rate before this rate
 * change, and new_rate is what the rate will be in the future.  For a
 * post-notifier, old_rate and new_rate are both set to the clk's
 * current rate (this was done to optimize the implementation).
 */
struct clk_notifier_data {
        struct clk *clk;
        unsigned long old_rate;
        unsigned long new_rate;
};
76
/**
 * struct clk_bulk_data - Data used for bulk clk operations.
 *
 * @id: clock consumer ID
 * @clk: struct clk * to store the associated clock
 *
 * The CLK APIs provide a series of clk_bulk_*() API calls as
 * a convenience to consumers which require multiple clks.  This
 * structure is used to manage data for these calls.
 */
struct clk_bulk_data {
        const char *id;
        struct clk *clk;
};
91
92#ifdef CONFIG_COMMON_CLK
93
94/**
95 * clk_notifier_register - register a clock rate-change notifier callback
96 * @clk: clock whose rate we are interested in
97 * @nb: notifier block with callback function pointer
98 *
99 * ProTip: debugging across notifier chains can be frustrating. Make sure that
100 * your notifier callback function prints a nice big warning in case of
101 * failure.
102 */
103int clk_notifier_register(struct clk *clk, struct notifier_block *nb);
104
105/**
106 * clk_notifier_unregister - unregister a clock rate-change notifier callback
107 * @clk: clock whose rate we are no longer interested in
108 * @nb: notifier block which will be unregistered
109 */
110int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);
111
112/**
113 * devm_clk_notifier_register - register a managed rate-change notifier callback
114 * @dev: device for clock "consumer"
115 * @clk: clock whose rate we are interested in
116 * @nb: notifier block with callback function pointer
117 *
 * Returns 0 on success, or a negative errno otherwise
119 */
120int devm_clk_notifier_register(struct device *dev, struct clk *clk,
121 struct notifier_block *nb);
122
123/**
124 * clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion)
125 * for a clock source.
126 * @clk: clock source
127 *
128 * This gets the clock source accuracy expressed in ppb.
129 * A perfect clock returns 0.
130 */
131long clk_get_accuracy(struct clk *clk);
132
133/**
134 * clk_set_phase - adjust the phase shift of a clock signal
135 * @clk: clock signal source
136 * @degrees: number of degrees the signal is shifted
137 *
138 * Shifts the phase of a clock signal by the specified degrees. Returns 0 on
 * success, or a negative errno otherwise.
140 */
141int clk_set_phase(struct clk *clk, int degrees);
142
143/**
144 * clk_get_phase - return the phase shift of a clock signal
145 * @clk: clock signal source
146 *
147 * Returns the phase shift of a clock node in degrees, otherwise returns
 * a negative errno.
149 */
150int clk_get_phase(struct clk *clk);
151
152/**
153 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
154 * @clk: clock signal source
155 * @num: numerator of the duty cycle ratio to be applied
156 * @den: denominator of the duty cycle ratio to be applied
157 *
158 * Adjust the duty cycle of a clock signal by the specified ratio. Returns 0 on
 * success, or a negative errno otherwise.
160 */
161int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den);
162
163/**
164 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
165 * @clk: clock signal source
166 * @scale: scaling factor to be applied to represent the ratio as an integer
167 *
168 * Returns the duty cycle ratio multiplied by the scale provided, otherwise
 * returns a negative errno.
170 */
171int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);
172
173/**
174 * clk_is_match - check if two clk's point to the same hardware clock
175 * @p: clk compared against q
176 * @q: clk compared against p
177 *
178 * Returns true if the two struct clk pointers both point to the same hardware
179 * clock node. Put differently, returns true if @p and @q
180 * share the same &struct clk_core object.
181 *
182 * Returns false otherwise. Note that two NULL clks are treated as matching.
183 */
184bool clk_is_match(const struct clk *p, const struct clk *q);
185
186/**
187 * clk_rate_exclusive_get - get exclusivity over the rate control of a
188 * producer
189 * @clk: clock source
190 *
 * This function allows drivers to get exclusive control over the rate of a
 * provider.  It prevents any other consumer from executing, even indirectly,
 * an operation which could alter the rate of the provider or cause glitches.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * driver, the rate effectively gets locked as exclusivity can't be preempted.
197 *
198 * Must not be called from within atomic context.
199 *
200 * Returns success (0) or negative errno.
201 */
202int clk_rate_exclusive_get(struct clk *clk);
203
204/**
205 * devm_clk_rate_exclusive_get - devm variant of clk_rate_exclusive_get
206 * @dev: device the exclusivity is bound to
207 * @clk: clock source
208 *
209 * Calls clk_rate_exclusive_get() on @clk and registers a devm cleanup handler
210 * on @dev to call clk_rate_exclusive_put().
211 *
212 * Must not be called from within atomic context.
213 */
214int devm_clk_rate_exclusive_get(struct device *dev, struct clk *clk);
215
216/**
217 * clk_rate_exclusive_put - release exclusivity over the rate control of a
218 * producer
219 * @clk: clock source
220 *
221 * This function allows drivers to release the exclusivity it previously got
222 * from clk_rate_exclusive_get()
223 *
224 * The caller must balance the number of clk_rate_exclusive_get() and
225 * clk_rate_exclusive_put() calls.
226 *
227 * Must not be called from within atomic context.
228 */
229void clk_rate_exclusive_put(struct clk *clk);
230
231/**
232 * clk_save_context - save clock context for poweroff
233 *
234 * Saves the context of the clock register for powerstates in which the
235 * contents of the registers will be lost. Occurs deep within the suspend
236 * code so locking is not necessary.
237 */
238int clk_save_context(void);
239
240/**
241 * clk_restore_context - restore clock context after poweroff
242 *
243 * This occurs with all clocks enabled. Occurs deep within the resume code
244 * so locking is not necessary.
245 */
246void clk_restore_context(void);
247
248#else /* !CONFIG_COMMON_CLK */
249
/*
 * Stubs for !CONFIG_COMMON_CLK: the notifier and accuracy APIs are not
 * available without the common clk framework, so report -ENOTSUPP to
 * any caller that ends up here.
 */
static inline int clk_notifier_register(struct clk *clk,
                                        struct notifier_block *nb)
{
        return -ENOTSUPP;
}

static inline int clk_notifier_unregister(struct clk *clk,
                                          struct notifier_block *nb)
{
        return -ENOTSUPP;
}

static inline int devm_clk_notifier_register(struct device *dev,
                                             struct clk *clk,
                                             struct notifier_block *nb)
{
        return -ENOTSUPP;
}

static inline long clk_get_accuracy(struct clk *clk)
{
        return -ENOTSUPP;
}
273
274static inline long clk_set_phase(struct clk *clk, int phase)
275{
276 return -ENOTSUPP;
277}
278
279static inline long clk_get_phase(struct clk *clk)
280{
281 return -ENOTSUPP;
282}
283
/* Stub for !CONFIG_COMMON_CLK: duty-cycle control is unsupported. */
static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num,
                                     unsigned int den)
{
        return -ENOTSUPP;
}
289
/*
 * Stub for !CONFIG_COMMON_CLK.  Returns int, matching the
 * clk_get_scaled_duty_cycle() prototype used when CONFIG_COMMON_CLK=y;
 * that prototype is documented to return a negative value on error, so
 * the stub must not use an unsigned return type.  The stub itself
 * always reports a duty-cycle ratio of 0.
 */
static inline int clk_get_scaled_duty_cycle(struct clk *clk,
                                            unsigned int scale)
{
        return 0;
}
295
/*
 * Without the common clk framework there is no clk_core object behind
 * a struct clk, so two clks refer to the same hardware clock exactly
 * when they are the same pointer.  Two NULL clks therefore match.
 */
static inline bool clk_is_match(const struct clk *p, const struct clk *q)
{
        return p == q;
}
300
/*
 * Stubs for !CONFIG_COMMON_CLK: rate exclusivity and clock-context
 * save/restore become no-ops that report success, so callers do not
 * need to special-case kernels built without the framework.
 */
static inline int clk_rate_exclusive_get(struct clk *clk)
{
        return 0;
}

static inline int devm_clk_rate_exclusive_get(struct device *dev, struct clk *clk)
{
        return 0;
}

static inline void clk_rate_exclusive_put(struct clk *clk) {}

static inline int clk_save_context(void)
{
        return 0;
}

static inline void clk_restore_context(void) {}
319
320#endif /* !CONFIG_COMMON_CLK */
321
322#ifdef CONFIG_HAVE_CLK_PREPARE
323/**
324 * clk_prepare - prepare a clock source
325 * @clk: clock source
326 *
327 * This prepares the clock source for use.
328 *
329 * Must not be called from within atomic context.
330 */
331int clk_prepare(struct clk *clk);
332
333/**
334 * clk_unprepare - undo preparation of a clock source
335 * @clk: clock source
336 *
337 * This undoes a previously prepared clock. The caller must balance
338 * the number of prepare and unprepare calls.
339 *
340 * Must not be called from within atomic context.
341 */
342void clk_unprepare(struct clk *clk);
343
344int __must_check clk_bulk_prepare(int num_clks,
345 const struct clk_bulk_data *clks);
346void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks);
347
348/**
349 * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it.
350 * @clk: clock source
351 *
352 * Returns true if clk_prepare() implicitly enables the clock, effectively
353 * making clk_enable()/clk_disable() no-ops, false otherwise.
354 *
355 * This is of interest mainly to the power management code where actually
356 * disabling the clock also requires unpreparing it to have any material
357 * effect.
358 *
359 * Regardless of the value returned here, the caller must always invoke
360 * clk_enable() or clk_prepare_enable() and counterparts for usage counts
361 * to be right.
362 */
363bool clk_is_enabled_when_prepared(struct clk *clk);
364#else /* !CONFIG_HAVE_CLK_PREPARE */
/*
 * Stubs for !CONFIG_HAVE_CLK_PREPARE: preparing/unpreparing is a no-op
 * that reports success.  Each stub keeps a might_sleep() annotation so
 * the "must not be called from atomic context" rule documented for the
 * real implementations is still enforced by debug kernels.
 */
static inline int clk_prepare(struct clk *clk)
{
        might_sleep();
        return 0;
}

static inline void clk_unprepare(struct clk *clk)
{
        might_sleep();
}

static inline int __must_check
clk_bulk_prepare(int num_clks, const struct clk_bulk_data *clks)
{
        might_sleep();
        return 0;
}

static inline void clk_bulk_unprepare(int num_clks,
                                      const struct clk_bulk_data *clks)
{
        might_sleep();
}

/*
 * Without CONFIG_HAVE_CLK_PREPARE a prepared clock is never implicitly
 * enabled, so always report false.
 */
static inline bool clk_is_enabled_when_prepared(struct clk *clk)
{
        return false;
}
393#endif /* !CONFIG_HAVE_CLK_PREPARE */
394
395#ifdef CONFIG_HAVE_CLK
396/**
397 * clk_get - lookup and obtain a reference to a clock producer.
398 * @dev: device for clock "consumer"
399 * @id: clock consumer ID
400 *
401 * Returns a struct clk corresponding to the clock producer, or
402 * valid IS_ERR() condition containing errno. The implementation
403 * uses @dev and @id to determine the clock consumer, and thereby
404 * the clock producer. (IOW, @id may be identical strings, but
405 * clk_get may return different clock producers depending on @dev.)
406 *
407 * Drivers must assume that the clock source is not enabled.
408 *
409 * clk_get should not be called from within interrupt context.
410 */
411struct clk *clk_get(struct device *dev, const char *id);
412
413/**
414 * clk_bulk_get - lookup and obtain a number of references to clock producer.
415 * @dev: device for clock "consumer"
416 * @num_clks: the number of clk_bulk_data
417 * @clks: the clk_bulk_data table of consumer
418 *
419 * This helper function allows drivers to get several clk consumers in one
420 * operation. If any of the clk cannot be acquired then any clks
421 * that were obtained will be freed before returning to the caller.
422 *
 * Returns 0 if all clocks specified in the clk_bulk_data table are obtained
 * successfully, or a negative errno otherwise.
425 * The implementation uses @dev and @clk_bulk_data.id to determine the
426 * clock consumer, and thereby the clock producer.
427 * The clock returned is stored in each @clk_bulk_data.clk field.
428 *
429 * Drivers must assume that the clock source is not enabled.
430 *
431 * clk_bulk_get should not be called from within interrupt context.
432 */
433int __must_check clk_bulk_get(struct device *dev, int num_clks,
434 struct clk_bulk_data *clks);
435/**
436 * clk_bulk_get_all - lookup and obtain all available references to clock
437 * producer.
438 * @dev: device for clock "consumer"
439 * @clks: pointer to the clk_bulk_data table of consumer
440 *
441 * This helper function allows drivers to get all clk consumers in one
442 * operation. If any of the clk cannot be acquired then any clks
443 * that were obtained will be freed before returning to the caller.
444 *
445 * Returns a positive value for the number of clocks obtained while the
446 * clock references are stored in the clk_bulk_data table in @clks field.
447 * Returns 0 if there're none and a negative value if something failed.
448 *
449 * Drivers must assume that the clock source is not enabled.
450 *
 * clk_bulk_get_all should not be called from within interrupt context.
452 */
453int __must_check clk_bulk_get_all(struct device *dev,
454 struct clk_bulk_data **clks);
455
456/**
457 * clk_bulk_get_optional - lookup and obtain a number of references to clock producer
458 * @dev: device for clock "consumer"
459 * @num_clks: the number of clk_bulk_data
460 * @clks: the clk_bulk_data table of consumer
461 *
462 * Behaves the same as clk_bulk_get() except where there is no clock producer.
463 * In this case, instead of returning -ENOENT, the function returns 0 and
464 * NULL for a clk for which a clock producer could not be determined.
465 */
466int __must_check clk_bulk_get_optional(struct device *dev, int num_clks,
467 struct clk_bulk_data *clks);
468/**
469 * devm_clk_bulk_get - managed get multiple clk consumers
470 * @dev: device for clock "consumer"
471 * @num_clks: the number of clk_bulk_data
472 * @clks: the clk_bulk_data table of consumer
473 *
474 * Return 0 on success, an errno on failure.
475 *
476 * This helper function allows drivers to get several clk
477 * consumers in one operation with management, the clks will
478 * automatically be freed when the device is unbound.
479 */
480int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
481 struct clk_bulk_data *clks);
482/**
483 * devm_clk_bulk_get_optional - managed get multiple optional consumer clocks
484 * @dev: device for clock "consumer"
485 * @num_clks: the number of clk_bulk_data
486 * @clks: pointer to the clk_bulk_data table of consumer
487 *
488 * Behaves the same as devm_clk_bulk_get() except where there is no clock
489 * producer. In this case, instead of returning -ENOENT, the function returns
490 * NULL for given clk. It is assumed all clocks in clk_bulk_data are optional.
491 *
492 * Returns 0 if all clocks specified in clk_bulk_data table are obtained
493 * successfully or for any clk there was no clk provider available, otherwise
494 * returns valid IS_ERR() condition containing errno.
495 * The implementation uses @dev and @clk_bulk_data.id to determine the
496 * clock consumer, and thereby the clock producer.
497 * The clock returned is stored in each @clk_bulk_data.clk field.
498 *
499 * Drivers must assume that the clock source is not enabled.
500 *
 * devm_clk_bulk_get_optional should not be called from within interrupt context.
502 */
503int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks,
504 struct clk_bulk_data *clks);
505/**
506 * devm_clk_bulk_get_optional_enable - Get and enable optional bulk clocks (managed)
507 * @dev: device for clock "consumer"
508 * @num_clks: the number of clk_bulk_data
509 * @clks: pointer to the clk_bulk_data table of consumer
510 *
511 * Behaves the same as devm_clk_bulk_get_optional() but also prepares and enables
512 * the clocks in one operation with management. The clks will automatically be
513 * disabled, unprepared and freed when the device is unbound.
514 *
515 * Return: 0 if all clocks specified in clk_bulk_data table are obtained
516 * and enabled successfully, or for any clk there was no clk provider available.
517 * Otherwise returns valid IS_ERR() condition containing errno.
518 */
519int __must_check devm_clk_bulk_get_optional_enable(struct device *dev, int num_clks,
520 struct clk_bulk_data *clks);
521/**
522 * devm_clk_bulk_get_all - managed get multiple clk consumers
523 * @dev: device for clock "consumer"
524 * @clks: pointer to the clk_bulk_data table of consumer
525 *
526 * Returns a positive value for the number of clocks obtained while the
527 * clock references are stored in the clk_bulk_data table in @clks field.
528 * Returns 0 if there're none and a negative value if something failed.
529 *
530 * This helper function allows drivers to get several clk
531 * consumers in one operation with management, the clks will
532 * automatically be freed when the device is unbound.
533 */
534
535int __must_check devm_clk_bulk_get_all(struct device *dev,
536 struct clk_bulk_data **clks);
537
538/**
539 * devm_clk_bulk_get_all_enabled - Get and enable all clocks of the consumer (managed)
540 * @dev: device for clock "consumer"
541 * @clks: pointer to the clk_bulk_data table of consumer
542 *
543 * Returns a positive value for the number of clocks obtained while the
544 * clock references are stored in the clk_bulk_data table in @clks field.
545 * Returns 0 if there're none and a negative value if something failed.
546 *
547 * This helper function allows drivers to get all clocks of the
548 * consumer and enables them in one operation with management.
549 * The clks will automatically be disabled and freed when the device
550 * is unbound.
551 */
552
553int __must_check devm_clk_bulk_get_all_enabled(struct device *dev,
554 struct clk_bulk_data **clks);
555
556/**
557 * devm_clk_get - lookup and obtain a managed reference to a clock producer.
558 * @dev: device for clock "consumer"
559 * @id: clock consumer ID
560 *
561 * Context: May sleep.
562 *
563 * Return: a struct clk corresponding to the clock producer, or
564 * valid IS_ERR() condition containing errno. The implementation
565 * uses @dev and @id to determine the clock consumer, and thereby
566 * the clock producer. (IOW, @id may be identical strings, but
567 * clk_get may return different clock producers depending on @dev.)
568 *
569 * Drivers must assume that the clock source is neither prepared nor
570 * enabled.
571 *
572 * The clock will automatically be freed when the device is unbound
573 * from the bus.
574 */
575struct clk *devm_clk_get(struct device *dev, const char *id);
576
577/**
578 * devm_clk_get_prepared - devm_clk_get() + clk_prepare()
579 * @dev: device for clock "consumer"
580 * @id: clock consumer ID
581 *
582 * Context: May sleep.
583 *
584 * Return: a struct clk corresponding to the clock producer, or
585 * valid IS_ERR() condition containing errno. The implementation
586 * uses @dev and @id to determine the clock consumer, and thereby
587 * the clock producer. (IOW, @id may be identical strings, but
588 * clk_get may return different clock producers depending on @dev.)
589 *
590 * The returned clk (if valid) is prepared. Drivers must however assume
591 * that the clock is not enabled.
592 *
593 * The clock will automatically be unprepared and freed when the device
594 * is unbound from the bus.
595 */
596struct clk *devm_clk_get_prepared(struct device *dev, const char *id);
597
598/**
599 * devm_clk_get_enabled - devm_clk_get() + clk_prepare_enable()
600 * @dev: device for clock "consumer"
601 * @id: clock consumer ID
602 *
603 * Context: May sleep.
604 *
605 * Return: a struct clk corresponding to the clock producer, or
606 * valid IS_ERR() condition containing errno. The implementation
607 * uses @dev and @id to determine the clock consumer, and thereby
608 * the clock producer. (IOW, @id may be identical strings, but
609 * clk_get may return different clock producers depending on @dev.)
610 *
611 * The returned clk (if valid) is prepared and enabled.
612 *
613 * The clock will automatically be disabled, unprepared and freed
614 * when the device is unbound from the bus.
615 */
616struct clk *devm_clk_get_enabled(struct device *dev, const char *id);
617
618/**
619 * devm_clk_get_optional - lookup and obtain a managed reference to an optional
620 * clock producer.
621 * @dev: device for clock "consumer"
622 * @id: clock consumer ID
623 *
624 * Context: May sleep.
625 *
626 * Return: a struct clk corresponding to the clock producer, or
627 * valid IS_ERR() condition containing errno. The implementation
628 * uses @dev and @id to determine the clock consumer, and thereby
629 * the clock producer. If no such clk is found, it returns NULL
630 * which serves as a dummy clk. That's the only difference compared
631 * to devm_clk_get().
632 *
633 * Drivers must assume that the clock source is neither prepared nor
634 * enabled.
635 *
636 * The clock will automatically be freed when the device is unbound
637 * from the bus.
638 */
639struct clk *devm_clk_get_optional(struct device *dev, const char *id);
640
641/**
642 * devm_clk_get_optional_prepared - devm_clk_get_optional() + clk_prepare()
643 * @dev: device for clock "consumer"
644 * @id: clock consumer ID
645 *
646 * Context: May sleep.
647 *
648 * Return: a struct clk corresponding to the clock producer, or
649 * valid IS_ERR() condition containing errno. The implementation
650 * uses @dev and @id to determine the clock consumer, and thereby
651 * the clock producer. If no such clk is found, it returns NULL
652 * which serves as a dummy clk. That's the only difference compared
653 * to devm_clk_get_prepared().
654 *
655 * The returned clk (if valid) is prepared. Drivers must however
656 * assume that the clock is not enabled.
657 *
658 * The clock will automatically be unprepared and freed when the
659 * device is unbound from the bus.
660 */
661struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id);
662
663/**
664 * devm_clk_get_optional_enabled - devm_clk_get_optional() +
665 * clk_prepare_enable()
666 * @dev: device for clock "consumer"
667 * @id: clock consumer ID
668 *
669 * Context: May sleep.
670 *
671 * Return: a struct clk corresponding to the clock producer, or
672 * valid IS_ERR() condition containing errno. The implementation
673 * uses @dev and @id to determine the clock consumer, and thereby
674 * the clock producer. If no such clk is found, it returns NULL
675 * which serves as a dummy clk. That's the only difference compared
676 * to devm_clk_get_enabled().
677 *
678 * The returned clk (if valid) is prepared and enabled.
679 *
680 * The clock will automatically be disabled, unprepared and freed
681 * when the device is unbound from the bus.
682 */
683struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id);
684
685/**
686 * devm_clk_get_optional_enabled_with_rate - devm_clk_get_optional() +
687 * clk_set_rate() +
688 * clk_prepare_enable()
689 * @dev: device for clock "consumer"
690 * @id: clock consumer ID
691 * @rate: new clock rate
692 *
693 * Context: May sleep.
694 *
695 * Return: a struct clk corresponding to the clock producer, or
696 * valid IS_ERR() condition containing errno. The implementation
697 * uses @dev and @id to determine the clock consumer, and thereby
698 * the clock producer. If no such clk is found, it returns NULL
699 * which serves as a dummy clk. That's the only difference compared
700 * to devm_clk_get_enabled().
701 *
702 * The returned clk (if valid) is prepared and enabled and rate was set.
703 *
704 * The clock will automatically be disabled, unprepared and freed
705 * when the device is unbound from the bus.
706 */
707struct clk *devm_clk_get_optional_enabled_with_rate(struct device *dev,
708 const char *id,
709 unsigned long rate);
710
711/**
712 * devm_get_clk_from_child - lookup and obtain a managed reference to a
713 * clock producer from child node.
714 * @dev: device for clock "consumer"
715 * @np: pointer to clock consumer node
716 * @con_id: clock consumer ID
717 *
718 * This function parses the clocks, and uses them to look up the
719 * struct clk from the registered list of clock providers by using
720 * @np and @con_id
721 *
722 * The clock will automatically be freed when the device is unbound
723 * from the bus.
724 */
725struct clk *devm_get_clk_from_child(struct device *dev,
726 struct device_node *np, const char *con_id);
727
728/**
729 * clk_enable - inform the system when the clock source should be running.
730 * @clk: clock source
731 *
732 * If the clock can not be enabled/disabled, this should return success.
733 *
734 * May be called from atomic contexts.
735 *
736 * Returns success (0) or negative errno.
737 */
738int clk_enable(struct clk *clk);
739
740/**
741 * clk_bulk_enable - inform the system when the set of clks should be running.
742 * @num_clks: the number of clk_bulk_data
743 * @clks: the clk_bulk_data table of consumer
744 *
745 * May be called from atomic contexts.
746 *
747 * Returns success (0) or negative errno.
748 */
749int __must_check clk_bulk_enable(int num_clks,
750 const struct clk_bulk_data *clks);
751
752/**
753 * clk_disable - inform the system when the clock source is no longer required.
754 * @clk: clock source
755 *
756 * Inform the system that a clock source is no longer required by
757 * a driver and may be shut down.
758 *
759 * May be called from atomic contexts.
760 *
761 * Implementation detail: if the clock source is shared between
762 * multiple drivers, clk_enable() calls must be balanced by the
763 * same number of clk_disable() calls for the clock source to be
764 * disabled.
765 */
766void clk_disable(struct clk *clk);
767
768/**
769 * clk_bulk_disable - inform the system when the set of clks is no
770 * longer required.
771 * @num_clks: the number of clk_bulk_data
772 * @clks: the clk_bulk_data table of consumer
773 *
774 * Inform the system that a set of clks is no longer required by
775 * a driver and may be shut down.
776 *
777 * May be called from atomic contexts.
778 *
779 * Implementation detail: if the set of clks is shared between
780 * multiple drivers, clk_bulk_enable() calls must be balanced by the
781 * same number of clk_bulk_disable() calls for the clock source to be
782 * disabled.
783 */
784void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks);
785
786/**
787 * clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
788 * This is only valid once the clock source has been enabled.
789 * @clk: clock source
790 */
791unsigned long clk_get_rate(struct clk *clk);
792
793/**
794 * clk_put - "free" the clock source
795 * @clk: clock source
796 *
797 * Note: drivers must ensure that all clk_enable calls made on this
798 * clock source are balanced by clk_disable calls prior to calling
799 * this function.
800 *
801 * clk_put should not be called from within interrupt context.
802 */
803void clk_put(struct clk *clk);
804
805/**
806 * clk_bulk_put - "free" the clock source
807 * @num_clks: the number of clk_bulk_data
808 * @clks: the clk_bulk_data table of consumer
809 *
810 * Note: drivers must ensure that all clk_bulk_enable calls made on this
811 * clock source are balanced by clk_bulk_disable calls prior to calling
812 * this function.
813 *
814 * clk_bulk_put should not be called from within interrupt context.
815 */
816void clk_bulk_put(int num_clks, struct clk_bulk_data *clks);
817
818/**
819 * clk_bulk_put_all - "free" all the clock source
820 * @num_clks: the number of clk_bulk_data
821 * @clks: the clk_bulk_data table of consumer
822 *
823 * Note: drivers must ensure that all clk_bulk_enable calls made on this
824 * clock source are balanced by clk_bulk_disable calls prior to calling
825 * this function.
826 *
827 * clk_bulk_put_all should not be called from within interrupt context.
828 */
829void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks);
830
831/**
832 * devm_clk_put - "free" a managed clock source
833 * @dev: device used to acquire the clock
834 * @clk: clock source acquired with devm_clk_get()
835 *
836 * Note: drivers must ensure that all clk_enable calls made on this
837 * clock source are balanced by clk_disable calls prior to calling
838 * this function.
839 *
840 * clk_put should not be called from within interrupt context.
841 */
842void devm_clk_put(struct device *dev, struct clk *clk);
843
844/*
845 * The remaining APIs are optional for machine class support.
846 */
847
848
849/**
850 * clk_round_rate - adjust a rate to the exact rate a clock can provide
851 * @clk: clock source
852 * @rate: desired clock rate in Hz
853 *
854 * This answers the question "if I were to pass @rate to clk_set_rate(),
855 * what clock rate would I end up with?" without changing the hardware
856 * in any way. In other words:
857 *
858 * rate = clk_round_rate(clk, r);
859 *
860 * and:
861 *
862 * clk_set_rate(clk, r);
863 * rate = clk_get_rate(clk);
864 *
865 * are equivalent except the former does not modify the clock hardware
866 * in any way.
867 *
868 * Returns rounded clock rate in Hz, or negative errno.
869 */
870long clk_round_rate(struct clk *clk, unsigned long rate);
871
872/**
873 * clk_set_rate - set the clock rate for a clock source
874 * @clk: clock source
875 * @rate: desired clock rate in Hz
876 *
877 * Updating the rate starts at the top-most affected clock and then
878 * walks the tree down to the bottom-most clock that needs updating.
879 *
880 * Returns success (0) or negative errno.
881 */
882int clk_set_rate(struct clk *clk, unsigned long rate);
883
884/**
885 * clk_set_rate_exclusive- set the clock rate and claim exclusivity over
886 * clock source
887 * @clk: clock source
888 * @rate: desired clock rate in Hz
889 *
890 * This helper function allows drivers to atomically set the rate of a producer
891 * and claim exclusivity over the rate control of the producer.
892 *
893 * It is essentially a combination of clk_set_rate() and
 * clk_rate_exclusive_get().  Caller must balance this call with a call to
895 * clk_rate_exclusive_put()
896 *
897 * Returns success (0) or negative errno.
898 */
899int clk_set_rate_exclusive(struct clk *clk, unsigned long rate);
900
901/**
902 * clk_has_parent - check if a clock is a possible parent for another
903 * @clk: clock source
904 * @parent: parent clock source
905 *
906 * This function can be used in drivers that need to check that a clock can be
907 * the parent of another without actually changing the parent.
908 *
909 * Returns true if @parent is a possible parent for @clk, false otherwise.
910 */
911bool clk_has_parent(const struct clk *clk, const struct clk *parent);
912
913/**
914 * clk_set_rate_range - set a rate range for a clock source
915 * @clk: clock source
916 * @min: desired minimum clock rate in Hz, inclusive
917 * @max: desired maximum clock rate in Hz, inclusive
918 *
919 * Returns success (0) or negative errno.
920 */
921int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max);
922
923/**
924 * clk_set_min_rate - set a minimum clock rate for a clock source
925 * @clk: clock source
926 * @rate: desired minimum clock rate in Hz, inclusive
927 *
928 * Returns success (0) or negative errno.
929 */
930int clk_set_min_rate(struct clk *clk, unsigned long rate);
931
932/**
933 * clk_set_max_rate - set a maximum clock rate for a clock source
934 * @clk: clock source
935 * @rate: desired maximum clock rate in Hz, inclusive
936 *
937 * Returns success (0) or negative errno.
938 */
939int clk_set_max_rate(struct clk *clk, unsigned long rate);
940
941/**
942 * clk_set_parent - set the parent clock source for this clock
943 * @clk: clock source
944 * @parent: parent clock source
945 *
946 * Returns success (0) or negative errno.
947 */
948int clk_set_parent(struct clk *clk, struct clk *parent);
949
950/**
951 * clk_get_parent - get the parent clock source for this clock
952 * @clk: clock source
953 *
954 * Returns struct clk corresponding to parent clock source, or
955 * valid IS_ERR() condition containing errno.
956 */
957struct clk *clk_get_parent(struct clk *clk);
958
959/**
960 * clk_get_sys - get a clock based upon the device name
961 * @dev_id: device name
962 * @con_id: connection ID
963 *
964 * Returns a struct clk corresponding to the clock producer, or
965 * valid IS_ERR() condition containing errno. The implementation
966 * uses @dev_id and @con_id to determine the clock consumer, and
967 * thereby the clock producer. In contrast to clk_get() this function
968 * takes the device name instead of the device itself for identification.
969 *
970 * Drivers must assume that the clock source is not enabled.
971 *
972 * clk_get_sys should not be called from within interrupt context.
973 */
974struct clk *clk_get_sys(const char *dev_id, const char *con_id);
975
976#else /* !CONFIG_HAVE_CLK */
977
/*
 * !CONFIG_HAVE_CLK stubs: every lookup variant "succeeds" by returning
 * NULL (never an ERR_PTR), so consumers compile and run unchanged on
 * platforms without clock support.
 */
static inline struct clk *clk_get(struct device *dev, const char *id)
{
	return NULL;
}

static inline int __must_check clk_bulk_get(struct device *dev, int num_clks,
					    struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check clk_bulk_get_optional(struct device *dev,
					int num_clks, struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check clk_bulk_get_all(struct device *dev,
						struct clk_bulk_data **clks)
{
	return 0;
}

static inline struct clk *devm_clk_get(struct device *dev, const char *id)
{
	return NULL;
}

static inline struct clk *devm_clk_get_prepared(struct device *dev,
						const char *id)
{
	return NULL;
}

static inline struct clk *devm_clk_get_enabled(struct device *dev,
					       const char *id)
{
	return NULL;
}

static inline struct clk *devm_clk_get_optional(struct device *dev,
						const char *id)
{
	return NULL;
}

static inline struct clk *devm_clk_get_optional_prepared(struct device *dev,
							 const char *id)
{
	return NULL;
}

static inline struct clk *devm_clk_get_optional_enabled(struct device *dev,
							const char *id)
{
	return NULL;
}

static inline struct clk *
devm_clk_get_optional_enabled_with_rate(struct device *dev, const char *id,
					unsigned long rate)
{
	return NULL;
}
1042
/* Bulk devm stubs: report success with no clocks actually acquired. */
static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
						 struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check devm_clk_bulk_get_optional(struct device *dev,
					int num_clks, struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check devm_clk_bulk_get_optional_enable(struct device *dev,
								 int num_clks,
								 struct clk_bulk_data *clks)
{
	return 0;
}
1061
1062static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
1063 struct clk_bulk_data **clks)
1064{
1065
1066 return 0;
1067}
1068
static inline int __must_check devm_clk_bulk_get_all_enabled(struct device *dev,
							     struct clk_bulk_data **clks)
{
	return 0;
}

/* Stub: no clock providers exist, so child-node lookup yields NULL. */
static inline struct clk *devm_get_clk_from_child(struct device *dev,
						  struct device_node *np, const char *con_id)
{
	return NULL;
}
1080
/* Releasing a stub (NULL) clock reference is a no-op. */
static inline void clk_put(struct clk *clk) {}

static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {}

static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {}

static inline void devm_clk_put(struct device *dev, struct clk *clk) {}

/* Enabling the stub clock always succeeds. */
static inline int clk_enable(struct clk *clk)
{
	return 0;
}

static inline int __must_check clk_bulk_enable(int num_clks,
					       const struct clk_bulk_data *clks)
{
	return 0;
}
1099
/* Disabling the stub clock is a no-op. */
static inline void clk_disable(struct clk *clk) {}

static inline void clk_bulk_disable(int num_clks,
				    const struct clk_bulk_data *clks) {}
1105
/* Rate queries on the stub clock report 0 Hz; rate changes "succeed". */
static inline unsigned long clk_get_rate(struct clk *clk)
{
	return 0;
}

static inline int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline long clk_round_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}
1125
/*
 * Stub: pretend any parent is acceptable.  Parameters are const-qualified
 * to match the CONFIG_HAVE_CLK prototype of clk_has_parent().
 */
static inline bool clk_has_parent(const struct clk *clk,
				  const struct clk *parent)
{
	return true;
}
1130
/* Rate-range constraints are accepted and ignored without clock support. */
static inline int clk_set_rate_range(struct clk *clk, unsigned long min,
				     unsigned long max)
{
	return 0;
}

static inline int clk_set_min_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline int clk_set_parent(struct clk *clk, struct clk *parent)
{
	return 0;
}

/* The stub clock has no parent and no system lookup table. */
static inline struct clk *clk_get_parent(struct clk *clk)
{
	return NULL;
}

static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
	return NULL;
}
1161
1162#endif /* !CONFIG_HAVE_CLK */
1163
/**
 * clk_prepare_enable - prepare a clock source, then enable it
 * @clk: clock source
 *
 * Convenience helper for consumers calling clk_enable() from non-atomic
 * context: prepares @clk first, then enables it, undoing the prepare
 * again if the enable step fails.
 *
 * Returns success (0) or negative errno.
 */
static inline int clk_prepare_enable(struct clk *clk)
{
	int ret = clk_prepare(clk);

	if (ret)
		return ret;

	ret = clk_enable(clk);
	if (ret) {
		clk_unprepare(clk);
		return ret;
	}

	return 0;
}
1178
/*
 * clk_disable_unprepare - undo clk_prepare_enable() in non-atomic context:
 * disable @clk, then unprepare it, in the reverse order of the enable path.
 */
static inline void clk_disable_unprepare(struct clk *clk)
{
	clk_disable(clk);
	clk_unprepare(clk);
}
1185
1186static inline int __must_check
1187clk_bulk_prepare_enable(int num_clks, const struct clk_bulk_data *clks)
1188{
1189 int ret;
1190
1191 ret = clk_bulk_prepare(num_clks, clks);
1192 if (ret)
1193 return ret;
1194 ret = clk_bulk_enable(num_clks, clks);
1195 if (ret)
1196 clk_bulk_unprepare(num_clks, clks);
1197
1198 return ret;
1199}
1200
/* Bulk counterpart of clk_disable_unprepare(): disable, then unprepare. */
static inline void clk_bulk_disable_unprepare(int num_clks,
					      const struct clk_bulk_data *clks)
{
	clk_bulk_disable(num_clks, clks);
	clk_bulk_unprepare(num_clks, clks);
}
1207
1208/**
1209 * clk_drop_range - Reset any range set on that clock
1210 * @clk: clock source
1211 *
1212 * Returns success (0) or negative errno.
1213 */
1214static inline int clk_drop_range(struct clk *clk)
1215{
1216 return clk_set_rate_range(clk, 0, ULONG_MAX);
1217}
1218
1219/**
1220 * clk_get_optional - lookup and obtain a reference to an optional clock
1221 * producer.
1222 * @dev: device for clock "consumer"
1223 * @id: clock consumer ID
1224 *
1225 * Behaves the same as clk_get() except where there is no clock producer. In
1226 * this case, instead of returning -ENOENT, the function returns NULL.
1227 */
1228static inline struct clk *clk_get_optional(struct device *dev, const char *id)
1229{
1230 struct clk *clk = clk_get(dev, id);
1231
1232 if (clk == ERR_PTR(-ENOENT))
1233 return NULL;
1234
1235 return clk;
1236}
1237
1238#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
1239struct clk *of_clk_get(struct device_node *np, int index);
1240struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
1241struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec);
1242#else
/* Without OF + common clk support, DT clock lookups fail with -ENOENT. */
static inline struct clk *of_clk_get(struct device_node *np, int index)
{
	return ERR_PTR(-ENOENT);
}
static inline struct clk *of_clk_get_by_name(struct device_node *np,
					     const char *name)
{
	return ERR_PTR(-ENOENT);
}
static inline struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	return ERR_PTR(-ENOENT);
}
1256#endif
1257
1258#endif