Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Hardware spinlock public header
4 *
5 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
6 *
7 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
8 */
9
10#ifndef __LINUX_HWSPINLOCK_H
11#define __LINUX_HWSPINLOCK_H
12
13#include <linux/err.h>
14#include <linux/sched.h>
15
16/* hwspinlock mode argument */
17#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */
18#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */
19#define HWLOCK_RAW 0x03
20#define HWLOCK_IN_ATOMIC 0x04 /* Called while in atomic context */
21
22struct device;
23struct device_node;
24struct hwspinlock;
25struct hwspinlock_device;
26struct hwspinlock_ops;
27
28#ifdef CONFIG_HWSPINLOCK
29
30int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
31 const struct hwspinlock_ops *ops, int base_id, int num_locks);
32int hwspin_lock_unregister(struct hwspinlock_device *bank);
33struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
34int hwspin_lock_free(struct hwspinlock *hwlock);
35int of_hwspin_lock_get_id(struct device_node *np, int index);
36int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
37 unsigned long *);
38int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
39void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
40int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name);
41int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id);
42int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock);
43struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
44 unsigned int id);
45int devm_hwspin_lock_unregister(struct device *dev,
46 struct hwspinlock_device *bank);
47int devm_hwspin_lock_register(struct device *dev,
48 struct hwspinlock_device *bank,
49 const struct hwspinlock_ops *ops,
50 int base_id, int num_locks);
51
52#else /* !CONFIG_HWSPINLOCK */
53
54/*
55 * We don't want these functions to fail if CONFIG_HWSPINLOCK is not
56 * enabled. We prefer to silently succeed in this case, and let the
57 * code path get compiled away. This way, if CONFIG_HWSPINLOCK is not
58 * required on a given setup, users will still work.
59 *
60 * The only exception is hwspin_lock_register/hwspin_lock_unregister, with which
61 * we _do_ want users to fail (no point in registering hwspinlock instances if
62 * the framework is not available).
63 *
64 * Note: ERR_PTR(-ENODEV) will still be considered a success for NULL-checking
65 * users. Others, which care, can still check this with IS_ERR.
66 */
static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	/* ERR_PTR(-ENODEV) still reads as success to NULL-checking callers */
	return ERR_PTR(-ENODEV);
}
71
static inline int hwspin_lock_free(struct hwspinlock *hwlock)
{
	/* silently succeed so callers need no #ifdef */
	return 0;
}
76
/* Stub: "succeeds" so the caller's locked code path compiles away */
static inline
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
			  int mode, unsigned long *flags)
{
	return 0;
}
83
/* Stub: trylock "succeeds" so the caller's locked code path compiles away */
static inline
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	return 0;
}
89
/* Stub: nothing to release when the framework is disabled */
static inline
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
}
94
static inline int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id)
{
	/* silently succeed */
	return 0;
}
99
static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
{
	/* silently succeed */
	return 0;
}
104
static inline
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
{
	/* silently succeed */
	return 0;
}
110
static inline
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
{
	/* silently succeed */
	return 0;
}
116
static inline
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
						     unsigned int id)
{
	/* ERR_PTR(-ENODEV) still reads as success to NULL-checking callers */
	return ERR_PTR(-ENODEV);
}
123
124#endif /* !CONFIG_HWSPINLOCK */
125
126/**
127 * hwspin_trylock_irqsave() - try to lock an hwspinlock, disable interrupts
128 * @hwlock: an hwspinlock which we want to trylock
129 * @flags: a pointer to where the caller's interrupt state will be saved at
130 *
131 * This function attempts to lock the underlying hwspinlock, and will
132 * immediately fail if the hwspinlock is already locked.
133 *
134 * Upon a successful return from this function, preemption and local
135 * interrupts are disabled (previous interrupts state is saved at @flags),
136 * so the caller must not sleep, and is advised to release the hwspinlock
137 * as soon as possible.
138 *
139 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
140 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
141 */
142static inline
143int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags)
144{
145 return __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, flags);
146}
147
148/**
149 * hwspin_trylock_irq() - try to lock an hwspinlock, disable interrupts
150 * @hwlock: an hwspinlock which we want to trylock
151 *
152 * This function attempts to lock the underlying hwspinlock, and will
153 * immediately fail if the hwspinlock is already locked.
154 *
155 * Upon a successful return from this function, preemption and local
156 * interrupts are disabled, so the caller must not sleep, and is advised
157 * to release the hwspinlock as soon as possible.
158 *
159 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
160 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
161 */
162static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
163{
164 return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL);
165}
166
167/**
168 * hwspin_trylock_raw() - attempt to lock a specific hwspinlock
169 * @hwlock: an hwspinlock which we want to trylock
170 *
171 * This function attempts to lock an hwspinlock, and will immediately fail
172 * if the hwspinlock is already taken.
173 *
174 * Caution: User must protect the routine of getting hardware lock with mutex
175 * or spinlock to avoid dead-lock, that will let user can do some time-consuming
176 * or sleepable operations under the hardware lock.
177 *
178 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
179 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
180 */
181static inline int hwspin_trylock_raw(struct hwspinlock *hwlock)
182{
183 return __hwspin_trylock(hwlock, HWLOCK_RAW, NULL);
184}
185
186/**
187 * hwspin_trylock_in_atomic() - attempt to lock a specific hwspinlock
188 * @hwlock: an hwspinlock which we want to trylock
189 *
190 * This function attempts to lock an hwspinlock, and will immediately fail
191 * if the hwspinlock is already taken.
192 *
193 * This function shall be called only from an atomic context.
194 *
195 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
196 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
197 */
198static inline int hwspin_trylock_in_atomic(struct hwspinlock *hwlock)
199{
200 return __hwspin_trylock(hwlock, HWLOCK_IN_ATOMIC, NULL);
201}
202
203/**
204 * hwspin_trylock() - attempt to lock a specific hwspinlock
205 * @hwlock: an hwspinlock which we want to trylock
206 *
207 * This function attempts to lock an hwspinlock, and will immediately fail
208 * if the hwspinlock is already taken.
209 *
210 * Upon a successful return from this function, preemption is disabled,
211 * so the caller must not sleep, and is advised to release the hwspinlock
212 * as soon as possible. This is required in order to minimize remote cores
213 * polling on the hardware interconnect.
214 *
215 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
216 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
217 */
218static inline int hwspin_trylock(struct hwspinlock *hwlock)
219{
220 return __hwspin_trylock(hwlock, 0, NULL);
221}
222
223/**
224 * hwspin_lock_timeout_irqsave() - lock hwspinlock, with timeout, disable irqs
225 * @hwlock: the hwspinlock to be locked
226 * @to: timeout value in msecs
227 * @flags: a pointer to where the caller's interrupt state will be saved at
228 *
229 * This function locks the underlying @hwlock. If the @hwlock
230 * is already taken, the function will busy loop waiting for it to
231 * be released, but give up when @timeout msecs have elapsed.
232 *
233 * Upon a successful return from this function, preemption and local interrupts
234 * are disabled (plus previous interrupt state is saved), so the caller must
235 * not sleep, and is advised to release the hwspinlock as soon as possible.
236 *
237 * Returns 0 when the @hwlock was successfully taken, and an appropriate
238 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
239 * busy after @timeout msecs). The function will never sleep.
240 */
241static inline int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock,
242 unsigned int to, unsigned long *flags)
243{
244 return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQSTATE, flags);
245}
246
247/**
248 * hwspin_lock_timeout_irq() - lock hwspinlock, with timeout, disable irqs
249 * @hwlock: the hwspinlock to be locked
250 * @to: timeout value in msecs
251 *
252 * This function locks the underlying @hwlock. If the @hwlock
253 * is already taken, the function will busy loop waiting for it to
254 * be released, but give up when @timeout msecs have elapsed.
255 *
256 * Upon a successful return from this function, preemption and local interrupts
257 * are disabled so the caller must not sleep, and is advised to release the
258 * hwspinlock as soon as possible.
259 *
260 * Returns 0 when the @hwlock was successfully taken, and an appropriate
261 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
262 * busy after @timeout msecs). The function will never sleep.
263 */
264static inline
265int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
266{
267 return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL);
268}
269
270/**
271 * hwspin_lock_timeout_raw() - lock an hwspinlock with timeout limit
272 * @hwlock: the hwspinlock to be locked
273 * @to: timeout value in msecs
274 *
275 * This function locks the underlying @hwlock. If the @hwlock
276 * is already taken, the function will busy loop waiting for it to
277 * be released, but give up when @timeout msecs have elapsed.
278 *
279 * Caution: User must protect the routine of getting hardware lock with mutex
280 * or spinlock to avoid dead-lock, that will let user can do some time-consuming
281 * or sleepable operations under the hardware lock.
282 *
283 * Returns 0 when the @hwlock was successfully taken, and an appropriate
284 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
285 * busy after @timeout msecs). The function will never sleep.
286 */
287static inline
288int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to)
289{
290 return __hwspin_lock_timeout(hwlock, to, HWLOCK_RAW, NULL);
291}
292
293/**
294 * hwspin_lock_timeout_in_atomic() - lock an hwspinlock with timeout limit
295 * @hwlock: the hwspinlock to be locked
296 * @to: timeout value in msecs
297 *
298 * This function locks the underlying @hwlock. If the @hwlock
299 * is already taken, the function will busy loop waiting for it to
300 * be released, but give up when @timeout msecs have elapsed.
301 *
302 * This function shall be called only from an atomic context and the timeout
303 * value shall not exceed a few msecs.
304 *
305 * Returns 0 when the @hwlock was successfully taken, and an appropriate
306 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
307 * busy after @timeout msecs). The function will never sleep.
308 */
309static inline
310int hwspin_lock_timeout_in_atomic(struct hwspinlock *hwlock, unsigned int to)
311{
312 return __hwspin_lock_timeout(hwlock, to, HWLOCK_IN_ATOMIC, NULL);
313}
314
315/**
316 * hwspin_lock_timeout() - lock an hwspinlock with timeout limit
317 * @hwlock: the hwspinlock to be locked
318 * @to: timeout value in msecs
319 *
320 * This function locks the underlying @hwlock. If the @hwlock
321 * is already taken, the function will busy loop waiting for it to
322 * be released, but give up when @timeout msecs have elapsed.
323 *
324 * Upon a successful return from this function, preemption is disabled
325 * so the caller must not sleep, and is advised to release the hwspinlock
326 * as soon as possible.
327 * This is required in order to minimize remote cores polling on the
328 * hardware interconnect.
329 *
330 * Returns 0 when the @hwlock was successfully taken, and an appropriate
331 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
332 * busy after @timeout msecs). The function will never sleep.
333 */
334static inline
335int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to)
336{
337 return __hwspin_lock_timeout(hwlock, to, 0, NULL);
338}
339
340/**
341 * hwspin_unlock_irqrestore() - unlock hwspinlock, restore irq state
342 * @hwlock: a previously-acquired hwspinlock which we want to unlock
343 * @flags: previous caller's interrupt state to restore
344 *
345 * This function will unlock a specific hwspinlock, enable preemption and
346 * restore the previous state of the local interrupts. It should be used
347 * to undo, e.g., hwspin_trylock_irqsave().
348 *
349 * @hwlock must be already locked before calling this function: it is a bug
350 * to call unlock on a @hwlock that is already unlocked.
351 */
352static inline void hwspin_unlock_irqrestore(struct hwspinlock *hwlock,
353 unsigned long *flags)
354{
355 __hwspin_unlock(hwlock, HWLOCK_IRQSTATE, flags);
356}
357
358/**
359 * hwspin_unlock_irq() - unlock hwspinlock, enable interrupts
360 * @hwlock: a previously-acquired hwspinlock which we want to unlock
361 *
362 * This function will unlock a specific hwspinlock, enable preemption and
363 * enable local interrupts. Should be used to undo hwspin_lock_irq().
364 *
365 * @hwlock must be already locked (e.g. by hwspin_trylock_irq()) before
366 * calling this function: it is a bug to call unlock on a @hwlock that is
367 * already unlocked.
368 */
369static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
370{
371 __hwspin_unlock(hwlock, HWLOCK_IRQ, NULL);
372}
373
374/**
375 * hwspin_unlock_raw() - unlock hwspinlock
376 * @hwlock: a previously-acquired hwspinlock which we want to unlock
377 *
378 * This function will unlock a specific hwspinlock.
379 *
380 * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
381 * this function: it is a bug to call unlock on a @hwlock that is already
382 * unlocked.
383 */
384static inline void hwspin_unlock_raw(struct hwspinlock *hwlock)
385{
386 __hwspin_unlock(hwlock, HWLOCK_RAW, NULL);
387}
388
389/**
390 * hwspin_unlock_in_atomic() - unlock hwspinlock
391 * @hwlock: a previously-acquired hwspinlock which we want to unlock
392 *
393 * This function will unlock a specific hwspinlock.
394 *
395 * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
396 * this function: it is a bug to call unlock on a @hwlock that is already
397 * unlocked.
398 */
399static inline void hwspin_unlock_in_atomic(struct hwspinlock *hwlock)
400{
401 __hwspin_unlock(hwlock, HWLOCK_IN_ATOMIC, NULL);
402}
403
404/**
405 * hwspin_unlock() - unlock hwspinlock
406 * @hwlock: a previously-acquired hwspinlock which we want to unlock
407 *
408 * This function will unlock a specific hwspinlock and enable preemption
409 * back.
410 *
411 * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
412 * this function: it is a bug to call unlock on a @hwlock that is already
413 * unlocked.
414 */
415static inline void hwspin_unlock(struct hwspinlock *hwlock)
416{
417 __hwspin_unlock(hwlock, 0, NULL);
418}
419
420#endif /* __LINUX_HWSPINLOCK_H */