/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015-2016 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/irq_work.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>
#include <linux/bit_spinlock.h>

#include <linux/rhashtable-types.h>
/*
 * Objects in an rhashtable have an embedded struct rhash_head
 * which is linked into a hash chain from the hash table - or one
 * of two or more hash tables when the rhashtable is being resized.
 * The end of the chain is marked with a special nulls marker which has
 * the least significant bit set but otherwise stores the address of
 * the hash bucket. This allows us to be sure we've found the end
 * of the right list.
 * The value stored in the hash bucket has BIT(0) used as a lock bit.
 * This bit must be atomically set before any changes are made to
 * the chain. To avoid dereferencing this pointer without clearing
 * the bit first, we use an opaque 'struct rhash_lock_head *' for the
 * pointer stored in the bucket. This struct needs to be defined so
 * that rcu_dereference() works on it, but it has no content so a
 * cast is needed for it to be useful. This ensures it isn't
 * used by mistake without clearing the lock bit first.
 */
struct rhash_lock_head {};
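
/*
 * Example (illustrative sketch, not part of this header): a user embeds
 * struct rhash_head in their own object and describes the layout with
 * struct rhashtable_params.  The names struct example_obj and
 * example_params are hypothetical.
 */
struct example_obj {
	u32			key;
	struct rhash_head	node;	/* links the object into a bucket chain */
};

static const struct rhashtable_params example_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct example_obj, key),
	.head_offset	= offsetof(struct example_obj, node),
	.automatic_shrinking = true,
};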

/* Maximum chain length before rehash
 *
 * The maximum (not average) chain length grows with the size of the hash
 * table, at a rate of (log N)/(log log N).
 *
 * The value of 16 is selected so that even if the hash table grew to
 * 2^32 you would not expect the maximum chain length to exceed it
 * unless we are under attack (or extremely unlucky).
 *
 * As this limit is only to detect attacks, we don't need to set it to a
 * lower value as you'd need the chain length to vastly exceed 16 to have
 * any real effect on the system.
 */
#define RHT_ELASTICITY	16u

/**
 * struct bucket_table - Table of hash buckets
 * @size: Number of hash buckets
 * @nest: Number of bits of first-level nested table.
 * @hash_rnd: Random seed to fold into hash
 * @walkers: List of active walkers
 * @rcu: RCU structure for freeing the table
 * @future_tbl: Table under construction during rehashing
 * @dep_map: Lockdep map for the per-bucket bit spinlocks
 * @buckets: size * hash buckets
 */
struct bucket_table {
	unsigned int		size;
	unsigned int		nest;
	u32			hash_rnd;
	struct list_head	walkers;
	struct rcu_head		rcu;

	struct bucket_table __rcu *future_tbl;

	struct lockdep_map	dep_map;

	struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};

/*
 * NULLS_MARKER() expects a hash value with the low
 * bits most likely to be significant, and it discards
 * the msb.
 * We give it an address, in which the bottom bit is
 * always 0, and the msb might be significant.
 * So we shift the address down one bit to align with
 * expectations and avoid losing a significant bit.
 *
 * We never store the NULLS_MARKER in the hash table
 * itself as we need the lsb for locking.
 * Instead we store NULL.
 */
#define RHT_NULLS_MARKER(ptr) \
	((void *)NULLS_MARKER(((unsigned long) (ptr)) >> 1))
#define INIT_RHT_NULLS_HEAD(ptr) \
	((ptr) = NULL)

static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr & 1);
}

static inline void *rht_obj(const struct rhashtable *ht,
			    const struct rhash_head *he)
{
	return (char *)he - ht->p.head_offset;
}

static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
					    unsigned int hash)
{
	return hash & (tbl->size - 1);
}

static __always_inline unsigned int rht_key_get_hash(struct rhashtable *ht,
	const void *key, const struct rhashtable_params params,
	unsigned int hash_rnd)
{
	unsigned int hash;

	/* params must be equal to ht->p if it isn't constant. */
	if (!__builtin_constant_p(params.key_len)) {
		hash = ht->p.hashfn(key, ht->key_len, hash_rnd);
	} else {
		unsigned int key_len = params.key_len ? : ht->p.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, hash_rnd);
		else if (key_len & (sizeof(u32) - 1))
			hash = jhash(key, key_len, hash_rnd);
		else
			hash = jhash2(key, key_len / sizeof(u32), hash_rnd);
	}

	return hash;
}

static __always_inline unsigned int rht_key_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const void *key, const struct rhashtable_params params)
{
	unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd);

	return rht_bucket_index(tbl, hash);
}

static __always_inline unsigned int rht_head_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const struct rhash_head *he, const struct rhashtable_params params)
{
	const char *ptr = rht_obj(ht, he);

	return likely(params.obj_hashfn) ?
	       rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
							ht->p.key_len,
						       tbl->hash_rnd)) :
	       rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_grow_above_75(const struct rhashtable *ht,
				     const struct bucket_table *tbl)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_shrink_below_30(const struct rhashtable *ht,
				       const struct bucket_table *tbl)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
	       tbl->size > ht->p.min_size;
}

/**
 * rht_grow_above_100 - returns true if nelems > table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_grow_above_100(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) > tbl->size &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_grow_above_max - returns true if table is above maximum
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_grow_above_max(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) >= ht->max_elems;
}

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return 1;
}

static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
					     u32 hash)
{
	return 1;
}
#endif /* CONFIG_PROVE_LOCKING */

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj);

void rhashtable_walk_enter(struct rhashtable *ht,
			   struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires_shared(RCU);

static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires_shared(RCU)
{
	(void)rhashtable_walk_start_check(iter);
}

void *rhashtable_walk_next(struct rhashtable_iter *iter);
void *rhashtable_walk_peek(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases_shared(RCU);

void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg);
void rhashtable_destroy(struct rhashtable *ht);

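/*
 * Example (illustrative sketch, not part of this header): a full-table
 * walk with the iterator API above, reusing the hypothetical struct
 * example_obj from earlier.  rhashtable_walk_next() returns
 * ERR_PTR(-EAGAIN) when a resize interrupts the walk, in which case
 * objects may be seen twice or missed.
 */
static inline void example_walk(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	struct example_obj *obj;

	rhashtable_walk_enter(ht, &iter);
	rhashtable_walk_start(&iter);

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* table resized under us */
			break;
		}
		/* use obj->key here */
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}
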
struct rhash_lock_head __rcu **rht_bucket_nested(
	const struct bucket_table *tbl, unsigned int hash);
struct rhash_lock_head __rcu **__rht_bucket_nested(
	const struct bucket_table *tbl, unsigned int hash);
struct rhash_lock_head __rcu **rht_bucket_nested_insert(
	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash);

#define rht_dereference(p, ht) \
	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_rcu(p, ht) \
	rcu_dereference_all_check(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_bucket(p, tbl, hash) \
	rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_dereference_bucket_rcu(p, tbl, hash) \
	rcu_dereference_all_check(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_entry(tpos, pos, member) \
	({ tpos = container_of(pos, typeof(*tpos), member); 1; })

static inline struct rhash_lock_head __rcu *const *rht_bucket(
	const struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
				     &tbl->buckets[hash];
}

static inline struct rhash_lock_head __rcu **rht_bucket_var(
	struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) :
				     &tbl->buckets[hash];
}

static inline struct rhash_lock_head __rcu **rht_bucket_insert(
	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
				     &tbl->buckets[hash];
}

/*
 * We lock a bucket by setting BIT(0) in the pointer - this is always
 * zero in real pointers. The NULLS mark is never stored in the bucket,
 * rather we store NULL if the bucket is empty.
 * bit_spin_locks do not handle contention well, but the whole point
 * of the hashtable design is to achieve minimum per-bucket contention.
 * A nested hash table might not have a bucket pointer. In that case
 * we cannot get a lock. For remove and replace a missing bucket
 * cannot contain the object anyway, so no locking is needed.
 * For insert we allocate the bucket if this is the last bucket_table,
 * and then take the lock.
 * Sometimes we unlock a bucket by writing a new pointer there. In that
 * case we don't need to unlock, but we do need to restore state such as
 * the preempt count and interrupt flags. For that we have
 * rht_assign_unlock(). As rcu_assign_pointer() provides the same release
 * semantics that bit_spin_unlock() provides, this is safe.
 * When we write to a bucket without unlocking, we use rht_assign_locked().
 */

static inline unsigned long rht_lock(struct bucket_table *tbl,
				     struct rhash_lock_head __rcu **bkt)
	__acquires(__bitlock(0, bkt))
{
	unsigned long flags;

	local_irq_save(flags);
	bit_spin_lock(0, (unsigned long *)bkt);
	lock_map_acquire(&tbl->dep_map);
	return flags;
}

static inline unsigned long rht_lock_nested(struct bucket_table *tbl,
					    struct rhash_lock_head __rcu **bucket,
					    unsigned int subclass)
	__acquires(__bitlock(0, bucket))
{
	unsigned long flags;

	local_irq_save(flags);
	bit_spin_lock(0, (unsigned long *)bucket);
	lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);
	return flags;
}

static inline void rht_unlock(struct bucket_table *tbl,
			      struct rhash_lock_head __rcu **bkt,
			      unsigned long flags)
	__releases(__bitlock(0, bkt))
{
	lock_map_release(&tbl->dep_map);
	bit_spin_unlock(0, (unsigned long *)bkt);
	local_irq_restore(flags);
}

enum rht_lookup_freq {
	RHT_LOOKUP_NORMAL,
	RHT_LOOKUP_LIKELY,
};

static __always_inline struct rhash_head *__rht_ptr(
	struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt,
	const enum rht_lookup_freq freq)
{
	unsigned long p_val = (unsigned long)p & ~BIT(0);

	BUILD_BUG_ON(!__builtin_constant_p(freq));

	if (freq == RHT_LOOKUP_LIKELY)
		return (struct rhash_head *)
			(likely(p_val) ? p_val : (unsigned long)RHT_NULLS_MARKER(bkt));
	else
		return (struct rhash_head *)
			(p_val ?: (unsigned long)RHT_NULLS_MARKER(bkt));
}

/*
 * Where 'bkt' is a bucket and might be locked:
 * rht_ptr_rcu() dereferences that pointer and clears the lock bit.
 * rht_ptr() dereferences in a context where the bucket is locked.
 * rht_ptr_exclusive() dereferences in a context where exclusive
 * access is guaranteed, such as when destroying the table.
 */
static __always_inline struct rhash_head *__rht_ptr_rcu(
	struct rhash_lock_head __rcu *const *bkt,
	const enum rht_lookup_freq freq)
{
	return __rht_ptr(rcu_dereference_all(*bkt), bkt, freq);
}

static inline struct rhash_head *rht_ptr_rcu(
	struct rhash_lock_head __rcu *const *bkt)
{
	return __rht_ptr_rcu(bkt, RHT_LOOKUP_NORMAL);
}

static inline struct rhash_head *rht_ptr(
	struct rhash_lock_head __rcu *const *bkt,
	struct bucket_table *tbl,
	unsigned int hash)
{
	return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt,
			 RHT_LOOKUP_NORMAL);
}

static inline struct rhash_head *rht_ptr_exclusive(
	struct rhash_lock_head __rcu *const *bkt)
{
	return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt,
			 RHT_LOOKUP_NORMAL);
}

static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
				     struct rhash_head *obj)
{
	if (rht_is_a_nulls(obj))
		obj = NULL;
	rcu_assign_pointer(*bkt, (void *)((unsigned long)obj | BIT(0)));
}

static inline void rht_assign_unlock(struct bucket_table *tbl,
				     struct rhash_lock_head __rcu **bkt,
				     struct rhash_head *obj,
				     unsigned long flags)
	__releases(__bitlock(0, bkt))
{
	if (rht_is_a_nulls(obj))
		obj = NULL;
	lock_map_release(&tbl->dep_map);
	rcu_assign_pointer(*bkt, (void *)obj);
	preempt_enable();
	__release(__bitlock(0, bkt));
	local_irq_restore(flags);
}

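/*
 * Example (illustrative sketch, not part of this header) of the locking
 * discipline described above: insert at the head of a bucket chain, where
 * the final pointer write both publishes the object and releases the lock
 * via rht_assign_unlock().  example_push_head is hypothetical; a real
 * insert must also handle a concurrent resize and the growth checks (see
 * __rhashtable_insert_fast() below).
 */
static inline int example_push_head(struct rhashtable *ht,
				    struct bucket_table *tbl,
				    unsigned int hash,
				    struct rhash_head *obj)
{
	struct rhash_lock_head __rcu **bkt = rht_bucket_insert(ht, tbl, hash);
	unsigned long flags;

	if (!bkt)
		return -ENOMEM;	/* nested-table bucket allocation failed */

	flags = rht_lock(tbl, bkt);
	/* The old head (or nulls marker) becomes our next pointer. */
	RCU_INIT_POINTER(obj->next, rht_ptr(bkt, tbl, hash));
	rht_assign_unlock(tbl, bkt, obj, flags);
	return 0;
}
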
/**
 * rht_for_each_from - iterate over hash chain from given head
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the &struct rhash_head to start from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 */
#define rht_for_each_from(pos, head, tbl, hash) \
	for (pos = head; \
	     !rht_is_a_nulls(pos); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each - iterate over hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 */
#define rht_for_each(pos, tbl, hash) \
	rht_for_each_from(pos, rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
			  tbl, hash)

/**
 * rht_for_each_entry_from - iterate over hash chain from given head
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the &struct rhash_head to start from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry_from(tpos, pos, head, tbl, hash, member) \
	for (pos = head; \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry(tpos, pos, tbl, hash, member) \
	rht_for_each_entry_from(tpos, pos, \
				rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
				tbl, hash, member)

/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @next: the &struct rhash_head to use as next in loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 */
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
	for (pos = rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
	     next = !rht_is_a_nulls(pos) ? \
		    rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
	     pos = next, \
	     next = !rht_is_a_nulls(pos) ? \
		    rht_dereference_bucket(pos->next, tbl, hash) : NULL)

/**
 * rht_for_each_rcu_from - iterate over rcu hash chain from given head
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the &struct rhash_head to start from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu_from(pos, head, tbl, hash) \
	for (({barrier(); }), \
	     pos = head; \
	     !rht_is_a_nulls(pos); \
	     pos = rcu_dereference_all(pos->next))

/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, tbl, hash) \
	for (({barrier(); }), \
	     pos = rht_ptr_rcu(rht_bucket(tbl, hash)); \
	     !rht_is_a_nulls(pos); \
	     pos = rcu_dereference_all(pos->next))

/**
 * rht_for_each_entry_rcu_from - iterate over rcu hash chain from given head
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the &struct rhash_head to start from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu_from(tpos, pos, head, tbl, hash, member) \
	for (({barrier(); }), \
	     pos = head; \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
	     pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))

/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
	rht_for_each_entry_rcu_from(tpos, pos, \
				    rht_ptr_rcu(rht_bucket(tbl, hash)), \
				    tbl, hash, member)

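/*
 * Example (illustrative sketch, not part of this header): searching one
 * bucket chain under rcu_read_lock() with rht_for_each_entry_rcu(), using
 * the hypothetical struct example_obj/example_params from above.  Unlike
 * __rhashtable_lookup() below, this sketch ignores any resize in progress.
 */
static inline struct example_obj *example_find_in_bucket(struct rhashtable *ht,
							 u32 key)
{
	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
	unsigned int hash = rht_key_hashfn(ht, tbl, &key, example_params);
	struct example_obj *obj;
	struct rhash_head *pos;

	rht_for_each_entry_rcu(obj, pos, tbl, hash, node) {
		if (obj->key == key)
			return obj;
	}
	return NULL;
}
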
/**
 * rhl_for_each_rcu - iterate over rcu hash table list
 * @pos: the &struct rhlist_head to use as a loop cursor.
 * @list: the head of the list
 *
 * This hash chain list-traversal primitive should be used on the
 * list returned by rhltable_lookup.
 */
#define rhl_for_each_rcu(pos, list) \
	for (pos = list; pos; pos = rcu_dereference_all(pos->next))

/**
 * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhlist_head to use as a loop cursor.
 * @list: the head of the list
 * @member: name of the &struct rhlist_head within the hashable struct.
 *
 * This hash chain list-traversal primitive should be used on the
 * list returned by rhltable_lookup.
 */
#define rhl_for_each_entry_rcu(tpos, pos, list, member) \
	for (pos = list; pos && rht_entry(tpos, pos, member); \
	     pos = rcu_dereference_all(pos->next))

static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
				     const void *obj)
{
	struct rhashtable *ht = arg->ht;
	const char *ptr = obj;

	return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}

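/*
 * Example (illustrative sketch, not part of this header): parameters using
 * obj_hashfn/obj_cmpfn instead of the default memcmp() on an inline key.
 * All names below are hypothetical.  Note that hashfn (applied to a bare
 * key) and obj_hashfn (applied to the whole object) must hash the same key
 * to the same value.
 */
static inline u32 example_key_hashfn(const void *data, u32 len, u32 seed)
{
	return jhash_1word(*(const u32 *)data, seed);
}

static inline u32 example_obj_hashfn(const void *data, u32 len, u32 seed)
{
	return jhash_1word(((const struct example_obj *)data)->key, seed);
}

static inline int example_obj_cmpfn(struct rhashtable_compare_arg *arg,
				    const void *obj)
{
	/* Return zero on a match, like memcmp(). */
	return ((const struct example_obj *)obj)->key != *(const u32 *)arg->key;
}

static const struct rhashtable_params example_cmp_params = {
	.head_offset	= offsetof(struct example_obj, node),
	.hashfn		= example_key_hashfn,
	.obj_hashfn	= example_obj_hashfn,
	.obj_cmpfn	= example_obj_cmpfn,
};
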
/* Internal function, do not use. */
static __always_inline struct rhash_head *__rhashtable_lookup(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params,
	const enum rht_lookup_freq freq)
	__must_hold_shared(RCU)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_lock_head __rcu *const *bkt;
	struct bucket_table *tbl;
	struct rhash_head *he;
	unsigned int hash;

	BUILD_BUG_ON(!__builtin_constant_p(freq));
	tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
	hash = rht_key_hashfn(ht, tbl, key, params);
	bkt = rht_bucket(tbl, hash);
	do {
		rht_for_each_rcu_from(he, __rht_ptr_rcu(bkt, freq), tbl, hash) {
			if (params.obj_cmpfn ?
			    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
			    rhashtable_compare(&arg, rht_obj(ht, he)))
				continue;
			return he;
		}
		/* An object might have been moved to a different hash chain,
		 * while we walk along it - better check and retry.
		 */
	} while (he != RHT_NULLS_MARKER(bkt));

	/* Ensure we see any new tables. */
	smp_rmb();

	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(tbl))
		goto restart;

	return NULL;
}

/**
 * rhashtable_lookup - search hash table
 * @ht: hash table
 * @key: the pointer to the key
 * @params: hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This must only be called under the RCU read lock.
 *
 * Returns the first entry for which the compare function matched.
 */
static __always_inline void *rhashtable_lookup(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
	__must_hold_shared(RCU)
{
	struct rhash_head *he = __rhashtable_lookup(ht, key, params,
						    RHT_LOOKUP_NORMAL);

	return he ? rht_obj(ht, he) : NULL;
}

static __always_inline void *rhashtable_lookup_likely(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
	__must_hold_shared(RCU)
{
	struct rhash_head *he = __rhashtable_lookup(ht, key, params,
						    RHT_LOOKUP_LIKELY);

	return likely(he) ? rht_obj(ht, he) : NULL;
}

/**
 * rhashtable_lookup_fast - search hash table, without RCU read lock
 * @ht: hash table
 * @key: the pointer to the key
 * @params: hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * Only use this function when you have other mechanisms guaranteeing
 * that the object won't go away after the RCU read lock is released.
 *
 * Returns the first entry for which the compare function matched.
 */
static __always_inline void *rhashtable_lookup_fast(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	void *obj;

	rcu_read_lock();
	obj = rhashtable_lookup(ht, key, params);
	rcu_read_unlock();

	return obj;
}

/**
 * rhltable_lookup - search hash list table
 * @hlt: hash list table
 * @key: the pointer to the key
 * @params: hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. All matching entries are returned
 * in a list.
 *
 * This must only be called under the RCU read lock.
 *
 * Returns the list of entries that match the given key.
 */
static __always_inline struct rhlist_head *rhltable_lookup(
	struct rhltable *hlt, const void *key,
	const struct rhashtable_params params)
	__must_hold_shared(RCU)
{
	struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
						    RHT_LOOKUP_NORMAL);

	return he ? container_of(he, struct rhlist_head, rhead) : NULL;
}

static __always_inline struct rhlist_head *rhltable_lookup_likely(
	struct rhltable *hlt, const void *key,
	const struct rhashtable_params params)
	__must_hold_shared(RCU)
{
	struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
						    RHT_LOOKUP_LIKELY);

	return likely(he) ? container_of(he, struct rhlist_head, rhead) : NULL;
}

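/*
 * Example (illustrative sketch, not part of this header): an rhltable keeps
 * all entries that share a key on one rhlist; rhltable_lookup() returns that
 * list and rhl_for_each_entry_rcu() walks it.  struct example_lobj and
 * example_lparams are hypothetical; the walk must run under rcu_read_lock().
 */
struct example_lobj {
	u32			key;
	struct rhlist_head	node;
};

static const struct rhashtable_params example_lparams = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct example_lobj, key),
	.head_offset	= offsetof(struct example_lobj, node),
};

static inline unsigned int example_count_duplicates(struct rhltable *hlt,
						    const u32 *key)
{
	struct rhlist_head *list = rhltable_lookup(hlt, key, example_lparams);
	struct rhlist_head *pos;
	struct example_lobj *obj;
	unsigned int n = 0;

	rhl_for_each_entry_rcu(obj, pos, list, node)
		if (obj->key == *key)
			n++;
	return n;
}
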
/* Internal function, please use rhashtable_insert_fast() instead.  On
 * success this function returns NULL; if there is a clash it returns the
 * existing element; otherwise it returns an error via ERR_PTR().
 */
static __always_inline void *__rhashtable_insert_fast(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params, bool rhlist)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_lock_head __rcu **bkt;
	struct rhash_head __rcu **pprev;
	struct bucket_table *tbl;
	struct rhash_head *head;
	unsigned long flags;
	unsigned int hash;
	int elasticity;
	void *data;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = rht_head_hashfn(ht, tbl, obj, params);
	elasticity = RHT_ELASTICITY;
	bkt = rht_bucket_insert(ht, tbl, hash);
	data = ERR_PTR(-ENOMEM);
	if (!bkt)
		goto out;
	pprev = NULL;
	flags = rht_lock(tbl, bkt);

	if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
slow_path:
		rht_unlock(tbl, bkt, flags);
		rcu_read_unlock();
		return rhashtable_insert_slow(ht, key, obj);
	}

	rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
		struct rhlist_head *plist;
		struct rhlist_head *list;

		elasticity--;
		if (!key ||
		    (params.obj_cmpfn ?
		     params.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			pprev = &head->next;
			continue;
		}

		data = rht_obj(ht, head);

		if (!rhlist)
			goto out_unlock;

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		if (pprev) {
			rcu_assign_pointer(*pprev, obj);
			rht_unlock(tbl, bkt, flags);
		} else
			rht_assign_unlock(tbl, bkt, obj, flags);
		data = NULL;
		goto out;
	}

	if (elasticity <= 0 && !params.insecure_elasticity)
		goto slow_path;

	data = ERR_PTR(-E2BIG);
	if (unlikely(rht_grow_above_max(ht, tbl)))
		goto out_unlock;

	if (unlikely(rht_grow_above_100(ht, tbl)) &&
	    !params.insecure_elasticity)
		goto slow_path;

	/* Inserting at head of list makes unlocking free. */
	head = rht_ptr(bkt, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	atomic_inc(&ht->nelems);
	rht_assign_unlock(tbl, bkt, obj, flags);

	if (rht_grow_above_75(ht, tbl))
		irq_work_queue(&ht->run_irq_work);

	data = NULL;
out:
	rcu_read_unlock();

	return data;

out_unlock:
	rht_unlock(tbl, bkt, flags);
	goto out;
}

/**
 * rhashtable_insert_fast - insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Will take the per-bucket bitlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static __always_inline int rhashtable_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	void *ret;

	ret = __rhashtable_insert_fast(ht, NULL, obj, params, false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}

/**
 * rhltable_insert_key - insert object into hash list table
 * @hlt: hash list table
 * @key: the pointer to the key
 * @list: pointer to hash list head inside object
 * @params: hash table parameters
 *
 * Will take the per-bucket bitlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static __always_inline int rhltable_insert_key(
	struct rhltable *hlt, const void *key, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
						params, true));
}

/**
 * rhltable_insert - insert object into hash list table
 * @hlt: hash list table
 * @list: pointer to hash list head inside object
 * @params: hash table parameters
 *
 * Will take the per-bucket bitlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static __always_inline int rhltable_insert(
	struct rhltable *hlt, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(&hlt->ht, &list->rhead);

	key += params.key_offset;

	return rhltable_insert_key(hlt, key, list, params);
}

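/*
 * Example (illustrative sketch, not part of this header): inserting into an
 * rhltable, reusing the hypothetical struct example_lobj/example_lparams
 * from above.  Unlike rhashtable_insert_fast(), duplicate keys are allowed;
 * each new entry joins the rhlist of its key.
 */
static inline int example_lobj_insert(struct rhltable *hlt,
				      struct example_lobj *obj)
{
	return rhltable_insert(hlt, &obj->node, example_lparams);
}
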
/**
 * rhashtable_lookup_insert_fast - lookup and insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * This lookup function may only be used for fixed-key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static __always_inline int rhashtable_lookup_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);
	void *ret;

	BUG_ON(ht->p.obj_hashfn);

	ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
				       false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}

/**
 * rhashtable_lookup_get_insert_fast - lookup and insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Just like rhashtable_lookup_insert_fast(), but this function returns the
 * object if it exists, NULL if it does not exist and the insertion was
 * successful, and an ERR_PTR otherwise.
 */
static __always_inline void *rhashtable_lookup_get_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);

	BUG_ON(ht->p.obj_hashfn);

	return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
					false);
}

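/*
 * Example (illustrative sketch, not part of this header): the common
 * "find or create" pattern with rhashtable_lookup_get_insert_fast(), using
 * the hypothetical struct example_obj/example_params from above.  Callers
 * must still check the result with IS_ERR().
 */
static inline struct example_obj *example_get_or_insert(struct rhashtable *ht,
							struct example_obj *new_obj)
{
	void *old = rhashtable_lookup_get_insert_fast(ht, &new_obj->node,
						      example_params);

	if (IS_ERR(old))
		return old;	/* e.g. ERR_PTR(-ENOMEM); new_obj not inserted */
	return old ?: (void *)new_obj;	/* an existing entry wins */
}
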
/**
 * rhashtable_lookup_insert_key - search and insert object to hash table
 *				  with explicit key
 * @ht: hash table
 * @key: key
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 *
 * Returns zero on success.
 */
static __always_inline int rhashtable_lookup_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	void *ret;

	BUG_ON(!ht->p.obj_hashfn || !key);

	ret = __rhashtable_insert_fast(ht, key, obj, params, false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}

/**
 * rhashtable_lookup_get_insert_key - lookup and insert object into hash table
 * @ht: hash table
 * @key: key
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Just like rhashtable_lookup_insert_key(), but this function returns the
 * object if it exists, NULL if it does not and the insertion was successful,
 * and an ERR_PTR otherwise.
 */
static __always_inline void *rhashtable_lookup_get_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	BUG_ON(!ht->p.obj_hashfn || !key);

	return __rhashtable_insert_fast(ht, key, obj, params, false);
}

/* Internal function, please use rhashtable_remove_fast() instead */
static __always_inline int __rhashtable_remove_fast_one(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj, const struct rhashtable_params params,
	bool rhlist)
{
	struct rhash_lock_head __rcu **bkt;
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	unsigned long flags;
	unsigned int hash;
	int err = -ENOENT;

	hash = rht_head_hashfn(ht, tbl, obj, params);
	bkt = rht_bucket_var(tbl, hash);
	if (!bkt)
		return -ENOENT;
	pprev = NULL;
	flags = rht_lock(tbl, bkt);

	rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
		struct rhlist_head *list;

		list = container_of(he, struct rhlist_head, rhead);

		if (he != obj) {
			struct rhlist_head __rcu **lpprev;

			pprev = &he->next;

			if (!rhlist)
				continue;

			do {
				lpprev = &list->next;
				list = rht_dereference_bucket(list->next,
							      tbl, hash);
			} while (list && obj != &list->rhead);

			if (!list)
				continue;

			list = rht_dereference_bucket(list->next, tbl, hash);
			RCU_INIT_POINTER(*lpprev, list);
			err = 0;
			break;
		}

		obj = rht_dereference_bucket(obj->next, tbl, hash);
		err = 1;

		if (rhlist) {
			list = rht_dereference_bucket(list->next, tbl, hash);
			if (list) {
				RCU_INIT_POINTER(list->rhead.next, obj);
				obj = &list->rhead;
				err = 0;
			}
		}

		if (pprev) {
			rcu_assign_pointer(*pprev, obj);
			rht_unlock(tbl, bkt, flags);
		} else {
			rht_assign_unlock(tbl, bkt, obj, flags);
		}
		goto unlocked;
	}

	rht_unlock(tbl, bkt, flags);
unlocked:
	if (err > 0) {
		atomic_dec(&ht->nelems);
		if (unlikely(ht->p.automatic_shrinking &&
			     rht_shrink_below_30(ht, tbl)))
			schedule_work(&ht->run_work);
		err = 0;
	}

	return err;
}

/* Internal function, please use rhashtable_remove_fast() instead */
static __always_inline int __rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params, bool rhlist)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params,
						   rhlist)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	rcu_read_unlock();

	return err;
}

/**
 * rhashtable_remove_fast - remove object from hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Since the hash chain is singly linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if permitted when residency drops
 * below 30%.
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static __always_inline int rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	return __rhashtable_remove_fast(ht, obj, params, false);
}

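/*
 * Example (illustrative sketch, not part of this header): the basic
 * lifecycle of a table using the hypothetical struct example_obj and
 * example_params from above.  Assumes <linux/slab.h> for kzalloc()/kfree().
 */
static inline int example_lifecycle(void)
{
	struct rhashtable ht;
	struct example_obj *obj, *found;
	int err;

	err = rhashtable_init(&ht, &example_params);
	if (err)
		return err;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj) {
		rhashtable_destroy(&ht);
		return -ENOMEM;
	}
	obj->key = 42;

	err = rhashtable_insert_fast(&ht, &obj->node, example_params);
	if (!err) {
		rcu_read_lock();
		found = rhashtable_lookup(&ht, &obj->key, example_params);
		rcu_read_unlock();
		if (found == obj)	/* always true here: sole writer */
			err = rhashtable_remove_fast(&ht, &obj->node,
						     example_params);
	}

	kfree(obj);
	rhashtable_destroy(&ht);
	return err;
}
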
/**
 * rhltable_remove - remove object from hash list table
 * @hlt: hash list table
 * @list: pointer to hash list head inside object
 * @params: hash table parameters
 *
 * Since the hash chain is singly linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if permitted when residency drops
 * below 30%.
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static __always_inline int rhltable_remove(
	struct rhltable *hlt, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true);
}

/* Internal function, please use rhashtable_replace_fast() instead */
static __always_inline int __rhashtable_replace_fast(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj_old, struct rhash_head *obj_new,
	const struct rhashtable_params params)
{
	struct rhash_lock_head __rcu **bkt;
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	unsigned long flags;
	unsigned int hash;
	int err = -ENOENT;

	/* Minimally, the old and new objects must have the same hash
	 * (which should mean identifiers are the same).
	 */
	hash = rht_head_hashfn(ht, tbl, obj_old, params);
	if (hash != rht_head_hashfn(ht, tbl, obj_new, params))
		return -EINVAL;

	bkt = rht_bucket_var(tbl, hash);
	if (!bkt)
		return -ENOENT;

	pprev = NULL;
	flags = rht_lock(tbl, bkt);

	rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
		if (he != obj_old) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(obj_new->next, obj_old->next);
		if (pprev) {
			rcu_assign_pointer(*pprev, obj_new);
			rht_unlock(tbl, bkt, flags);
		} else {
			rht_assign_unlock(tbl, bkt, obj_new, flags);
		}
		err = 0;
		goto unlocked;
	}

	rht_unlock(tbl, bkt, flags);

unlocked:
	return err;
}

/**
 * rhashtable_replace_fast - replace an object in hash table
 * @ht: hash table
 * @obj_old: pointer to hash head inside object being replaced
 * @obj_new: pointer to hash head inside object which is new
 * @params: hash table parameters
 *
 * Replacing an object doesn't affect the number of elements in the hash table
 * or bucket, so we don't need to worry about shrinking or expanding the
 * table here.
 *
 * Returns zero on success, -ENOENT if the entry could not be found,
 * -EINVAL if hash is not the same for the old and new objects.
 */
static __always_inline int rhashtable_replace_fast(
	struct rhashtable *ht, struct rhash_head *obj_old,
	struct rhash_head *obj_new,
	const struct rhashtable_params params)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_replace_fast(ht, tbl, obj_old,
						obj_new, params)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	rcu_read_unlock();

	return err;
}

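/*
 * Example (illustrative sketch, not part of this header): replacing an
 * entry with an updated copy via rhashtable_replace_fast(), reusing the
 * hypothetical struct example_obj/example_params from above.  The copy
 * must keep the same key so both objects hash to the same bucket.
 */
static inline int example_update(struct rhashtable *ht,
				 struct example_obj *old,
				 struct example_obj *new_obj)
{
	new_obj->key = old->key;	/* same key => same hash, as required */
	return rhashtable_replace_fast(ht, &old->node, &new_obj->node,
				       example_params);
}
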
/**
 * rhltable_walk_enter - Initialise an iterator
 * @hlt: Table to walk over
 * @iter: Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may be called from any process context, including
 * non-preemptable context, but cannot be called from softirq or
 * hardirq context.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
static inline void rhltable_walk_enter(struct rhltable *hlt,
				       struct rhashtable_iter *iter)
{
	rhashtable_walk_enter(&hlt->ht, iter);
}

/**
 * rhltable_free_and_destroy - free elements and destroy hash list table
 * @hlt: the hash list table to destroy
 * @free_fn: callback to release resources of element
 * @arg: pointer passed to free_fn
 *
 * See documentation for rhashtable_free_and_destroy.
 */
static inline void rhltable_free_and_destroy(struct rhltable *hlt,
					     void (*free_fn)(void *ptr,
							     void *arg),
					     void *arg)
{
	rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
}

static inline void rhltable_destroy(struct rhltable *hlt)
{
	rhltable_free_and_destroy(hlt, NULL, NULL);
}

#endif /* _LINUX_RHASHTABLE_H */