Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
include/linux/rhashtable.h at 7dfc0063022078a80fe5774815723c185e4b7b57 (1335 lines, 40 kB)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015-2016 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>
#include <linux/bit_spinlock.h>

#include <linux/rhashtable-types.h>
/*
 * Objects in an rhashtable have an embedded struct rhash_head
 * which is linked into a hash chain in the hash table - or one
 * of two or more hash tables when the rhashtable is being resized.
 * The end of the chain is marked with a special nulls marker which has
 * the least significant bit set but otherwise stores the address of
 * the hash bucket. This allows us to be sure we've found the end
 * of the right list.
 * The value stored in the hash bucket has BIT(0) used as a lock bit.
 * This bit must be atomically set before any changes are made to
 * the chain. To avoid dereferencing this pointer without clearing
 * the bit first, we use an opaque 'struct rhash_lock_head *' for the
 * pointer stored in the bucket. This struct needs to be defined so
 * that rcu_dereference() works on it, but it has no content so a
 * cast is needed for it to be useful. This ensures it isn't
 * used by mistake without clearing the lock bit first.
 */
struct rhash_lock_head {};
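
/*
 * Illustrative sketch of the encoding described above; the addresses are
 * made up and only show how BIT(0) is used:
 *
 *	tbl->buckets[h] == NULL			empty bucket
 *	tbl->buckets[h] == 0x...5670		head of chain, unlocked
 *	tbl->buckets[h] == 0x...5671		head of chain, locked (BIT(0) set)
 *
 *	last_entry->next == RHT_NULLS_MARKER(&tbl->buckets[h])
 *			 == the bucket address with BIT(0) set (the address is
 *			    pre-shifted right so that NULLS_MARKER()'s left
 *			    shift does not discard its most significant bit)
 *
 * rht_is_a_nulls() therefore only needs to test BIT(0) of a ->next value:
 * it is clear for a real struct rhash_head and set for the end-of-chain
 * marker.
 */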

/* Maximum chain length before rehash
 *
 * The maximum (not average) chain length grows with the size of the hash
 * table, at a rate of (log N)/(log log N).
 *
 * The value of 16 is selected so that even if the hash table grew to
 * 2^32 you would not expect the maximum chain length to exceed it
 * unless we are under attack (or extremely unlucky).
 *
 * As this limit is only to detect attacks, we don't need to set it to a
 * lower value as you'd need the chain length to vastly exceed 16 to have
 * any real effect on the system.
 */
#define RHT_ELASTICITY	16u

/**
 * struct bucket_table - Table of hash buckets
 * @size: Number of hash buckets
 * @nest: Number of bits of first-level nested table.
 * @rehash: Current bucket being rehashed
 * @hash_rnd: Random seed to fold into hash
 * @walkers: List of active walkers
 * @rcu: RCU structure for freeing the table
 * @future_tbl: Table under construction during rehashing
 * @ntbl: Nested table used when out of memory.
 * @buckets: size * hash buckets
 */
struct bucket_table {
	unsigned int		size;
	unsigned int		nest;
	u32			hash_rnd;
	struct list_head	walkers;
	struct rcu_head		rcu;

	struct bucket_table __rcu *future_tbl;

	struct lockdep_map	dep_map;

	struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};

/*
 * NULLS_MARKER() expects a hash value with the low
 * bits most likely to be significant, and it discards
 * the msb.
 * We give it an address, in which the bottom bit is
 * always 0, and the msb might be significant.
 * So we shift the address down one bit to align with
 * expectations and avoid losing a significant bit.
 *
 * We never store the NULLS_MARKER in the hash table
 * itself as we need the lsb for locking.
 * Instead we store a NULL.
 */
#define RHT_NULLS_MARKER(ptr)	\
	((void *)NULLS_MARKER(((unsigned long) (ptr)) >> 1))
#define INIT_RHT_NULLS_HEAD(ptr)	\
	((ptr) = NULL)

static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr & 1);
}

static inline void *rht_obj(const struct rhashtable *ht,
			    const struct rhash_head *he)
{
	return (char *)he - ht->p.head_offset;
}

static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
					    unsigned int hash)
{
	return hash & (tbl->size - 1);
}

static __always_inline unsigned int rht_key_get_hash(struct rhashtable *ht,
	const void *key, const struct rhashtable_params params,
	unsigned int hash_rnd)
{
	unsigned int hash;

	/* params must be equal to ht->p if it isn't constant. */
	if (!__builtin_constant_p(params.key_len)) {
		hash = ht->p.hashfn(key, ht->key_len, hash_rnd);
	} else {
		unsigned int key_len = params.key_len ? : ht->p.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, hash_rnd);
		else if (key_len & (sizeof(u32) - 1))
			hash = jhash(key, key_len, hash_rnd);
		else
			hash = jhash2(key, key_len / sizeof(u32), hash_rnd);
	}

	return hash;
}
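
/*
 * Worked example (assuming a compile-time constant params.key_len and no
 * custom params.hashfn): a 16-byte key is hashed with
 * jhash2(key, 16 / sizeof(u32), hash_rnd), while a 7-byte key is not a
 * multiple of sizeof(u32) wide and therefore falls back to
 * jhash(key, 7, hash_rnd).
 */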

static __always_inline unsigned int rht_key_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const void *key, const struct rhashtable_params params)
{
	unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd);

	return rht_bucket_index(tbl, hash);
}

static __always_inline unsigned int rht_head_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const struct rhash_head *he, const struct rhashtable_params params)
{
	const char *ptr = rht_obj(ht, he);

	return likely(params.obj_hashfn) ?
	       rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
							    ht->p.key_len,
							tbl->hash_rnd)) :
	       rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_75(const struct rhashtable *ht,
				     const struct bucket_table *tbl)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_shrink_below_30(const struct rhashtable *ht,
				       const struct bucket_table *tbl)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
	       tbl->size > ht->p.min_size;
}

/**
 * rht_grow_above_100 - returns true if nelems > table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_100(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) > tbl->size &&
		(!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_grow_above_max - returns true if table is above maximum
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_max(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) >= ht->max_elems;
}
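
/*
 * For example, a table with tbl->size == 1024 and no max_size limit grows
 * once nelems exceeds 768 (75% load), is forced into the slow path once
 * nelems exceeds 1024 (100% load), and - if automatic_shrinking is enabled
 * and size is still above min_size - shrinks once nelems drops below 307
 * (30% load). rht_grow_above_max() rejects inserts outright with -E2BIG
 * once nelems reaches ht->max_elems.
 */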

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return 1;
}

static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
					     u32 hash)
{
	return 1;
}
#endif /* CONFIG_PROVE_LOCKING */

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj);

void rhashtable_walk_enter(struct rhashtable *ht,
			   struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires_shared(RCU);

static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires_shared(RCU)
{
	(void)rhashtable_walk_start_check(iter);
}

void *rhashtable_walk_next(struct rhashtable_iter *iter);
void *rhashtable_walk_peek(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases_shared(RCU);

void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg);
void rhashtable_destroy(struct rhashtable *ht);

struct rhash_lock_head __rcu **rht_bucket_nested(
	const struct bucket_table *tbl, unsigned int hash);
struct rhash_lock_head __rcu **__rht_bucket_nested(
	const struct bucket_table *tbl, unsigned int hash);
struct rhash_lock_head __rcu **rht_bucket_nested_insert(
	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash);

#define rht_dereference(p, ht) \
	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_rcu(p, ht) \
	rcu_dereference_all_check(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_bucket(p, tbl, hash) \
	rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_dereference_bucket_rcu(p, tbl, hash) \
	rcu_dereference_all_check(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_entry(tpos, pos, member) \
	({ tpos = container_of(pos, typeof(*tpos), member); 1; })

static inline struct rhash_lock_head __rcu *const *rht_bucket(
	const struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
				     &tbl->buckets[hash];
}

static inline struct rhash_lock_head __rcu **rht_bucket_var(
	struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) :
				     &tbl->buckets[hash];
}

static inline struct rhash_lock_head __rcu **rht_bucket_insert(
	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
				     &tbl->buckets[hash];
}

/*
 * We lock a bucket by setting BIT(0) in the pointer - this is always
 * zero in real pointers. The NULLS mark is never stored in the bucket,
 * rather we store NULL if the bucket is empty.
 * bit_spin_locks do not handle contention well, but the whole point
 * of the hashtable design is to achieve minimum per-bucket contention.
 * A nested hash table might not have a bucket pointer. In that case
 * we cannot get a lock. For remove and replace the bucket cannot be
 * interesting and doesn't need locking.
 * For insert we allocate the bucket if this is the last bucket_table,
 * and then take the lock.
 * Sometimes we unlock a bucket by writing a new pointer there. In that
 * case we don't need to unlock, but we do need to reset state such as
 * local_bh. For that we have rht_assign_unlock(). As rcu_assign_pointer()
 * provides the same release semantics that bit_spin_unlock() provides,
 * this is safe.
 * When we write to a bucket without unlocking, we use rht_assign_locked().
 */

static inline unsigned long rht_lock(struct bucket_table *tbl,
				     struct rhash_lock_head __rcu **bkt)
	__acquires(__bitlock(0, bkt))
{
	unsigned long flags;

	local_irq_save(flags);
	bit_spin_lock(0, (unsigned long *)bkt);
	lock_map_acquire(&tbl->dep_map);
	return flags;
}

static inline unsigned long rht_lock_nested(struct bucket_table *tbl,
					    struct rhash_lock_head __rcu **bucket,
					    unsigned int subclass)
	__acquires(__bitlock(0, bucket))
{
	unsigned long flags;

	local_irq_save(flags);
	bit_spin_lock(0, (unsigned long *)bucket);
	lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);
	return flags;
}

static inline void rht_unlock(struct bucket_table *tbl,
			      struct rhash_lock_head __rcu **bkt,
			      unsigned long flags)
	__releases(__bitlock(0, bkt))
{
	lock_map_release(&tbl->dep_map);
	bit_spin_unlock(0, (unsigned long *)bkt);
	local_irq_restore(flags);
}

enum rht_lookup_freq {
	RHT_LOOKUP_NORMAL,
	RHT_LOOKUP_LIKELY,
};

static __always_inline struct rhash_head *__rht_ptr(
	struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt,
	const enum rht_lookup_freq freq)
{
	unsigned long p_val = (unsigned long)p & ~BIT(0);

	BUILD_BUG_ON(!__builtin_constant_p(freq));

	if (freq == RHT_LOOKUP_LIKELY)
		return (struct rhash_head *)
			(likely(p_val) ? p_val : (unsigned long)RHT_NULLS_MARKER(bkt));
	else
		return (struct rhash_head *)
			(p_val ?: (unsigned long)RHT_NULLS_MARKER(bkt));
}

/*
 * Where 'bkt' is a bucket and might be locked:
 *   rht_ptr_rcu() dereferences that pointer and clears the lock bit.
 *   rht_ptr() dereferences in a context where the bucket is locked.
 *   rht_ptr_exclusive() dereferences in a context where exclusive
 *   access is guaranteed, such as when destroying the table.
 */
static __always_inline struct rhash_head *__rht_ptr_rcu(
	struct rhash_lock_head __rcu *const *bkt,
	const enum rht_lookup_freq freq)
{
	return __rht_ptr(rcu_dereference_all(*bkt), bkt, freq);
}

static inline struct rhash_head *rht_ptr_rcu(
	struct rhash_lock_head __rcu *const *bkt)
{
	return __rht_ptr_rcu(bkt, RHT_LOOKUP_NORMAL);
}

static inline struct rhash_head *rht_ptr(
	struct rhash_lock_head __rcu *const *bkt,
	struct bucket_table *tbl,
	unsigned int hash)
{
	return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt,
			 RHT_LOOKUP_NORMAL);
}

static inline struct rhash_head *rht_ptr_exclusive(
	struct rhash_lock_head __rcu *const *bkt)
{
	return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt,
			 RHT_LOOKUP_NORMAL);
}

static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
				     struct rhash_head *obj)
{
	if (rht_is_a_nulls(obj))
		obj = NULL;
	rcu_assign_pointer(*bkt, (void *)((unsigned long)obj | BIT(0)));
}

static inline void rht_assign_unlock(struct bucket_table *tbl,
				     struct rhash_lock_head __rcu **bkt,
				     struct rhash_head *obj,
				     unsigned long flags)
	__releases(__bitlock(0, bkt))
{
	if (rht_is_a_nulls(obj))
		obj = NULL;
	lock_map_release(&tbl->dep_map);
	rcu_assign_pointer(*bkt, (void *)obj);
	preempt_enable();
	__release(__bitlock(0, bkt));
	local_irq_restore(flags);
}
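
/*
 * Typical update pattern built from these helpers: a condensed sketch of
 * what __rhashtable_insert_fast() below does, with error handling omitted:
 *
 *	bkt = rht_bucket_insert(ht, tbl, hash);
 *	flags = rht_lock(tbl, bkt);
 *	head = rht_ptr(bkt, tbl, hash);
 *	RCU_INIT_POINTER(obj->next, head);
 *	rht_assign_unlock(tbl, bkt, obj, flags);
 *
 * rht_assign_unlock() publishes the new head and drops the bucket lock in
 * one step. When the bucket head itself is left unchanged, the update is
 * published with rcu_assign_pointer() on the previous element's ->next and
 * the bucket is released with plain rht_unlock() instead.
 */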

/**
 * rht_for_each_from - iterate over hash chain from given head
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the &struct rhash_head to start from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each_from(pos, head, tbl, hash) \
	for (pos = head; \
	     !rht_is_a_nulls(pos); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each - iterate over hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each(pos, tbl, hash) \
	rht_for_each_from(pos, rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
			  tbl, hash)

/**
 * rht_for_each_entry_from - iterate over hash chain from given head
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the &struct rhash_head to start from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry_from(tpos, pos, head, tbl, hash, member) \
	for (pos = head; \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry(tpos, pos, tbl, hash, member) \
	rht_for_each_entry_from(tpos, pos, \
				rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
				tbl, hash, member)

/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @next:	the &struct rhash_head to use as next in loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 */
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
	for (pos = rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
	     next = !rht_is_a_nulls(pos) ? \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
	     pos = next, \
	     next = !rht_is_a_nulls(pos) ? \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL)

/**
 * rht_for_each_rcu_from - iterate over rcu hash chain from given head
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the &struct rhash_head to start from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu_from(pos, head, tbl, hash) \
	for (({barrier(); }), \
	     pos = head; \
	     !rht_is_a_nulls(pos); \
	     pos = rcu_dereference_all(pos->next))

/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, tbl, hash) \
	for (({barrier(); }), \
	     pos = rht_ptr_rcu(rht_bucket(tbl, hash)); \
	     !rht_is_a_nulls(pos); \
	     pos = rcu_dereference_all(pos->next))

/**
 * rht_for_each_entry_rcu_from - iterate over rcu hash chain from given head
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the &struct rhash_head to start from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu_from(tpos, pos, head, tbl, hash, member) \
	for (({barrier(); }), \
	     pos = head; \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
	     pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))

/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
	rht_for_each_entry_rcu_from(tpos, pos, \
				    rht_ptr_rcu(rht_bucket(tbl, hash)), \
				    tbl, hash, member)

/**
 * rhl_for_each_rcu - iterate over rcu hash table list
 * @pos:	the &struct rlist_head to use as a loop cursor.
 * @list:	the head of the list
 *
 * This hash chain list-traversal primitive should be used on the
 * list returned by rhltable_lookup.
 */
#define rhl_for_each_rcu(pos, list) \
	for (pos = list; pos; pos = rcu_dereference_all(pos->next))

/**
 * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rlist_head to use as a loop cursor.
 * @list:	the head of the list
 * @member:	name of the &struct rlist_head within the hashable struct.
 *
 * This hash chain list-traversal primitive should be used on the
 * list returned by rhltable_lookup.
 */
#define rhl_for_each_entry_rcu(tpos, pos, list, member) \
	for (pos = list; pos && rht_entry(tpos, pos, member); \
	     pos = rcu_dereference_all(pos->next))

static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
				     const void *obj)
{
	struct rhashtable *ht = arg->ht;
	const char *ptr = obj;

	return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}

/* Internal function, do not use. */
static __always_inline struct rhash_head *__rhashtable_lookup(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params,
	const enum rht_lookup_freq freq)
	__must_hold_shared(RCU)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_lock_head __rcu *const *bkt;
	struct bucket_table *tbl;
	struct rhash_head *he;
	unsigned int hash;

	BUILD_BUG_ON(!__builtin_constant_p(freq));
	tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
	hash = rht_key_hashfn(ht, tbl, key, params);
	bkt = rht_bucket(tbl, hash);
	do {
		rht_for_each_rcu_from(he, __rht_ptr_rcu(bkt, freq), tbl, hash) {
			if (params.obj_cmpfn ?
			    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
			    rhashtable_compare(&arg, rht_obj(ht, he)))
				continue;
			return he;
		}
		/* An object might have been moved to a different hash chain,
		 * while we walk along it - better check and retry.
		 */
	} while (he != RHT_NULLS_MARKER(bkt));

	/* Ensure we see any new tables. */
	smp_rmb();

	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(tbl))
		goto restart;

	return NULL;
}

/**
 * rhashtable_lookup - search hash table
 * @ht:		hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This must only be called under the RCU read lock.
 *
 * Returns the first entry on which the compare function returned true.
 */
static __always_inline void *rhashtable_lookup(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
	__must_hold_shared(RCU)
{
	struct rhash_head *he = __rhashtable_lookup(ht, key, params,
						    RHT_LOOKUP_NORMAL);

	return he ? rht_obj(ht, he) : NULL;
}

static __always_inline void *rhashtable_lookup_likely(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
	__must_hold_shared(RCU)
{
	struct rhash_head *he = __rhashtable_lookup(ht, key, params,
						    RHT_LOOKUP_LIKELY);

	return likely(he) ? rht_obj(ht, he) : NULL;
}

/**
 * rhashtable_lookup_fast - search hash table, without RCU read lock
 * @ht:		hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * Only use this function when you have other mechanisms guaranteeing
 * that the object won't go away after the RCU read lock is released.
 *
 * Returns the first entry on which the compare function returned true.
 */
static __always_inline void *rhashtable_lookup_fast(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	void *obj;

	rcu_read_lock();
	obj = rhashtable_lookup(ht, key, params);
	rcu_read_unlock();

	return obj;
}

/**
 * rhltable_lookup - search hash list table
 * @hlt:	hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. All matching entries are returned
 * in a list.
 *
 * This must only be called under the RCU read lock.
 *
 * Returns the list of entries that match the given key.
 */
static __always_inline struct rhlist_head *rhltable_lookup(
	struct rhltable *hlt, const void *key,
	const struct rhashtable_params params)
	__must_hold_shared(RCU)
{
	struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
						    RHT_LOOKUP_NORMAL);

	return he ? container_of(he, struct rhlist_head, rhead) : NULL;
}

static __always_inline struct rhlist_head *rhltable_lookup_likely(
	struct rhltable *hlt, const void *key,
	const struct rhashtable_params params)
	__must_hold_shared(RCU)
{
	struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
						    RHT_LOOKUP_LIKELY);

	return likely(he) ? container_of(he, struct rhlist_head, rhead) : NULL;
}
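
/*
 * Minimal lookup sketch; the object layout and the parameter/variable names
 * (test_obj, test_params, ht, key, obj) are made up for illustration:
 *
 *	struct test_obj {
 *		int			key;
 *		struct rhash_head	node;
 *	};
 *
 *	static const struct rhashtable_params test_params = {
 *		.key_len	= sizeof(int),
 *		.key_offset	= offsetof(struct test_obj, key),
 *		.head_offset	= offsetof(struct test_obj, node),
 *	};
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(&ht, &key, test_params);
 *	rcu_read_unlock();
 *
 * For an rhltable, rhltable_lookup() instead returns the bucket's list of
 * matching entries, which is then walked with rhl_for_each_entry_rcu()
 * while still under rcu_read_lock().
 */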

/* Internal function, please use rhashtable_insert_fast() instead. This
 * function returns the existing element already in the hash table if there
 * is a clash, otherwise it returns an error via ERR_PTR().
 */
static __always_inline void *__rhashtable_insert_fast(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params, bool rhlist)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_lock_head __rcu **bkt;
	struct rhash_head __rcu **pprev;
	struct bucket_table *tbl;
	struct rhash_head *head;
	unsigned long flags;
	unsigned int hash;
	int elasticity;
	void *data;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = rht_head_hashfn(ht, tbl, obj, params);
	elasticity = RHT_ELASTICITY;
	bkt = rht_bucket_insert(ht, tbl, hash);
	data = ERR_PTR(-ENOMEM);
	if (!bkt)
		goto out;
	pprev = NULL;
	flags = rht_lock(tbl, bkt);

	if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
slow_path:
		rht_unlock(tbl, bkt, flags);
		rcu_read_unlock();
		return rhashtable_insert_slow(ht, key, obj);
	}

	rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
		struct rhlist_head *plist;
		struct rhlist_head *list;

		elasticity--;
		if (!key ||
		    (params.obj_cmpfn ?
		     params.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			pprev = &head->next;
			continue;
		}

		data = rht_obj(ht, head);

		if (!rhlist)
			goto out_unlock;


		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		if (pprev) {
			rcu_assign_pointer(*pprev, obj);
			rht_unlock(tbl, bkt, flags);
		} else
			rht_assign_unlock(tbl, bkt, obj, flags);
		data = NULL;
		goto out;
	}

	if (elasticity <= 0)
		goto slow_path;

	data = ERR_PTR(-E2BIG);
	if (unlikely(rht_grow_above_max(ht, tbl)))
		goto out_unlock;

	if (unlikely(rht_grow_above_100(ht, tbl)))
		goto slow_path;

	/* Inserting at head of list makes unlocking free. */
	head = rht_ptr(bkt, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	atomic_inc(&ht->nelems);
	rht_assign_unlock(tbl, bkt, obj, flags);

	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	data = NULL;
out:
	rcu_read_unlock();

	return data;

out_unlock:
	rht_unlock(tbl, bkt, flags);
	goto out;
}

/**
 * rhashtable_insert_fast - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Will take the per bucket bitlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static __always_inline int rhashtable_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	void *ret;

	ret = __rhashtable_insert_fast(ht, NULL, obj, params, false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}
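
/*
 * Typical insertion with error handling, reusing the made-up names from the
 * lookup sketch above:
 *
 *	err = rhashtable_insert_fast(&ht, &obj->node, test_params);
 *	if (err)
 *		kfree(obj);	(a negative errno such as -E2BIG or -ENOMEM)
 *
 * Note that rhashtable_insert_fast() passes a NULL key down to
 * __rhashtable_insert_fast() and therefore never reports -EEXIST; use
 * rhashtable_lookup_insert_fast() below when each key must map to at most
 * one object.
 */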

/**
 * rhltable_insert_key - insert object into hash list table
 * @hlt:	hash list table
 * @key:	the pointer to the key
 * @list:	pointer to hash list head inside object
 * @params:	hash table parameters
 *
 * Will take the per bucket bitlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static __always_inline int rhltable_insert_key(
	struct rhltable *hlt, const void *key, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
						params, true));
}

/**
 * rhltable_insert - insert object into hash list table
 * @hlt:	hash list table
 * @list:	pointer to hash list head inside object
 * @params:	hash table parameters
 *
 * Will take the per bucket bitlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static __always_inline int rhltable_insert(
	struct rhltable *hlt, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(&hlt->ht, &list->rhead);

	key += params.key_offset;

	return rhltable_insert_key(hlt, key, list, params);
}

/**
 * rhashtable_lookup_insert_fast - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * This lookup function may only be used for a fixed-key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static __always_inline int rhashtable_lookup_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);
	void *ret;

	BUG_ON(ht->p.obj_hashfn);

	ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
				       false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}

/**
 * rhashtable_lookup_get_insert_fast - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Just like rhashtable_lookup_insert_fast(), but this function returns the
 * object if it exists, NULL if it did not and the insertion was successful,
 * and an ERR_PTR otherwise.
 */
static __always_inline void *rhashtable_lookup_get_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);

	BUG_ON(ht->p.obj_hashfn);

	return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
					false);
}
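
/*
 * A caller typically distinguishes the three outcomes like this (sketch):
 *
 *	old = rhashtable_lookup_get_insert_fast(&ht, &obj->node, test_params);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);	(insertion failed)
 *	if (old)
 *		return -EEXIST;		(an existing object with the same key won)
 *	(otherwise obj is now in the table)
 */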

/**
 * rhashtable_lookup_insert_key - search and insert object to hash table
 *				  with explicit key
 * @ht:		hash table
 * @key:	key
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 *
 * Returns zero on success.
 */
static __always_inline int rhashtable_lookup_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	void *ret;

	BUG_ON(!ht->p.obj_hashfn || !key);

	ret = __rhashtable_insert_fast(ht, key, obj, params, false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}

/**
 * rhashtable_lookup_get_insert_key - lookup and insert object into hash table
 * @ht:		hash table
 * @key:	key
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Just like rhashtable_lookup_insert_key(), but this function returns the
 * object if it exists, NULL if it does not and the insertion was successful,
 * and an ERR_PTR otherwise.
 */
static __always_inline void *rhashtable_lookup_get_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	BUG_ON(!ht->p.obj_hashfn || !key);

	return __rhashtable_insert_fast(ht, key, obj, params, false);
}

/* Internal function, please use rhashtable_remove_fast() instead */
static __always_inline int __rhashtable_remove_fast_one(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj, const struct rhashtable_params params,
	bool rhlist)
{
	struct rhash_lock_head __rcu **bkt;
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	unsigned long flags;
	unsigned int hash;
	int err = -ENOENT;

	hash = rht_head_hashfn(ht, tbl, obj, params);
	bkt = rht_bucket_var(tbl, hash);
	if (!bkt)
		return -ENOENT;
	pprev = NULL;
	flags = rht_lock(tbl, bkt);

	rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
		struct rhlist_head *list;

		list = container_of(he, struct rhlist_head, rhead);

		if (he != obj) {
			struct rhlist_head __rcu **lpprev;

			pprev = &he->next;

			if (!rhlist)
				continue;

			do {
				lpprev = &list->next;
				list = rht_dereference_bucket(list->next,
							      tbl, hash);
			} while (list && obj != &list->rhead);

			if (!list)
				continue;

			list = rht_dereference_bucket(list->next, tbl, hash);
			RCU_INIT_POINTER(*lpprev, list);
			err = 0;
			break;
		}

		obj = rht_dereference_bucket(obj->next, tbl, hash);
		err = 1;

		if (rhlist) {
			list = rht_dereference_bucket(list->next, tbl, hash);
			if (list) {
				RCU_INIT_POINTER(list->rhead.next, obj);
				obj = &list->rhead;
				err = 0;
			}
		}

		if (pprev) {
			rcu_assign_pointer(*pprev, obj);
			rht_unlock(tbl, bkt, flags);
		} else {
			rht_assign_unlock(tbl, bkt, obj, flags);
		}
		goto unlocked;
	}

	rht_unlock(tbl, bkt, flags);
unlocked:
	if (err > 0) {
		atomic_dec(&ht->nelems);
		if (unlikely(ht->p.automatic_shrinking &&
			     rht_shrink_below_30(ht, tbl)))
			schedule_work(&ht->run_work);
		err = 0;
	}

	return err;
}

/* Internal function, please use rhashtable_remove_fast() instead */
static __always_inline int __rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params, bool rhlist)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params,
						   rhlist)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	rcu_read_unlock();

	return err;
}

/**
 * rhashtable_remove_fast - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Since the hash chain is singly linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if permitted when residency drops
 * below 30%.
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static __always_inline int rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	return __rhashtable_remove_fast(ht, obj, params, false);
}

/**
 * rhltable_remove - remove object from hash list table
 * @hlt:	hash list table
 * @list:	pointer to hash list head inside object
 * @params:	hash table parameters
 *
 * Since the hash chain is singly linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if permitted when residency drops
 * below 30%.
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static __always_inline int rhltable_remove(
	struct rhltable *hlt, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true);
}

/* Internal function, please use rhashtable_replace_fast() instead */
static __always_inline int __rhashtable_replace_fast(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj_old, struct rhash_head *obj_new,
	const struct rhashtable_params params)
{
	struct rhash_lock_head __rcu **bkt;
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	unsigned long flags;
	unsigned int hash;
	int err = -ENOENT;

	/* Minimally, the old and new objects must have the same hash
	 * (which should mean identifiers are the same).
	 */
	hash = rht_head_hashfn(ht, tbl, obj_old, params);
	if (hash != rht_head_hashfn(ht, tbl, obj_new, params))
		return -EINVAL;

	bkt = rht_bucket_var(tbl, hash);
	if (!bkt)
		return -ENOENT;

	pprev = NULL;
	flags = rht_lock(tbl, bkt);

	rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
		if (he != obj_old) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(obj_new->next, obj_old->next);
		if (pprev) {
			rcu_assign_pointer(*pprev, obj_new);
			rht_unlock(tbl, bkt, flags);
		} else {
			rht_assign_unlock(tbl, bkt, obj_new, flags);
		}
		err = 0;
		goto unlocked;
	}

	rht_unlock(tbl, bkt, flags);

unlocked:
	return err;
}

/**
 * rhashtable_replace_fast - replace an object in hash table
 * @ht:		hash table
 * @obj_old:	pointer to hash head inside object being replaced
 * @obj_new:	pointer to hash head inside object which is new
 * @params:	hash table parameters
 *
 * Replacing an object doesn't affect the number of elements in the hash table
 * or bucket, so we don't need to worry about shrinking or expanding the
 * table here.
 *
 * Returns zero on success, -ENOENT if the entry could not be found,
 * -EINVAL if hash is not the same for the old and new objects.
 */
static __always_inline int rhashtable_replace_fast(
	struct rhashtable *ht, struct rhash_head *obj_old,
	struct rhash_head *obj_new,
	const struct rhashtable_params params)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_replace_fast(ht, tbl, obj_old,
						obj_new, params)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	rcu_read_unlock();

	return err;
}

/**
 * rhltable_walk_enter - Initialise an iterator
 * @hlt:	Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may be called from any process context, including
 * non-preemptable context, but cannot be called from softirq or
 * hardirq context.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
static inline void rhltable_walk_enter(struct rhltable *hlt,
				       struct rhashtable_iter *iter)
{
	rhashtable_walk_enter(&hlt->ht, iter);
}
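
/*
 * A common walk pattern over a whole table (iter, obj and do_something()
 * are placeholders); the same sequence applies after rhltable_walk_enter():
 *
 *	struct rhashtable_iter iter;
 *	void *obj;
 *
 *	rhashtable_walk_enter(&ht, &iter);
 *	do {
 *		rhashtable_walk_start(&iter);
 *
 *		while ((obj = rhashtable_walk_next(&iter)) && !IS_ERR(obj))
 *			do_something(obj);
 *
 *		rhashtable_walk_stop(&iter);
 *	} while (obj == ERR_PTR(-EAGAIN));
 *	rhashtable_walk_exit(&iter);
 *
 * rhashtable_walk_next() returns ERR_PTR(-EAGAIN) when a resize forces the
 * walk to be restarted; objects seen before the restart may then be seen
 * again.
 */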

/**
 * rhltable_free_and_destroy - free elements and destroy hash list table
 * @hlt:	the hash list table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * See documentation for rhashtable_free_and_destroy.
 */
static inline void rhltable_free_and_destroy(struct rhltable *hlt,
					     void (*free_fn)(void *ptr,
							     void *arg),
					     void *arg)
{
	rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
}

static inline void rhltable_destroy(struct rhltable *hlt)
{
	rhltable_free_and_destroy(hlt, NULL, NULL);
}

#endif /* _LINUX_RHASHTABLE_H */