Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at ee9dce44362b2d8132c32964656ab6dff7dfbc6a 132 lines 4.7 kB view raw
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RCULIST_BL_H
#define _LINUX_RCULIST_BL_H

/*
 * RCU-protected bl list version. See include/linux/list_bl.h.
 *
 * NOTE(review): the lock/flag bits live in the low bits of head->first
 * (LIST_BL_LOCKMASK) — every accessor below masks them out or ORs them
 * back in; see list_bl.h for the exact encoding.
 */
#include <linux/list_bl.h>
#include <linux/rcupdate.h>

/* return the first ptr or next element in an RCU protected list */
#define hlist_bl_first_rcu(head) \
	(*((struct hlist_bl_node __rcu **)(&(head)->first)))
#define hlist_bl_next_rcu(node) \
	(*((struct hlist_bl_node __rcu **)(&(node)->next)))

/*
 * Publish @n as the new first element of @h. Caller must hold the list
 * lock: the BUG_ON checks that @n carries no lock bits and that the lock
 * bit is currently set in h->first, and rcu_assign_pointer() orders the
 * initialization of @n before publication to concurrent RCU readers.
 */
static inline void hlist_bl_set_first_rcu(struct hlist_bl_head *h,
					struct hlist_bl_node *n)
{
	LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
	LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) !=
							LIST_BL_LOCKMASK);
	/* preserve the lock bit while swapping in the new first pointer */
	rcu_assign_pointer(hlist_bl_first_rcu(h),
		(struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK));
}

/*
 * Read the first element of @head for RCU traversal, stripping the lock
 * bits. Legal either inside an RCU read-side critical section or with the
 * list lock held (hence the hlist_bl_is_locked() check condition).
 */
#define hlist_bl_first_rcu_dereference(head)				\
({									\
	struct hlist_bl_head *__head = (head);				\
									\
	(struct hlist_bl_node *)					\
	((unsigned long)rcu_dereference_check(hlist_bl_first_rcu(__head), \
					      hlist_bl_is_locked(__head)) & \
	 ~LIST_BL_LOCKMASK);						\
})

/**
 * hlist_bl_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_bl_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_bl_add_head_rcu()
 * or hlist_bl_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_bl_for_each_entry().
 */
static inline void hlist_bl_del_rcu(struct hlist_bl_node *n)
{
	__hlist_bl_del(n);
	/* only pprev is poisoned: ->next must stay valid for RCU readers */
	n->pprev = LIST_POISON2;
}

/**
 * hlist_bl_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist_bl,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_bl_add_head_rcu()
 * or hlist_bl_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_bl_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_bl_add_head_rcu(struct hlist_bl_node *n,
					struct hlist_bl_head *h)
{
	struct hlist_bl_node *first;

	/* don't need hlist_bl_first_rcu* because we're under lock */
	first = hlist_bl_first(h);

	n->next = first;
	if (first)
		first->pprev = &n->next;
	n->pprev = &h->first;

	/* need _rcu because we can have concurrent lock free readers */
	hlist_bl_set_first_rcu(h, n);
}
/**
 * hlist_bl_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_bl_node to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_bl_node within the struct.
 *
 */
#define hlist_bl_for_each_entry_rcu(tpos, pos, head, member)		\
	for (pos = hlist_bl_first_rcu_dereference(head);		\
		pos &&							\
		({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1; }); \
		pos = rcu_dereference_raw(hlist_bl_next_rcu(pos)))

/**
 * hlist_bl_for_each_entry_continue_rcu - continue iteration over list of given
 * type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_bl_node to use as a loop cursor.
 * @member:	the name of the hlist_bl_node within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position which must have been in the list when the RCU read
 * lock was taken.
 * This would typically require either that you obtained the node from a
 * previous walk of the list in the same RCU read-side critical section, or
 * that you held some sort of non-RCU reference (such as a reference count)
 * to keep the node alive *and* in the list.
 */
#define hlist_bl_for_each_entry_continue_rcu(tpos, pos, member)		\
	for (pos = rcu_dereference_raw(hlist_bl_next_rcu(&(tpos)->member)); \
		pos &&							\
		({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1; }); \
		pos = rcu_dereference_raw(hlist_bl_next_rcu(pos)))

#endif