Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

rv: Refactor da_monitor to minimise macros

The da_monitor helper functions are generated from macros of the type:

DECLARE_DA_FUNCTION(name, type) \
static void da_func_x_##name(type arg) {} \
static void da_func_y_##name(type arg) {} \

This is good for minimising code duplication, but the long macros built
from escaped line endings are rather hard to parse. Since the functions
are static, the advantage of naming them differently for each monitor is
minimal.

Refactor the da_monitor.h file to minimise macros: instead of declaring
functions from macros, we simply declare them with the same name for all
monitors (e.g. da_func_x), and for any remaining reference to the monitor
name (e.g. tracepoints, enums, global variables) we use the CONCATENATE
macro.
In this way the file is much easier to maintain while keeping the same
generality.
Functions depending on the monitor types are now conditionally compiled
according to the value of RV_MON_TYPE, which must be defined in the
monitor source.
The monitor type can be specified as in the original implementation,
although it's best to keep the default implementation (unsigned char),
since not all parts of the code support larger data types and there is
likely no need for them.

We keep the empty macro definitions to ease review of this change with
diff tools, but cleanup is required.

Also adapt existing monitors to keep the build working.

Reviewed-by: Nam Cao <namcao@linutronix.de>
Link: https://lore.kernel.org/r/20251126104241.291258-2-gmonaco@redhat.com
Signed-off-by: Gabriele Monaco <gmonaco@redhat.com>

+682 -676
+4
include/linux/rv.h
··· 10 10 #define MAX_DA_NAME_LEN 32 11 11 #define MAX_DA_RETRY_RACING_EVENTS 3 12 12 13 + #define RV_MON_GLOBAL 0 14 + #define RV_MON_PER_CPU 1 15 + #define RV_MON_PER_TASK 2 16 + 13 17 #ifdef CONFIG_RV 14 18 #include <linux/array_size.h> 15 19 #include <linux/bitops.h>
+73 -59
include/rv/automata.h
··· 6 6 * models in C generated by the dot2k tool. 7 7 */ 8 8 9 + #ifndef MONITOR_NAME 10 + #error "MONITOR_NAME macro is not defined. Did you include $(MODEL_NAME).h generated by rvgen?" 11 + #endif 12 + 13 + #ifndef type 14 + #define type unsigned char 15 + #endif 16 + 17 + #define RV_AUTOMATON_NAME CONCATENATE(automaton_, MONITOR_NAME) 18 + #define EVENT_MAX CONCATENATE(event_max_, MONITOR_NAME) 19 + #define STATE_MAX CONCATENATE(state_max_, MONITOR_NAME) 20 + #define events CONCATENATE(events_, MONITOR_NAME) 21 + #define states CONCATENATE(states_, MONITOR_NAME) 22 + 9 23 /* 10 24 * DECLARE_AUTOMATA_HELPERS - define a set of helper functions for automata 11 25 * ··· 27 13 * as suffix for the functions and data. These functions will handle automaton 28 14 * with data type 'type'. 29 15 */ 30 - #define DECLARE_AUTOMATA_HELPERS(name, type) \ 31 - \ 32 - /* \ 33 - * model_get_state_name_##name - return the (string) name of the given state \ 34 - */ \ 35 - static char *model_get_state_name_##name(enum states_##name state) \ 36 - { \ 37 - if ((state < 0) || (state >= state_max_##name)) \ 38 - return "INVALID"; \ 39 - \ 40 - return automaton_##name.state_names[state]; \ 41 - } \ 42 - \ 43 - /* \ 44 - * model_get_event_name_##name - return the (string) name of the given event \ 45 - */ \ 46 - static char *model_get_event_name_##name(enum events_##name event) \ 47 - { \ 48 - if ((event < 0) || (event >= event_max_##name)) \ 49 - return "INVALID"; \ 50 - \ 51 - return automaton_##name.event_names[event]; \ 52 - } \ 53 - \ 54 - /* \ 55 - * model_get_initial_state_##name - return the automaton's initial state \ 56 - */ \ 57 - static inline type model_get_initial_state_##name(void) \ 58 - { \ 59 - return automaton_##name.initial_state; \ 60 - } \ 61 - \ 62 - /* \ 63 - * model_get_next_state_##name - process an automaton event occurrence \ 64 - * \ 65 - * Given the current state (curr_state) and the event (event), returns \ 66 - * the next state, or INVALID_STATE in case of 
error. \ 67 - */ \ 68 - static inline type model_get_next_state_##name(enum states_##name curr_state, \ 69 - enum events_##name event) \ 70 - { \ 71 - if ((curr_state < 0) || (curr_state >= state_max_##name)) \ 72 - return INVALID_STATE; \ 73 - \ 74 - if ((event < 0) || (event >= event_max_##name)) \ 75 - return INVALID_STATE; \ 76 - \ 77 - return automaton_##name.function[curr_state][event]; \ 78 - } \ 79 - \ 80 - /* \ 81 - * model_is_final_state_##name - check if the given state is a final state \ 82 - */ \ 83 - static inline bool model_is_final_state_##name(enum states_##name state) \ 84 - { \ 85 - if ((state < 0) || (state >= state_max_##name)) \ 86 - return 0; \ 87 - \ 88 - return automaton_##name.final_states[state]; \ 16 + #define DECLARE_AUTOMATA_HELPERS(name, type) 17 + 18 + /* 19 + * model_get_state_name - return the (string) name of the given state 20 + */ 21 + static char *model_get_state_name(enum states state) 22 + { 23 + if ((state < 0) || (state >= STATE_MAX)) 24 + return "INVALID"; 25 + 26 + return RV_AUTOMATON_NAME.state_names[state]; 27 + } 28 + 29 + /* 30 + * model_get_event_name - return the (string) name of the given event 31 + */ 32 + static char *model_get_event_name(enum events event) 33 + { 34 + if ((event < 0) || (event >= EVENT_MAX)) 35 + return "INVALID"; 36 + 37 + return RV_AUTOMATON_NAME.event_names[event]; 38 + } 39 + 40 + /* 41 + * model_get_initial_state - return the automaton's initial state 42 + */ 43 + static inline type model_get_initial_state(void) 44 + { 45 + return RV_AUTOMATON_NAME.initial_state; 46 + } 47 + 48 + /* 49 + * model_get_next_state - process an automaton event occurrence 50 + * 51 + * Given the current state (curr_state) and the event (event), returns 52 + * the next state, or INVALID_STATE in case of error. 
53 + */ 54 + static inline type model_get_next_state(enum states curr_state, 55 + enum events event) 56 + { 57 + if ((curr_state < 0) || (curr_state >= STATE_MAX)) 58 + return INVALID_STATE; 59 + 60 + if ((event < 0) || (event >= EVENT_MAX)) 61 + return INVALID_STATE; 62 + 63 + return RV_AUTOMATON_NAME.function[curr_state][event]; 64 + } 65 + 66 + /* 67 + * model_is_final_state - check if the given state is a final state 68 + */ 69 + static inline bool model_is_final_state(enum states state) 70 + { 71 + if ((state < 0) || (state >= STATE_MAX)) 72 + return 0; 73 + 74 + return RV_AUTOMATON_NAME.final_states[state]; 89 75 }
+443 -455
include/rv/da_monitor.h
··· 13 13 14 14 #include <rv/automata.h> 15 15 #include <linux/rv.h> 16 + #include <linux/stringify.h> 16 17 #include <linux/bug.h> 17 18 #include <linux/sched.h> 19 + 20 + static struct rv_monitor rv_this; 18 21 19 22 /* 20 23 * Generic helpers for all types of deterministic automata monitors. 21 24 */ 22 - #define DECLARE_DA_MON_GENERIC_HELPERS(name, type) \ 23 - \ 24 - static void react_##name(type curr_state, type event) \ 25 - { \ 26 - rv_react(&rv_##name, \ 27 - "rv: monitor %s does not allow event %s on state %s\n", \ 28 - #name, \ 29 - model_get_event_name_##name(event), \ 30 - model_get_state_name_##name(curr_state)); \ 31 - } \ 32 - \ 33 - /* \ 34 - * da_monitor_reset_##name - reset a monitor and setting it to init state \ 35 - */ \ 36 - static inline void da_monitor_reset_##name(struct da_monitor *da_mon) \ 37 - { \ 38 - da_mon->monitoring = 0; \ 39 - da_mon->curr_state = model_get_initial_state_##name(); \ 40 - } \ 41 - \ 42 - /* \ 43 - * da_monitor_start_##name - start monitoring \ 44 - * \ 45 - * The monitor will ignore all events until monitoring is set to true. This \ 46 - * function needs to be called to tell the monitor to start monitoring. 
\ 47 - */ \ 48 - static inline void da_monitor_start_##name(struct da_monitor *da_mon) \ 49 - { \ 50 - da_mon->curr_state = model_get_initial_state_##name(); \ 51 - da_mon->monitoring = 1; \ 52 - } \ 53 - \ 54 - /* \ 55 - * da_monitoring_##name - returns true if the monitor is processing events \ 56 - */ \ 57 - static inline bool da_monitoring_##name(struct da_monitor *da_mon) \ 58 - { \ 59 - return da_mon->monitoring; \ 60 - } \ 61 - \ 62 - /* \ 63 - * da_monitor_enabled_##name - checks if the monitor is enabled \ 64 - */ \ 65 - static inline bool da_monitor_enabled_##name(void) \ 66 - { \ 67 - /* global switch */ \ 68 - if (unlikely(!rv_monitoring_on())) \ 69 - return 0; \ 70 - \ 71 - /* monitor enabled */ \ 72 - if (unlikely(!rv_##name.enabled)) \ 73 - return 0; \ 74 - \ 75 - return 1; \ 76 - } \ 77 - \ 78 - /* \ 79 - * da_monitor_handling_event_##name - checks if the monitor is ready to handle events \ 80 - */ \ 81 - static inline bool da_monitor_handling_event_##name(struct da_monitor *da_mon) \ 82 - { \ 83 - \ 84 - if (!da_monitor_enabled_##name()) \ 85 - return 0; \ 86 - \ 87 - /* monitor is actually monitoring */ \ 88 - if (unlikely(!da_monitoring_##name(da_mon))) \ 89 - return 0; \ 90 - \ 91 - return 1; \ 25 + #define DECLARE_DA_MON_GENERIC_HELPERS(name, type) 26 + 27 + static void react(type curr_state, type event) 28 + { 29 + rv_react(&rv_this, 30 + "rv: monitor %s does not allow event %s on state %s\n", 31 + __stringify(MONITOR_NAME), 32 + model_get_event_name(event), 33 + model_get_state_name(curr_state)); 34 + } 35 + 36 + /* 37 + * da_monitor_reset - reset a monitor and setting it to init state 38 + */ 39 + static inline void da_monitor_reset(struct da_monitor *da_mon) 40 + { 41 + da_mon->monitoring = 0; 42 + da_mon->curr_state = model_get_initial_state(); 43 + } 44 + 45 + /* 46 + * da_monitor_start - start monitoring 47 + * 48 + * The monitor will ignore all events until monitoring is set to true. 
This 49 + * function needs to be called to tell the monitor to start monitoring. 50 + */ 51 + static inline void da_monitor_start(struct da_monitor *da_mon) 52 + { 53 + da_mon->curr_state = model_get_initial_state(); 54 + da_mon->monitoring = 1; 55 + } 56 + 57 + /* 58 + * da_monitoring - returns true if the monitor is processing events 59 + */ 60 + static inline bool da_monitoring(struct da_monitor *da_mon) 61 + { 62 + return da_mon->monitoring; 63 + } 64 + 65 + /* 66 + * da_monitor_enabled - checks if the monitor is enabled 67 + */ 68 + static inline bool da_monitor_enabled(void) 69 + { 70 + /* global switch */ 71 + if (unlikely(!rv_monitoring_on())) 72 + return 0; 73 + 74 + /* monitor enabled */ 75 + if (unlikely(!rv_this.enabled)) 76 + return 0; 77 + 78 + return 1; 79 + } 80 + 81 + /* 82 + * da_monitor_handling_event - checks if the monitor is ready to handle events 83 + */ 84 + static inline bool da_monitor_handling_event(struct da_monitor *da_mon) 85 + { 86 + 87 + if (!da_monitor_enabled()) 88 + return 0; 89 + 90 + /* monitor is actually monitoring */ 91 + if (unlikely(!da_monitoring(da_mon))) 92 + return 0; 93 + 94 + return 1; 92 95 } 93 96 94 97 /* ··· 103 100 * warn and reset the monitor if it runs out of retries. The monitor should be 104 101 * able to handle various orders. 
105 102 */ 106 - #define DECLARE_DA_MON_MODEL_HANDLER_IMPLICIT(name, type) \ 107 - \ 108 - static inline bool \ 109 - da_event_##name(struct da_monitor *da_mon, enum events_##name event) \ 110 - { \ 111 - enum states_##name curr_state, next_state; \ 112 - \ 113 - curr_state = READ_ONCE(da_mon->curr_state); \ 114 - for (int i = 0; i < MAX_DA_RETRY_RACING_EVENTS; i++) { \ 115 - next_state = model_get_next_state_##name(curr_state, event); \ 116 - if (next_state == INVALID_STATE) { \ 117 - react_##name(curr_state, event); \ 118 - trace_error_##name(model_get_state_name_##name(curr_state), \ 119 - model_get_event_name_##name(event)); \ 120 - return false; \ 121 - } \ 122 - if (likely(try_cmpxchg(&da_mon->curr_state, &curr_state, next_state))) { \ 123 - trace_event_##name(model_get_state_name_##name(curr_state), \ 124 - model_get_event_name_##name(event), \ 125 - model_get_state_name_##name(next_state), \ 126 - model_is_final_state_##name(next_state)); \ 127 - return true; \ 128 - } \ 129 - } \ 130 - \ 131 - trace_rv_retries_error(#name, model_get_event_name_##name(event)); \ 132 - pr_warn("rv: " __stringify(MAX_DA_RETRY_RACING_EVENTS) \ 133 - " retries reached for event %s, resetting monitor %s", \ 134 - model_get_event_name_##name(event), #name); \ 135 - return false; \ 136 - } \ 103 + #if RV_MON_TYPE == RV_MON_GLOBAL || RV_MON_TYPE == RV_MON_PER_CPU 104 + 105 + static inline bool 106 + da_event(struct da_monitor *da_mon, enum events event) 107 + { 108 + enum states curr_state, next_state; 109 + 110 + curr_state = READ_ONCE(da_mon->curr_state); 111 + for (int i = 0; i < MAX_DA_RETRY_RACING_EVENTS; i++) { 112 + next_state = model_get_next_state(curr_state, event); 113 + if (next_state == INVALID_STATE) { 114 + react(curr_state, event); 115 + CONCATENATE(trace_error_, MONITOR_NAME)(model_get_state_name(curr_state), 116 + model_get_event_name(event)); 117 + return false; 118 + } 119 + if (likely(try_cmpxchg(&da_mon->curr_state, &curr_state, next_state))) { 120 + 
CONCATENATE(trace_event_, MONITOR_NAME)(model_get_state_name(curr_state), 121 + model_get_event_name(event), 122 + model_get_state_name(next_state), 123 + model_is_final_state(next_state)); 124 + return true; 125 + } 126 + } 127 + 128 + trace_rv_retries_error(__stringify(MONITOR_NAME), model_get_event_name(event)); 129 + pr_warn("rv: " __stringify(MAX_DA_RETRY_RACING_EVENTS) 130 + " retries reached for event %s, resetting monitor %s", 131 + model_get_event_name(event), __stringify(MONITOR_NAME)); 132 + return false; 133 + } 137 134 138 135 /* 139 136 * Event handler for per_task monitors. ··· 142 139 * warn and reset the monitor if it runs out of retries. The monitor should be 143 140 * able to handle various orders. 144 141 */ 145 - #define DECLARE_DA_MON_MODEL_HANDLER_PER_TASK(name, type) \ 146 - \ 147 - static inline bool da_event_##name(struct da_monitor *da_mon, struct task_struct *tsk, \ 148 - enum events_##name event) \ 149 - { \ 150 - enum states_##name curr_state, next_state; \ 151 - \ 152 - curr_state = READ_ONCE(da_mon->curr_state); \ 153 - for (int i = 0; i < MAX_DA_RETRY_RACING_EVENTS; i++) { \ 154 - next_state = model_get_next_state_##name(curr_state, event); \ 155 - if (next_state == INVALID_STATE) { \ 156 - react_##name(curr_state, event); \ 157 - trace_error_##name(tsk->pid, \ 158 - model_get_state_name_##name(curr_state), \ 159 - model_get_event_name_##name(event)); \ 160 - return false; \ 161 - } \ 162 - if (likely(try_cmpxchg(&da_mon->curr_state, &curr_state, next_state))) { \ 163 - trace_event_##name(tsk->pid, \ 164 - model_get_state_name_##name(curr_state), \ 165 - model_get_event_name_##name(event), \ 166 - model_get_state_name_##name(next_state), \ 167 - model_is_final_state_##name(next_state)); \ 168 - return true; \ 169 - } \ 170 - } \ 171 - \ 172 - trace_rv_retries_error(#name, model_get_event_name_##name(event)); \ 173 - pr_warn("rv: " __stringify(MAX_DA_RETRY_RACING_EVENTS) \ 174 - " retries reached for event %s, resetting monitor %s", 
\ 175 - model_get_event_name_##name(event), #name); \ 176 - return false; \ 142 + #elif RV_MON_TYPE == RV_MON_PER_TASK 143 + 144 + static inline bool da_event(struct da_monitor *da_mon, struct task_struct *tsk, 145 + enum events event) 146 + { 147 + enum states curr_state, next_state; 148 + 149 + curr_state = READ_ONCE(da_mon->curr_state); 150 + for (int i = 0; i < MAX_DA_RETRY_RACING_EVENTS; i++) { 151 + next_state = model_get_next_state(curr_state, event); 152 + if (next_state == INVALID_STATE) { 153 + react(curr_state, event); 154 + CONCATENATE(trace_error_, MONITOR_NAME)(tsk->pid, 155 + model_get_state_name(curr_state), 156 + model_get_event_name(event)); 157 + return false; 158 + } 159 + if (likely(try_cmpxchg(&da_mon->curr_state, &curr_state, next_state))) { 160 + CONCATENATE(trace_event_, MONITOR_NAME)(tsk->pid, 161 + model_get_state_name(curr_state), 162 + model_get_event_name(event), 163 + model_get_state_name(next_state), 164 + model_is_final_state(next_state)); 165 + return true; 166 + } 167 + } 168 + 169 + trace_rv_retries_error(__stringify(MONITOR_NAME), model_get_event_name(event)); 170 + pr_warn("rv: " __stringify(MAX_DA_RETRY_RACING_EVENTS) 171 + " retries reached for event %s, resetting monitor %s", 172 + model_get_event_name(event), __stringify(MONITOR_NAME)); 173 + return false; 177 174 } 175 + #endif 178 176 179 177 /* 180 178 * Functions to define, init and get a global monitor. 
181 179 */ 182 - #define DECLARE_DA_MON_INIT_GLOBAL(name, type) \ 183 - \ 184 - /* \ 185 - * global monitor (a single variable) \ 186 - */ \ 187 - static struct da_monitor da_mon_##name; \ 188 - \ 189 - /* \ 190 - * da_get_monitor_##name - return the global monitor address \ 191 - */ \ 192 - static struct da_monitor *da_get_monitor_##name(void) \ 193 - { \ 194 - return &da_mon_##name; \ 195 - } \ 196 - \ 197 - /* \ 198 - * da_monitor_reset_all_##name - reset the single monitor \ 199 - */ \ 200 - static void da_monitor_reset_all_##name(void) \ 201 - { \ 202 - da_monitor_reset_##name(da_get_monitor_##name()); \ 203 - } \ 204 - \ 205 - /* \ 206 - * da_monitor_init_##name - initialize a monitor \ 207 - */ \ 208 - static inline int da_monitor_init_##name(void) \ 209 - { \ 210 - da_monitor_reset_all_##name(); \ 211 - return 0; \ 212 - } \ 213 - \ 214 - /* \ 215 - * da_monitor_destroy_##name - destroy the monitor \ 216 - */ \ 217 - static inline void da_monitor_destroy_##name(void) \ 218 - { \ 219 - return; \ 180 + #if RV_MON_TYPE == RV_MON_GLOBAL 181 + 182 + /* 183 + * global monitor (a single variable) 184 + */ 185 + static struct da_monitor da_mon_this; 186 + 187 + /* 188 + * da_get_monitor - return the global monitor address 189 + */ 190 + static struct da_monitor *da_get_monitor(void) 191 + { 192 + return &da_mon_this; 193 + } 194 + 195 + /* 196 + * da_monitor_reset_all - reset the single monitor 197 + */ 198 + static void da_monitor_reset_all(void) 199 + { 200 + da_monitor_reset(da_get_monitor()); 201 + } 202 + 203 + /* 204 + * da_monitor_init - initialize a monitor 205 + */ 206 + static inline int da_monitor_init(void) 207 + { 208 + da_monitor_reset_all(); 209 + return 0; 210 + } 211 + 212 + /* 213 + * da_monitor_destroy - destroy the monitor 214 + */ 215 + static inline void da_monitor_destroy(void) 216 + { 217 + return; 220 218 } 221 219 222 220 /* 223 221 * Functions to define, init and get a per-cpu monitor. 
224 222 */ 225 - #define DECLARE_DA_MON_INIT_PER_CPU(name, type) \ 226 - \ 227 - /* \ 228 - * per-cpu monitor variables \ 229 - */ \ 230 - static DEFINE_PER_CPU(struct da_monitor, da_mon_##name); \ 231 - \ 232 - /* \ 233 - * da_get_monitor_##name - return current CPU monitor address \ 234 - */ \ 235 - static struct da_monitor *da_get_monitor_##name(void) \ 236 - { \ 237 - return this_cpu_ptr(&da_mon_##name); \ 238 - } \ 239 - \ 240 - /* \ 241 - * da_monitor_reset_all_##name - reset all CPUs' monitor \ 242 - */ \ 243 - static void da_monitor_reset_all_##name(void) \ 244 - { \ 245 - struct da_monitor *da_mon; \ 246 - int cpu; \ 247 - for_each_cpu(cpu, cpu_online_mask) { \ 248 - da_mon = per_cpu_ptr(&da_mon_##name, cpu); \ 249 - da_monitor_reset_##name(da_mon); \ 250 - } \ 251 - } \ 252 - \ 253 - /* \ 254 - * da_monitor_init_##name - initialize all CPUs' monitor \ 255 - */ \ 256 - static inline int da_monitor_init_##name(void) \ 257 - { \ 258 - da_monitor_reset_all_##name(); \ 259 - return 0; \ 260 - } \ 261 - \ 262 - /* \ 263 - * da_monitor_destroy_##name - destroy the monitor \ 264 - */ \ 265 - static inline void da_monitor_destroy_##name(void) \ 266 - { \ 267 - return; \ 223 + #elif RV_MON_TYPE == RV_MON_PER_CPU 224 + 225 + /* 226 + * per-cpu monitor variables 227 + */ 228 + static DEFINE_PER_CPU(struct da_monitor, da_mon_this); 229 + 230 + /* 231 + * da_get_monitor - return current CPU monitor address 232 + */ 233 + static struct da_monitor *da_get_monitor(void) 234 + { 235 + return this_cpu_ptr(&da_mon_this); 236 + } 237 + 238 + /* 239 + * da_monitor_reset_all - reset all CPUs' monitor 240 + */ 241 + static void da_monitor_reset_all(void) 242 + { 243 + struct da_monitor *da_mon; 244 + int cpu; 245 + for_each_cpu(cpu, cpu_online_mask) { 246 + da_mon = per_cpu_ptr(&da_mon_this, cpu); 247 + da_monitor_reset(da_mon); 248 + } 249 + } 250 + 251 + /* 252 + * da_monitor_init - initialize all CPUs' monitor 253 + */ 254 + static inline int da_monitor_init(void) 255 + { 256 
+ da_monitor_reset_all(); 257 + return 0; 258 + } 259 + 260 + /* 261 + * da_monitor_destroy - destroy the monitor 262 + */ 263 + static inline void da_monitor_destroy(void) 264 + { 265 + return; 268 266 } 269 267 270 268 /* 271 269 * Functions to define, init and get a per-task monitor. 272 270 */ 273 - #define DECLARE_DA_MON_INIT_PER_TASK(name, type) \ 274 - \ 275 - /* \ 276 - * The per-task monitor is stored a vector in the task struct. This variable \ 277 - * stores the position on the vector reserved for this monitor. \ 278 - */ \ 279 - static int task_mon_slot_##name = RV_PER_TASK_MONITOR_INIT; \ 280 - \ 281 - /* \ 282 - * da_get_monitor_##name - return the monitor in the allocated slot for tsk \ 283 - */ \ 284 - static inline struct da_monitor *da_get_monitor_##name(struct task_struct *tsk) \ 285 - { \ 286 - return &tsk->rv[task_mon_slot_##name].da_mon; \ 287 - } \ 288 - \ 289 - static void da_monitor_reset_all_##name(void) \ 290 - { \ 291 - struct task_struct *g, *p; \ 292 - int cpu; \ 293 - \ 294 - read_lock(&tasklist_lock); \ 295 - for_each_process_thread(g, p) \ 296 - da_monitor_reset_##name(da_get_monitor_##name(p)); \ 297 - for_each_present_cpu(cpu) \ 298 - da_monitor_reset_##name(da_get_monitor_##name(idle_task(cpu))); \ 299 - read_unlock(&tasklist_lock); \ 300 - } \ 301 - \ 302 - /* \ 303 - * da_monitor_init_##name - initialize the per-task monitor \ 304 - * \ 305 - * Try to allocate a slot in the task's vector of monitors. If there \ 306 - * is an available slot, use it and reset all task's monitor. 
\ 307 - */ \ 308 - static int da_monitor_init_##name(void) \ 309 - { \ 310 - int slot; \ 311 - \ 312 - slot = rv_get_task_monitor_slot(); \ 313 - if (slot < 0 || slot >= RV_PER_TASK_MONITOR_INIT) \ 314 - return slot; \ 315 - \ 316 - task_mon_slot_##name = slot; \ 317 - \ 318 - da_monitor_reset_all_##name(); \ 319 - return 0; \ 320 - } \ 321 - \ 322 - /* \ 323 - * da_monitor_destroy_##name - return the allocated slot \ 324 - */ \ 325 - static inline void da_monitor_destroy_##name(void) \ 326 - { \ 327 - if (task_mon_slot_##name == RV_PER_TASK_MONITOR_INIT) { \ 328 - WARN_ONCE(1, "Disabling a disabled monitor: " #name); \ 329 - return; \ 330 - } \ 331 - rv_put_task_monitor_slot(task_mon_slot_##name); \ 332 - task_mon_slot_##name = RV_PER_TASK_MONITOR_INIT; \ 333 - return; \ 271 + #elif RV_MON_TYPE == RV_MON_PER_TASK 272 + 273 + /* 274 + * The per-task monitor is stored a vector in the task struct. This variable 275 + * stores the position on the vector reserved for this monitor. 276 + */ 277 + static int task_mon_slot = RV_PER_TASK_MONITOR_INIT; 278 + 279 + /* 280 + * da_get_monitor - return the monitor in the allocated slot for tsk 281 + */ 282 + static inline struct da_monitor *da_get_monitor(struct task_struct *tsk) 283 + { 284 + return &tsk->rv[task_mon_slot].da_mon; 285 + } 286 + 287 + static void da_monitor_reset_all(void) 288 + { 289 + struct task_struct *g, *p; 290 + int cpu; 291 + 292 + read_lock(&tasklist_lock); 293 + for_each_process_thread(g, p) 294 + da_monitor_reset(da_get_monitor(p)); 295 + for_each_present_cpu(cpu) 296 + da_monitor_reset(da_get_monitor(idle_task(cpu))); 297 + read_unlock(&tasklist_lock); 334 298 } 335 299 336 300 /* 337 - * Handle event for implicit monitor: da_get_monitor_##name() will figure out 301 + * da_monitor_init - initialize the per-task monitor 302 + * 303 + * Try to allocate a slot in the task's vector of monitors. If there 304 + * is an available slot, use it and reset all task's monitor. 
305 + */ 306 + static int da_monitor_init(void) 307 + { 308 + int slot; 309 + 310 + slot = rv_get_task_monitor_slot(); 311 + if (slot < 0 || slot >= RV_PER_TASK_MONITOR_INIT) 312 + return slot; 313 + 314 + task_mon_slot = slot; 315 + 316 + da_monitor_reset_all(); 317 + return 0; 318 + } 319 + 320 + /* 321 + * da_monitor_destroy - return the allocated slot 322 + */ 323 + static inline void da_monitor_destroy(void) 324 + { 325 + if (task_mon_slot == RV_PER_TASK_MONITOR_INIT) { 326 + WARN_ONCE(1, "Disabling a disabled monitor: " __stringify(MONITOR_NAME)); 327 + return; 328 + } 329 + rv_put_task_monitor_slot(task_mon_slot); 330 + task_mon_slot = RV_PER_TASK_MONITOR_INIT; 331 + return; 332 + } 333 + #endif 334 + 335 + /* 336 + * Handle event for implicit monitor: da_get_monitor() will figure out 338 337 * the monitor. 339 338 */ 340 - #define DECLARE_DA_MON_MONITOR_HANDLER_IMPLICIT(name, type) \ 341 - \ 342 - static inline void __da_handle_event_##name(struct da_monitor *da_mon, \ 343 - enum events_##name event) \ 344 - { \ 345 - bool retval; \ 346 - \ 347 - retval = da_event_##name(da_mon, event); \ 348 - if (!retval) \ 349 - da_monitor_reset_##name(da_mon); \ 350 - } \ 351 - \ 352 - /* \ 353 - * da_handle_event_##name - handle an event \ 354 - */ \ 355 - static inline void da_handle_event_##name(enum events_##name event) \ 356 - { \ 357 - struct da_monitor *da_mon = da_get_monitor_##name(); \ 358 - bool retval; \ 359 - \ 360 - retval = da_monitor_handling_event_##name(da_mon); \ 361 - if (!retval) \ 362 - return; \ 363 - \ 364 - __da_handle_event_##name(da_mon, event); \ 365 - } \ 366 - \ 367 - /* \ 368 - * da_handle_start_event_##name - start monitoring or handle event \ 369 - * \ 370 - * This function is used to notify the monitor that the system is returning \ 371 - * to the initial state, so the monitor can start monitoring in the next event. \ 372 - * Thus: \ 373 - * \ 374 - * If the monitor already started, handle the event. 
\ 375 - * If the monitor did not start yet, start the monitor but skip the event. \ 376 - */ \ 377 - static inline bool da_handle_start_event_##name(enum events_##name event) \ 378 - { \ 379 - struct da_monitor *da_mon; \ 380 - \ 381 - if (!da_monitor_enabled_##name()) \ 382 - return 0; \ 383 - \ 384 - da_mon = da_get_monitor_##name(); \ 385 - \ 386 - if (unlikely(!da_monitoring_##name(da_mon))) { \ 387 - da_monitor_start_##name(da_mon); \ 388 - return 0; \ 389 - } \ 390 - \ 391 - __da_handle_event_##name(da_mon, event); \ 392 - \ 393 - return 1; \ 394 - } \ 395 - \ 396 - /* \ 397 - * da_handle_start_run_event_##name - start monitoring and handle event \ 398 - * \ 399 - * This function is used to notify the monitor that the system is in the \ 400 - * initial state, so the monitor can start monitoring and handling event. \ 401 - */ \ 402 - static inline bool da_handle_start_run_event_##name(enum events_##name event) \ 403 - { \ 404 - struct da_monitor *da_mon; \ 405 - \ 406 - if (!da_monitor_enabled_##name()) \ 407 - return 0; \ 408 - \ 409 - da_mon = da_get_monitor_##name(); \ 410 - \ 411 - if (unlikely(!da_monitoring_##name(da_mon))) \ 412 - da_monitor_start_##name(da_mon); \ 413 - \ 414 - __da_handle_event_##name(da_mon, event); \ 415 - \ 416 - return 1; \ 339 + #if RV_MON_TYPE == RV_MON_GLOBAL || RV_MON_TYPE == RV_MON_PER_CPU 340 + 341 + static inline void __da_handle_event(struct da_monitor *da_mon, 342 + enum events event) 343 + { 344 + bool retval; 345 + 346 + retval = da_event(da_mon, event); 347 + if (!retval) 348 + da_monitor_reset(da_mon); 349 + } 350 + 351 + /* 352 + * da_handle_event - handle an event 353 + */ 354 + static inline void da_handle_event(enum events event) 355 + { 356 + struct da_monitor *da_mon = da_get_monitor(); 357 + bool retval; 358 + 359 + retval = da_monitor_handling_event(da_mon); 360 + if (!retval) 361 + return; 362 + 363 + __da_handle_event(da_mon, event); 364 + } 365 + 366 + /* 367 + * da_handle_start_event - start monitoring or 
handle event 368 + * 369 + * This function is used to notify the monitor that the system is returning 370 + * to the initial state, so the monitor can start monitoring in the next event. 371 + * Thus: 372 + * 373 + * If the monitor already started, handle the event. 374 + * If the monitor did not start yet, start the monitor but skip the event. 375 + */ 376 + static inline bool da_handle_start_event(enum events event) 377 + { 378 + struct da_monitor *da_mon; 379 + 380 + if (!da_monitor_enabled()) 381 + return 0; 382 + 383 + da_mon = da_get_monitor(); 384 + 385 + if (unlikely(!da_monitoring(da_mon))) { 386 + da_monitor_start(da_mon); 387 + return 0; 388 + } 389 + 390 + __da_handle_event(da_mon, event); 391 + 392 + return 1; 393 + } 394 + 395 + /* 396 + * da_handle_start_run_event - start monitoring and handle event 397 + * 398 + * This function is used to notify the monitor that the system is in the 399 + * initial state, so the monitor can start monitoring and handling event. 400 + */ 401 + static inline bool da_handle_start_run_event(enum events event) 402 + { 403 + struct da_monitor *da_mon; 404 + 405 + if (!da_monitor_enabled()) 406 + return 0; 407 + 408 + da_mon = da_get_monitor(); 409 + 410 + if (unlikely(!da_monitoring(da_mon))) 411 + da_monitor_start(da_mon); 412 + 413 + __da_handle_event(da_mon, event); 414 + 415 + return 1; 417 416 } 418 417 419 418 /* 420 419 * Handle event for per task. 
421 420 */ 422 - #define DECLARE_DA_MON_MONITOR_HANDLER_PER_TASK(name, type) \ 423 - \ 424 - static inline void \ 425 - __da_handle_event_##name(struct da_monitor *da_mon, struct task_struct *tsk, \ 426 - enum events_##name event) \ 427 - { \ 428 - bool retval; \ 429 - \ 430 - retval = da_event_##name(da_mon, tsk, event); \ 431 - if (!retval) \ 432 - da_monitor_reset_##name(da_mon); \ 433 - } \ 434 - \ 435 - /* \ 436 - * da_handle_event_##name - handle an event \ 437 - */ \ 438 - static inline void \ 439 - da_handle_event_##name(struct task_struct *tsk, enum events_##name event) \ 440 - { \ 441 - struct da_monitor *da_mon = da_get_monitor_##name(tsk); \ 442 - bool retval; \ 443 - \ 444 - retval = da_monitor_handling_event_##name(da_mon); \ 445 - if (!retval) \ 446 - return; \ 447 - \ 448 - __da_handle_event_##name(da_mon, tsk, event); \ 449 - } \ 450 - \ 451 - /* \ 452 - * da_handle_start_event_##name - start monitoring or handle event \ 453 - * \ 454 - * This function is used to notify the monitor that the system is returning \ 455 - * to the initial state, so the monitor can start monitoring in the next event. \ 456 - * Thus: \ 457 - * \ 458 - * If the monitor already started, handle the event. \ 459 - * If the monitor did not start yet, start the monitor but skip the event. 
\ 460 - */ \ 461 - static inline bool \ 462 - da_handle_start_event_##name(struct task_struct *tsk, enum events_##name event) \ 463 - { \ 464 - struct da_monitor *da_mon; \ 465 - \ 466 - if (!da_monitor_enabled_##name()) \ 467 - return 0; \ 468 - \ 469 - da_mon = da_get_monitor_##name(tsk); \ 470 - \ 471 - if (unlikely(!da_monitoring_##name(da_mon))) { \ 472 - da_monitor_start_##name(da_mon); \ 473 - return 0; \ 474 - } \ 475 - \ 476 - __da_handle_event_##name(da_mon, tsk, event); \ 477 - \ 478 - return 1; \ 479 - } \ 480 - \ 481 - /* \ 482 - * da_handle_start_run_event_##name - start monitoring and handle event \ 483 - * \ 484 - * This function is used to notify the monitor that the system is in the \ 485 - * initial state, so the monitor can start monitoring and handling event. \ 486 - */ \ 487 - static inline bool \ 488 - da_handle_start_run_event_##name(struct task_struct *tsk, enum events_##name event) \ 489 - { \ 490 - struct da_monitor *da_mon; \ 491 - \ 492 - if (!da_monitor_enabled_##name()) \ 493 - return 0; \ 494 - \ 495 - da_mon = da_get_monitor_##name(tsk); \ 496 - \ 497 - if (unlikely(!da_monitoring_##name(da_mon))) \ 498 - da_monitor_start_##name(da_mon); \ 499 - \ 500 - __da_handle_event_##name(da_mon, tsk, event); \ 501 - \ 502 - return 1; \ 421 + #elif RV_MON_TYPE == RV_MON_PER_TASK 422 + 423 + static inline void 424 + __da_handle_event(struct da_monitor *da_mon, struct task_struct *tsk, 425 + enum events event) 426 + { 427 + bool retval; 428 + 429 + retval = da_event(da_mon, tsk, event); 430 + if (!retval) 431 + da_monitor_reset(da_mon); 503 432 } 433 + 434 + /* 435 + * da_handle_event - handle an event 436 + */ 437 + static inline void 438 + da_handle_event(struct task_struct *tsk, enum events event) 439 + { 440 + struct da_monitor *da_mon = da_get_monitor(tsk); 441 + bool retval; 442 + 443 + retval = da_monitor_handling_event(da_mon); 444 + if (!retval) 445 + return; 446 + 447 + __da_handle_event(da_mon, tsk, event); 448 + } 449 + 450 + /* 451 
+ * da_handle_start_event - start monitoring or handle event 452 + * 453 + * This function is used to notify the monitor that the system is returning 454 + * to the initial state, so the monitor can start monitoring in the next event. 455 + * Thus: 456 + * 457 + * If the monitor already started, handle the event. 458 + * If the monitor did not start yet, start the monitor but skip the event. 459 + */ 460 + static inline bool 461 + da_handle_start_event(struct task_struct *tsk, enum events event) 462 + { 463 + struct da_monitor *da_mon; 464 + 465 + if (!da_monitor_enabled()) 466 + return 0; 467 + 468 + da_mon = da_get_monitor(tsk); 469 + 470 + if (unlikely(!da_monitoring(da_mon))) { 471 + da_monitor_start(da_mon); 472 + return 0; 473 + } 474 + 475 + __da_handle_event(da_mon, tsk, event); 476 + 477 + return 1; 478 + } 479 + 480 + /* 481 + * da_handle_start_run_event - start monitoring and handle event 482 + * 483 + * This function is used to notify the monitor that the system is in the 484 + * initial state, so the monitor can start monitoring and handling event. 485 + */ 486 + static inline bool 487 + da_handle_start_run_event(struct task_struct *tsk, enum events event) 488 + { 489 + struct da_monitor *da_mon; 490 + 491 + if (!da_monitor_enabled()) 492 + return 0; 493 + 494 + da_mon = da_get_monitor(tsk); 495 + 496 + if (unlikely(!da_monitoring(da_mon))) 497 + da_monitor_start(da_mon); 498 + 499 + __da_handle_event(da_mon, tsk, event); 500 + 501 + return 1; 502 + } 503 + #endif 504 504 505 505 /* 506 506 * Entry point for the global monitor. 
507 507 */ 508 - #define DECLARE_DA_MON_GLOBAL(name, type) \ 509 - \ 510 - DECLARE_AUTOMATA_HELPERS(name, type) \ 511 - DECLARE_DA_MON_GENERIC_HELPERS(name, type) \ 512 - DECLARE_DA_MON_MODEL_HANDLER_IMPLICIT(name, type) \ 513 - DECLARE_DA_MON_INIT_GLOBAL(name, type) \ 514 - DECLARE_DA_MON_MONITOR_HANDLER_IMPLICIT(name, type) 508 + #define DECLARE_DA_MON_GLOBAL(name, type) 515 509 516 510 /* 517 511 * Entry point for the per-cpu monitor. 518 512 */ 519 - #define DECLARE_DA_MON_PER_CPU(name, type) \ 520 - \ 521 - DECLARE_AUTOMATA_HELPERS(name, type) \ 522 - DECLARE_DA_MON_GENERIC_HELPERS(name, type) \ 523 - DECLARE_DA_MON_MODEL_HANDLER_IMPLICIT(name, type) \ 524 - DECLARE_DA_MON_INIT_PER_CPU(name, type) \ 525 - DECLARE_DA_MON_MONITOR_HANDLER_IMPLICIT(name, type) 513 + #define DECLARE_DA_MON_PER_CPU(name, type) 526 514 527 515 /* 528 516 * Entry point for the per-task monitor. 529 517 */ 530 - #define DECLARE_DA_MON_PER_TASK(name, type) \ 531 - \ 532 - DECLARE_AUTOMATA_HELPERS(name, type) \ 533 - DECLARE_DA_MON_GENERIC_HELPERS(name, type) \ 534 - DECLARE_DA_MON_MODEL_HANDLER_PER_TASK(name, type) \ 535 - DECLARE_DA_MON_INIT_PER_TASK(name, type) \ 536 - DECLARE_DA_MON_MONITOR_HANDLER_PER_TASK(name, type) 518 + #define DECLARE_DA_MON_PER_TASK(name, type)
+14 -16
kernel/trace/rv/monitors/nrp/nrp.c
··· 6 6 #include <linux/init.h> 7 7 #include <linux/rv.h> 8 8 #include <rv/instrumentation.h> 9 - #include <rv/da_monitor.h> 10 9 11 10 #define MODULE_NAME "nrp" 12 11 ··· 14 15 #include <rv_trace.h> 15 16 #include <monitors/sched/sched.h> 16 17 18 + #define RV_MON_TYPE RV_MON_PER_TASK 17 19 #include "nrp.h" 18 - 19 - static struct rv_monitor rv_nrp; 20 - DECLARE_DA_MON_PER_TASK(nrp, unsigned char); 20 + #include <rv/da_monitor.h> 21 21 22 22 #ifdef CONFIG_X86_LOCAL_APIC 23 23 #include <asm/trace/irq_vectors.h> 24 24 25 25 static void handle_vector_irq_entry(void *data, int vector) 26 26 { 27 - da_handle_event_nrp(current, irq_entry_nrp); 27 + da_handle_event(current, irq_entry_nrp); 28 28 } 29 29 30 30 static void attach_vector_irq(void) ··· 58 60 59 61 static void handle_irq_entry(void *data, int irq, struct irqaction *action) 60 62 { 61 - da_handle_event_nrp(current, irq_entry_nrp); 63 + da_handle_event(current, irq_entry_nrp); 62 64 } 63 65 64 66 static void handle_sched_need_resched(void *data, struct task_struct *tsk, ··· 70 72 * which may not mirror the system state but makes the monitor simpler, 71 73 */ 72 74 if (tif == TIF_NEED_RESCHED) 73 - da_handle_start_event_nrp(tsk, sched_need_resched_nrp); 75 + da_handle_start_event(tsk, sched_need_resched_nrp); 74 76 } 75 77 76 78 static void handle_schedule_entry(void *data, bool preempt) 77 79 { 78 80 if (preempt) 79 - da_handle_event_nrp(current, schedule_entry_preempt_nrp); 81 + da_handle_event(current, schedule_entry_preempt_nrp); 80 82 else 81 - da_handle_event_nrp(current, schedule_entry_nrp); 83 + da_handle_event(current, schedule_entry_nrp); 82 84 } 83 85 84 86 static int enable_nrp(void) 85 87 { 86 88 int retval; 87 89 88 - retval = da_monitor_init_nrp(); 90 + retval = da_monitor_init(); 89 91 if (retval) 90 92 return retval; 91 93 ··· 99 101 100 102 static void disable_nrp(void) 101 103 { 102 - rv_nrp.enabled = 0; 104 + rv_this.enabled = 0; 103 105 104 106 rv_detach_trace_probe("nrp", irq_handler_entry, 
handle_irq_entry); 105 107 rv_detach_trace_probe("nrp", sched_set_need_resched_tp, handle_sched_need_resched); 106 108 rv_detach_trace_probe("nrp", sched_entry_tp, handle_schedule_entry); 107 109 detach_vector_irq(); 108 110 109 - da_monitor_destroy_nrp(); 111 + da_monitor_destroy(); 110 112 } 111 113 112 - static struct rv_monitor rv_nrp = { 114 + static struct rv_monitor rv_this = { 113 115 .name = "nrp", 114 116 .description = "need resched preempts.", 115 117 .enable = enable_nrp, 116 118 .disable = disable_nrp, 117 - .reset = da_monitor_reset_all_nrp, 119 + .reset = da_monitor_reset_all, 118 120 .enabled = 0, 119 121 }; 120 122 121 123 static int __init register_nrp(void) 122 124 { 123 - return rv_register_monitor(&rv_nrp, &rv_sched); 125 + return rv_register_monitor(&rv_this, &rv_sched); 124 126 } 125 127 126 128 static void __exit unregister_nrp(void) 127 129 { 128 - rv_unregister_monitor(&rv_nrp); 130 + rv_unregister_monitor(&rv_this); 129 131 } 130 132 131 133 module_init(register_nrp);
+2
kernel/trace/rv/monitors/nrp/nrp.h
··· 5 5 * Documentation/trace/rv/deterministic_automata.rst 6 6 */ 7 7 8 + #define MONITOR_NAME nrp 9 + 8 10 enum states_nrp { 9 11 preempt_irq_nrp = 0, 10 12 any_thread_running_nrp,
+19 -21
kernel/trace/rv/monitors/opid/opid.c
··· 6 6 #include <linux/init.h> 7 7 #include <linux/rv.h> 8 8 #include <rv/instrumentation.h> 9 - #include <rv/da_monitor.h> 10 9 11 10 #define MODULE_NAME "opid" 12 11 ··· 15 16 #include <rv_trace.h> 16 17 #include <monitors/sched/sched.h> 17 18 19 + #define RV_MON_TYPE RV_MON_PER_CPU 18 20 #include "opid.h" 19 - 20 - static struct rv_monitor rv_opid; 21 - DECLARE_DA_MON_PER_CPU(opid, unsigned char); 21 + #include <rv/da_monitor.h> 22 22 23 23 #ifdef CONFIG_X86_LOCAL_APIC 24 24 #include <asm/trace/irq_vectors.h> 25 25 26 26 static void handle_vector_irq_entry(void *data, int vector) 27 27 { 28 - da_handle_event_opid(irq_entry_opid); 28 + da_handle_event(irq_entry_opid); 29 29 } 30 30 31 31 static void attach_vector_irq(void) ··· 59 61 60 62 static void handle_irq_disable(void *data, unsigned long ip, unsigned long parent_ip) 61 63 { 62 - da_handle_event_opid(irq_disable_opid); 64 + da_handle_event(irq_disable_opid); 63 65 } 64 66 65 67 static void handle_irq_enable(void *data, unsigned long ip, unsigned long parent_ip) 66 68 { 67 - da_handle_event_opid(irq_enable_opid); 69 + da_handle_event(irq_enable_opid); 68 70 } 69 71 70 72 static void handle_irq_entry(void *data, int irq, struct irqaction *action) 71 73 { 72 - da_handle_event_opid(irq_entry_opid); 74 + da_handle_event(irq_entry_opid); 73 75 } 74 76 75 77 static void handle_preempt_disable(void *data, unsigned long ip, unsigned long parent_ip) 76 78 { 77 - da_handle_event_opid(preempt_disable_opid); 79 + da_handle_event(preempt_disable_opid); 78 80 } 79 81 80 82 static void handle_preempt_enable(void *data, unsigned long ip, unsigned long parent_ip) 81 83 { 82 - da_handle_event_opid(preempt_enable_opid); 84 + da_handle_event(preempt_enable_opid); 83 85 } 84 86 85 87 static void handle_sched_need_resched(void *data, struct task_struct *tsk, int cpu, int tif) 86 88 { 87 89 /* The monitor's intitial state is not in_irq */ 88 90 if (this_cpu_read(hardirq_context)) 89 - 
da_handle_event_opid(sched_need_resched_opid); 91 + da_handle_event(sched_need_resched_opid); 90 92 else 91 - da_handle_start_event_opid(sched_need_resched_opid); 93 + da_handle_start_event(sched_need_resched_opid); 92 94 } 93 95 94 96 static void handle_sched_waking(void *data, struct task_struct *p) 95 97 { 96 98 /* The monitor's intitial state is not in_irq */ 97 99 if (this_cpu_read(hardirq_context)) 98 - da_handle_event_opid(sched_waking_opid); 100 + da_handle_event(sched_waking_opid); 99 101 else 100 - da_handle_start_event_opid(sched_waking_opid); 102 + da_handle_start_event(sched_waking_opid); 101 103 } 102 104 103 105 static int enable_opid(void) 104 106 { 105 107 int retval; 106 108 107 - retval = da_monitor_init_opid(); 109 + retval = da_monitor_init(); 108 110 if (retval) 109 111 return retval; 110 112 ··· 122 124 123 125 static void disable_opid(void) 124 126 { 125 - rv_opid.enabled = 0; 127 + rv_this.enabled = 0; 126 128 127 129 rv_detach_trace_probe("opid", irq_disable, handle_irq_disable); 128 130 rv_detach_trace_probe("opid", irq_enable, handle_irq_enable); ··· 133 135 rv_detach_trace_probe("opid", sched_waking, handle_sched_waking); 134 136 detach_vector_irq(); 135 137 136 - da_monitor_destroy_opid(); 138 + da_monitor_destroy(); 137 139 } 138 140 139 141 /* 140 142 * This is the monitor register section. 
141 143 */ 142 - static struct rv_monitor rv_opid = { 144 + static struct rv_monitor rv_this = { 143 145 .name = "opid", 144 146 .description = "operations with preemption and irq disabled.", 145 147 .enable = enable_opid, 146 148 .disable = disable_opid, 147 - .reset = da_monitor_reset_all_opid, 149 + .reset = da_monitor_reset_all, 148 150 .enabled = 0, 149 151 }; 150 152 151 153 static int __init register_opid(void) 152 154 { 153 - return rv_register_monitor(&rv_opid, &rv_sched); 155 + return rv_register_monitor(&rv_this, &rv_sched); 154 156 } 155 157 156 158 static void __exit unregister_opid(void) 157 159 { 158 - rv_unregister_monitor(&rv_opid); 160 + rv_unregister_monitor(&rv_this); 159 161 } 160 162 161 163 module_init(register_opid);
+2
kernel/trace/rv/monitors/opid/opid.h
··· 5 5 * Documentation/trace/rv/deterministic_automata.rst 6 6 */ 7 7 8 + #define MONITOR_NAME opid 9 + 8 10 enum states_opid { 9 11 disabled_opid = 0, 10 12 enabled_opid,
+12 -14
kernel/trace/rv/monitors/sco/sco.c
··· 6 6 #include <linux/init.h> 7 7 #include <linux/rv.h> 8 8 #include <rv/instrumentation.h> 9 - #include <rv/da_monitor.h> 10 9 11 10 #define MODULE_NAME "sco" 12 11 ··· 13 14 #include <rv_trace.h> 14 15 #include <monitors/sched/sched.h> 15 16 17 + #define RV_MON_TYPE RV_MON_PER_CPU 16 18 #include "sco.h" 17 - 18 - static struct rv_monitor rv_sco; 19 - DECLARE_DA_MON_PER_CPU(sco, unsigned char); 19 + #include <rv/da_monitor.h> 20 20 21 21 static void handle_sched_set_state(void *data, struct task_struct *tsk, int state) 22 22 { 23 - da_handle_start_event_sco(sched_set_state_sco); 23 + da_handle_start_event(sched_set_state_sco); 24 24 } 25 25 26 26 static void handle_schedule_entry(void *data, bool preempt) 27 27 { 28 - da_handle_event_sco(schedule_entry_sco); 28 + da_handle_event(schedule_entry_sco); 29 29 } 30 30 31 31 static void handle_schedule_exit(void *data, bool is_switch) 32 32 { 33 - da_handle_start_event_sco(schedule_exit_sco); 33 + da_handle_start_event(schedule_exit_sco); 34 34 } 35 35 36 36 static int enable_sco(void) 37 37 { 38 38 int retval; 39 39 40 - retval = da_monitor_init_sco(); 40 + retval = da_monitor_init(); 41 41 if (retval) 42 42 return retval; 43 43 ··· 49 51 50 52 static void disable_sco(void) 51 53 { 52 - rv_sco.enabled = 0; 54 + rv_this.enabled = 0; 53 55 54 56 rv_detach_trace_probe("sco", sched_set_state_tp, handle_sched_set_state); 55 57 rv_detach_trace_probe("sco", sched_entry_tp, handle_schedule_entry); 56 58 rv_detach_trace_probe("sco", sched_exit_tp, handle_schedule_exit); 57 59 58 - da_monitor_destroy_sco(); 60 + da_monitor_destroy(); 59 61 } 60 62 61 - static struct rv_monitor rv_sco = { 63 + static struct rv_monitor rv_this = { 62 64 .name = "sco", 63 65 .description = "scheduling context operations.", 64 66 .enable = enable_sco, 65 67 .disable = disable_sco, 66 - .reset = da_monitor_reset_all_sco, 68 + .reset = da_monitor_reset_all, 67 69 .enabled = 0, 68 70 }; 69 71 70 72 static int __init register_sco(void) 71 73 { 72 - 
return rv_register_monitor(&rv_sco, &rv_sched); 74 + return rv_register_monitor(&rv_this, &rv_sched); 73 75 } 74 76 75 77 static void __exit unregister_sco(void) 76 78 { 77 - rv_unregister_monitor(&rv_sco); 79 + rv_unregister_monitor(&rv_this); 78 80 } 79 81 80 82 module_init(register_sco);
+2
kernel/trace/rv/monitors/sco/sco.h
··· 5 5 * Documentation/trace/rv/deterministic_automata.rst 6 6 */ 7 7 8 + #define MONITOR_NAME sco 9 + 8 10 enum states_sco { 9 11 thread_context_sco = 0, 10 12 scheduling_context_sco,
+13 -15
kernel/trace/rv/monitors/scpd/scpd.c
··· 6 6 #include <linux/init.h> 7 7 #include <linux/rv.h> 8 8 #include <rv/instrumentation.h> 9 - #include <rv/da_monitor.h> 10 9 11 10 #define MODULE_NAME "scpd" 12 11 ··· 14 15 #include <rv_trace.h> 15 16 #include <monitors/sched/sched.h> 16 17 18 + #define RV_MON_TYPE RV_MON_PER_CPU 17 19 #include "scpd.h" 18 - 19 - static struct rv_monitor rv_scpd; 20 - DECLARE_DA_MON_PER_CPU(scpd, unsigned char); 20 + #include <rv/da_monitor.h> 21 21 22 22 static void handle_preempt_disable(void *data, unsigned long ip, unsigned long parent_ip) 23 23 { 24 - da_handle_event_scpd(preempt_disable_scpd); 24 + da_handle_event(preempt_disable_scpd); 25 25 } 26 26 27 27 static void handle_preempt_enable(void *data, unsigned long ip, unsigned long parent_ip) 28 28 { 29 - da_handle_start_event_scpd(preempt_enable_scpd); 29 + da_handle_start_event(preempt_enable_scpd); 30 30 } 31 31 32 32 static void handle_schedule_entry(void *data, bool preempt) 33 33 { 34 - da_handle_event_scpd(schedule_entry_scpd); 34 + da_handle_event(schedule_entry_scpd); 35 35 } 36 36 37 37 static void handle_schedule_exit(void *data, bool is_switch) 38 38 { 39 - da_handle_event_scpd(schedule_exit_scpd); 39 + da_handle_event(schedule_exit_scpd); 40 40 } 41 41 42 42 static int enable_scpd(void) 43 43 { 44 44 int retval; 45 45 46 - retval = da_monitor_init_scpd(); 46 + retval = da_monitor_init(); 47 47 if (retval) 48 48 return retval; 49 49 ··· 56 58 57 59 static void disable_scpd(void) 58 60 { 59 - rv_scpd.enabled = 0; 61 + rv_this.enabled = 0; 60 62 61 63 rv_detach_trace_probe("scpd", preempt_disable, handle_preempt_disable); 62 64 rv_detach_trace_probe("scpd", preempt_enable, handle_preempt_enable); 63 65 rv_detach_trace_probe("scpd", sched_entry_tp, handle_schedule_entry); 64 66 rv_detach_trace_probe("scpd", sched_exit_tp, handle_schedule_exit); 65 67 66 - da_monitor_destroy_scpd(); 68 + da_monitor_destroy(); 67 69 } 68 70 69 - static struct rv_monitor rv_scpd = { 71 + static struct rv_monitor rv_this = { 70 72 
.name = "scpd", 71 73 .description = "schedule called with preemption disabled.", 72 74 .enable = enable_scpd, 73 75 .disable = disable_scpd, 74 - .reset = da_monitor_reset_all_scpd, 76 + .reset = da_monitor_reset_all, 75 77 .enabled = 0, 76 78 }; 77 79 78 80 static int __init register_scpd(void) 79 81 { 80 - return rv_register_monitor(&rv_scpd, &rv_sched); 82 + return rv_register_monitor(&rv_this, &rv_sched); 81 83 } 82 84 83 85 static void __exit unregister_scpd(void) 84 86 { 85 - rv_unregister_monitor(&rv_scpd); 87 + rv_unregister_monitor(&rv_this); 86 88 } 87 89 88 90 module_init(register_scpd);
+2
kernel/trace/rv/monitors/scpd/scpd.h
··· 5 5 * Documentation/trace/rv/deterministic_automata.rst 6 6 */ 7 7 8 + #define MONITOR_NAME scpd 9 + 8 10 enum states_scpd { 9 11 cant_sched_scpd = 0, 10 12 can_sched_scpd,
+13 -15
kernel/trace/rv/monitors/snep/snep.c
··· 6 6 #include <linux/init.h> 7 7 #include <linux/rv.h> 8 8 #include <rv/instrumentation.h> 9 - #include <rv/da_monitor.h> 10 9 11 10 #define MODULE_NAME "snep" 12 11 ··· 14 15 #include <rv_trace.h> 15 16 #include <monitors/sched/sched.h> 16 17 18 + #define RV_MON_TYPE RV_MON_PER_CPU 17 19 #include "snep.h" 18 - 19 - static struct rv_monitor rv_snep; 20 - DECLARE_DA_MON_PER_CPU(snep, unsigned char); 20 + #include <rv/da_monitor.h> 21 21 22 22 static void handle_preempt_disable(void *data, unsigned long ip, unsigned long parent_ip) 23 23 { 24 - da_handle_start_event_snep(preempt_disable_snep); 24 + da_handle_start_event(preempt_disable_snep); 25 25 } 26 26 27 27 static void handle_preempt_enable(void *data, unsigned long ip, unsigned long parent_ip) 28 28 { 29 - da_handle_start_event_snep(preempt_enable_snep); 29 + da_handle_start_event(preempt_enable_snep); 30 30 } 31 31 32 32 static void handle_schedule_entry(void *data, bool preempt) 33 33 { 34 - da_handle_event_snep(schedule_entry_snep); 34 + da_handle_event(schedule_entry_snep); 35 35 } 36 36 37 37 static void handle_schedule_exit(void *data, bool is_switch) 38 38 { 39 - da_handle_start_event_snep(schedule_exit_snep); 39 + da_handle_start_event(schedule_exit_snep); 40 40 } 41 41 42 42 static int enable_snep(void) 43 43 { 44 44 int retval; 45 45 46 - retval = da_monitor_init_snep(); 46 + retval = da_monitor_init(); 47 47 if (retval) 48 48 return retval; 49 49 ··· 56 58 57 59 static void disable_snep(void) 58 60 { 59 - rv_snep.enabled = 0; 61 + rv_this.enabled = 0; 60 62 61 63 rv_detach_trace_probe("snep", preempt_disable, handle_preempt_disable); 62 64 rv_detach_trace_probe("snep", preempt_enable, handle_preempt_enable); 63 65 rv_detach_trace_probe("snep", sched_entry_tp, handle_schedule_entry); 64 66 rv_detach_trace_probe("snep", sched_exit_tp, handle_schedule_exit); 65 67 66 - da_monitor_destroy_snep(); 68 + da_monitor_destroy(); 67 69 } 68 70 69 - static struct rv_monitor rv_snep = { 71 + static struct 
rv_monitor rv_this = { 70 72 .name = "snep", 71 73 .description = "schedule does not enable preempt.", 72 74 .enable = enable_snep, 73 75 .disable = disable_snep, 74 - .reset = da_monitor_reset_all_snep, 76 + .reset = da_monitor_reset_all, 75 77 .enabled = 0, 76 78 }; 77 79 78 80 static int __init register_snep(void) 79 81 { 80 - return rv_register_monitor(&rv_snep, &rv_sched); 82 + return rv_register_monitor(&rv_this, &rv_sched); 81 83 } 82 84 83 85 static void __exit unregister_snep(void) 84 86 { 85 - rv_unregister_monitor(&rv_snep); 87 + rv_unregister_monitor(&rv_this); 86 88 } 87 89 88 90 module_init(register_snep);
+2
kernel/trace/rv/monitors/snep/snep.h
··· 5 5 * Documentation/trace/rv/deterministic_automata.rst 6 6 */ 7 7 8 + #define MONITOR_NAME snep 9 + 8 10 enum states_snep { 9 11 non_scheduling_context_snep = 0, 10 12 scheduling_contex_snep,
+12 -14
kernel/trace/rv/monitors/snroc/snroc.c
··· 6 6 #include <linux/init.h> 7 7 #include <linux/rv.h> 8 8 #include <rv/instrumentation.h> 9 - #include <rv/da_monitor.h> 10 9 11 10 #define MODULE_NAME "snroc" 12 11 ··· 13 14 #include <rv_trace.h> 14 15 #include <monitors/sched/sched.h> 15 16 17 + #define RV_MON_TYPE RV_MON_PER_TASK 16 18 #include "snroc.h" 17 - 18 - static struct rv_monitor rv_snroc; 19 - DECLARE_DA_MON_PER_TASK(snroc, unsigned char); 19 + #include <rv/da_monitor.h> 20 20 21 21 static void handle_sched_set_state(void *data, struct task_struct *tsk, int state) 22 22 { 23 - da_handle_event_snroc(tsk, sched_set_state_snroc); 23 + da_handle_event(tsk, sched_set_state_snroc); 24 24 } 25 25 26 26 static void handle_sched_switch(void *data, bool preempt, ··· 27 29 struct task_struct *next, 28 30 unsigned int prev_state) 29 31 { 30 - da_handle_start_event_snroc(prev, sched_switch_out_snroc); 31 - da_handle_event_snroc(next, sched_switch_in_snroc); 32 + da_handle_start_event(prev, sched_switch_out_snroc); 33 + da_handle_event(next, sched_switch_in_snroc); 32 34 } 33 35 34 36 static int enable_snroc(void) 35 37 { 36 38 int retval; 37 39 38 - retval = da_monitor_init_snroc(); 40 + retval = da_monitor_init(); 39 41 if (retval) 40 42 return retval; 41 43 ··· 47 49 48 50 static void disable_snroc(void) 49 51 { 50 - rv_snroc.enabled = 0; 52 + rv_this.enabled = 0; 51 53 52 54 rv_detach_trace_probe("snroc", sched_set_state_tp, handle_sched_set_state); 53 55 rv_detach_trace_probe("snroc", sched_switch, handle_sched_switch); 54 56 55 - da_monitor_destroy_snroc(); 57 + da_monitor_destroy(); 56 58 } 57 59 58 - static struct rv_monitor rv_snroc = { 60 + static struct rv_monitor rv_this = { 59 61 .name = "snroc", 60 62 .description = "set non runnable on its own context.", 61 63 .enable = enable_snroc, 62 64 .disable = disable_snroc, 63 - .reset = da_monitor_reset_all_snroc, 65 + .reset = da_monitor_reset_all, 64 66 .enabled = 0, 65 67 }; 66 68 67 69 static int __init register_snroc(void) 68 70 { 69 - return 
rv_register_monitor(&rv_snroc, &rv_sched); 71 + return rv_register_monitor(&rv_this, &rv_sched); 70 72 } 71 73 72 74 static void __exit unregister_snroc(void) 73 75 { 74 - rv_unregister_monitor(&rv_snroc); 76 + rv_unregister_monitor(&rv_this); 75 77 } 76 78 77 79 module_init(register_snroc);
+2
kernel/trace/rv/monitors/snroc/snroc.h
··· 5 5 * Documentation/trace/rv/deterministic_automata.rst 6 6 */ 7 7 8 + #define MONITOR_NAME snroc 9 + 8 10 enum states_snroc { 9 11 other_context_snroc = 0, 10 12 own_context_snroc,
+18 -20
kernel/trace/rv/monitors/sssw/sssw.c
··· 6 6 #include <linux/init.h> 7 7 #include <linux/rv.h> 8 8 #include <rv/instrumentation.h> 9 - #include <rv/da_monitor.h> 10 9 11 10 #define MODULE_NAME "sssw" 12 11 ··· 14 15 #include <rv_trace.h> 15 16 #include <monitors/sched/sched.h> 16 17 18 + #define RV_MON_TYPE RV_MON_PER_TASK 17 19 #include "sssw.h" 18 - 19 - static struct rv_monitor rv_sssw; 20 - DECLARE_DA_MON_PER_TASK(sssw, unsigned char); 20 + #include <rv/da_monitor.h> 21 21 22 22 static void handle_sched_set_state(void *data, struct task_struct *tsk, int state) 23 23 { 24 24 if (state == TASK_RUNNING) 25 - da_handle_start_event_sssw(tsk, sched_set_state_runnable_sssw); 25 + da_handle_start_event(tsk, sched_set_state_runnable_sssw); 26 26 else 27 - da_handle_event_sssw(tsk, sched_set_state_sleepable_sssw); 27 + da_handle_event(tsk, sched_set_state_sleepable_sssw); 28 28 } 29 29 30 30 static void handle_sched_switch(void *data, bool preempt, ··· 32 34 unsigned int prev_state) 33 35 { 34 36 if (preempt) 35 - da_handle_event_sssw(prev, sched_switch_preempt_sssw); 37 + da_handle_event(prev, sched_switch_preempt_sssw); 36 38 else if (prev_state == TASK_RUNNING) 37 - da_handle_event_sssw(prev, sched_switch_yield_sssw); 39 + da_handle_event(prev, sched_switch_yield_sssw); 38 40 else if (prev_state == TASK_RTLOCK_WAIT) 39 41 /* special case of sleeping task with racy conditions */ 40 - da_handle_event_sssw(prev, sched_switch_blocking_sssw); 42 + da_handle_event(prev, sched_switch_blocking_sssw); 41 43 else 42 - da_handle_event_sssw(prev, sched_switch_suspend_sssw); 43 - da_handle_event_sssw(next, sched_switch_in_sssw); 44 + da_handle_event(prev, sched_switch_suspend_sssw); 45 + da_handle_event(next, sched_switch_in_sssw); 44 46 } 45 47 46 48 static void handle_sched_wakeup(void *data, struct task_struct *p) ··· 49 51 * Wakeup can also lead to signal_wakeup although the system is 50 52 * actually runnable. The monitor can safely start with this event. 
51 53 */ 52 - da_handle_start_event_sssw(p, sched_wakeup_sssw); 54 + da_handle_start_event(p, sched_wakeup_sssw); 53 55 } 54 56 55 57 static void handle_signal_deliver(void *data, int sig, 56 58 struct kernel_siginfo *info, 57 59 struct k_sigaction *ka) 58 60 { 59 - da_handle_event_sssw(current, signal_deliver_sssw); 61 + da_handle_event(current, signal_deliver_sssw); 60 62 } 61 63 62 64 static int enable_sssw(void) 63 65 { 64 66 int retval; 65 67 66 - retval = da_monitor_init_sssw(); 68 + retval = da_monitor_init(); 67 69 if (retval) 68 70 return retval; 69 71 ··· 77 79 78 80 static void disable_sssw(void) 79 81 { 80 - rv_sssw.enabled = 0; 82 + rv_this.enabled = 0; 81 83 82 84 rv_detach_trace_probe("sssw", sched_set_state_tp, handle_sched_set_state); 83 85 rv_detach_trace_probe("sssw", sched_switch, handle_sched_switch); 84 86 rv_detach_trace_probe("sssw", sched_wakeup, handle_sched_wakeup); 85 87 rv_detach_trace_probe("sssw", signal_deliver, handle_signal_deliver); 86 88 87 - da_monitor_destroy_sssw(); 89 + da_monitor_destroy(); 88 90 } 89 91 90 - static struct rv_monitor rv_sssw = { 92 + static struct rv_monitor rv_this = { 91 93 .name = "sssw", 92 94 .description = "set state sleep and wakeup.", 93 95 .enable = enable_sssw, 94 96 .disable = disable_sssw, 95 - .reset = da_monitor_reset_all_sssw, 97 + .reset = da_monitor_reset_all, 96 98 .enabled = 0, 97 99 }; 98 100 99 101 static int __init register_sssw(void) 100 102 { 101 - return rv_register_monitor(&rv_sssw, &rv_sched); 103 + return rv_register_monitor(&rv_this, &rv_sched); 102 104 } 103 105 104 106 static void __exit unregister_sssw(void) 105 107 { 106 - rv_unregister_monitor(&rv_sssw); 108 + rv_unregister_monitor(&rv_this); 107 109 } 108 110 109 111 module_init(register_sssw);
+2
kernel/trace/rv/monitors/sssw/sssw.h
··· 5 5 * Documentation/trace/rv/deterministic_automata.rst 6 6 */ 7 7 8 + #define MONITOR_NAME sssw 9 + 8 10 enum states_sssw { 9 11 runnable_sssw = 0, 10 12 signal_wakeup_sssw,
+16 -18
kernel/trace/rv/monitors/sts/sts.c
··· 6 6 #include <linux/init.h> 7 7 #include <linux/rv.h> 8 8 #include <rv/instrumentation.h> 9 - #include <rv/da_monitor.h> 10 9 11 10 #define MODULE_NAME "sts" 12 11 ··· 15 16 #include <rv_trace.h> 16 17 #include <monitors/sched/sched.h> 17 18 19 + #define RV_MON_TYPE RV_MON_PER_CPU 18 20 #include "sts.h" 19 - 20 - static struct rv_monitor rv_sts; 21 - DECLARE_DA_MON_PER_CPU(sts, unsigned char); 21 + #include <rv/da_monitor.h> 22 22 23 23 #ifdef CONFIG_X86_LOCAL_APIC 24 24 #include <asm/trace/irq_vectors.h> 25 25 26 26 static void handle_vector_irq_entry(void *data, int vector) 27 27 { 28 - da_handle_event_sts(irq_entry_sts); 28 + da_handle_event(irq_entry_sts); 29 29 } 30 30 31 31 static void attach_vector_irq(void) ··· 59 61 60 62 static void handle_irq_disable(void *data, unsigned long ip, unsigned long parent_ip) 61 63 { 62 - da_handle_event_sts(irq_disable_sts); 64 + da_handle_event(irq_disable_sts); 63 65 } 64 66 65 67 static void handle_irq_enable(void *data, unsigned long ip, unsigned long parent_ip) 66 68 { 67 - da_handle_event_sts(irq_enable_sts); 69 + da_handle_event(irq_enable_sts); 68 70 } 69 71 70 72 static void handle_irq_entry(void *data, int irq, struct irqaction *action) 71 73 { 72 - da_handle_event_sts(irq_entry_sts); 74 + da_handle_event(irq_entry_sts); 73 75 } 74 76 75 77 static void handle_sched_switch(void *data, bool preempt, ··· 77 79 struct task_struct *next, 78 80 unsigned int prev_state) 79 81 { 80 - da_handle_event_sts(sched_switch_sts); 82 + da_handle_event(sched_switch_sts); 81 83 } 82 84 83 85 static void handle_schedule_entry(void *data, bool preempt) 84 86 { 85 - da_handle_event_sts(schedule_entry_sts); 87 + da_handle_event(schedule_entry_sts); 86 88 } 87 89 88 90 static void handle_schedule_exit(void *data, bool is_switch) 89 91 { 90 - da_handle_start_event_sts(schedule_exit_sts); 92 + da_handle_start_event(schedule_exit_sts); 91 93 } 92 94 93 95 static int enable_sts(void) 94 96 { 95 97 int retval; 96 98 97 - retval = 
da_monitor_init_sts(); 99 + retval = da_monitor_init(); 98 100 if (retval) 99 101 return retval; 100 102 ··· 111 113 112 114 static void disable_sts(void) 113 115 { 114 - rv_sts.enabled = 0; 116 + rv_this.enabled = 0; 115 117 116 118 rv_detach_trace_probe("sts", irq_disable, handle_irq_disable); 117 119 rv_detach_trace_probe("sts", irq_enable, handle_irq_enable); ··· 121 123 rv_detach_trace_probe("sts", sched_exit_tp, handle_schedule_exit); 122 124 detach_vector_irq(); 123 125 124 - da_monitor_destroy_sts(); 126 + da_monitor_destroy(); 125 127 } 126 128 127 129 /* 128 130 * This is the monitor register section. 129 131 */ 130 - static struct rv_monitor rv_sts = { 132 + static struct rv_monitor rv_this = { 131 133 .name = "sts", 132 134 .description = "schedule implies task switch.", 133 135 .enable = enable_sts, 134 136 .disable = disable_sts, 135 - .reset = da_monitor_reset_all_sts, 137 + .reset = da_monitor_reset_all, 136 138 .enabled = 0, 137 139 }; 138 140 139 141 static int __init register_sts(void) 140 142 { 141 - return rv_register_monitor(&rv_sts, &rv_sched); 143 + return rv_register_monitor(&rv_this, &rv_sched); 142 144 } 143 145 144 146 static void __exit unregister_sts(void) 145 147 { 146 - rv_unregister_monitor(&rv_sts); 148 + rv_unregister_monitor(&rv_this); 147 149 } 148 150 149 151 module_init(register_sts);
+2
kernel/trace/rv/monitors/sts/sts.h
··· 5 5 * Documentation/trace/rv/deterministic_automata.rst 6 6 */ 7 7 8 + #define MONITOR_NAME sts 9 + 8 10 enum states_sts { 9 11 can_sched_sts = 0, 10 12 cant_sched_sts,
+12 -14
kernel/trace/rv/monitors/wip/wip.c
··· 6 6 #include <linux/init.h> 7 7 #include <linux/rv.h> 8 8 #include <rv/instrumentation.h> 9 - #include <rv/da_monitor.h> 10 9 11 10 #define MODULE_NAME "wip" 12 11 ··· 13 14 #include <trace/events/sched.h> 14 15 #include <trace/events/preemptirq.h> 15 16 17 + #define RV_MON_TYPE RV_MON_PER_CPU 16 18 #include "wip.h" 17 - 18 - static struct rv_monitor rv_wip; 19 - DECLARE_DA_MON_PER_CPU(wip, unsigned char); 19 + #include <rv/da_monitor.h> 20 20 21 21 static void handle_preempt_disable(void *data, unsigned long ip, unsigned long parent_ip) 22 22 { 23 - da_handle_event_wip(preempt_disable_wip); 23 + da_handle_event(preempt_disable_wip); 24 24 } 25 25 26 26 static void handle_preempt_enable(void *data, unsigned long ip, unsigned long parent_ip) 27 27 { 28 - da_handle_start_event_wip(preempt_enable_wip); 28 + da_handle_start_event(preempt_enable_wip); 29 29 } 30 30 31 31 static void handle_sched_waking(void *data, struct task_struct *task) 32 32 { 33 - da_handle_event_wip(sched_waking_wip); 33 + da_handle_event(sched_waking_wip); 34 34 } 35 35 36 36 static int enable_wip(void) 37 37 { 38 38 int retval; 39 39 40 - retval = da_monitor_init_wip(); 40 + retval = da_monitor_init(); 41 41 if (retval) 42 42 return retval; 43 43 ··· 49 51 50 52 static void disable_wip(void) 51 53 { 52 - rv_wip.enabled = 0; 54 + rv_this.enabled = 0; 53 55 54 56 rv_detach_trace_probe("wip", preempt_disable, handle_preempt_disable); 55 57 rv_detach_trace_probe("wip", preempt_enable, handle_preempt_enable); 56 58 rv_detach_trace_probe("wip", sched_waking, handle_sched_waking); 57 59 58 - da_monitor_destroy_wip(); 60 + da_monitor_destroy(); 59 61 } 60 62 61 - static struct rv_monitor rv_wip = { 63 + static struct rv_monitor rv_this = { 62 64 .name = "wip", 63 65 .description = "wakeup in preemptive per-cpu testing monitor.", 64 66 .enable = enable_wip, 65 67 .disable = disable_wip, 66 - .reset = da_monitor_reset_all_wip, 68 + .reset = da_monitor_reset_all, 67 69 .enabled = 0, 68 70 }; 69 71 70 
72 static int __init register_wip(void) 71 73 { 72 - return rv_register_monitor(&rv_wip, NULL); 74 + return rv_register_monitor(&rv_this, NULL); 73 75 } 74 76 75 77 static void __exit unregister_wip(void) 76 78 { 77 - rv_unregister_monitor(&rv_wip); 79 + rv_unregister_monitor(&rv_this); 78 80 } 79 81 80 82 module_init(register_wip);
+2
kernel/trace/rv/monitors/wip/wip.h
··· 5 5 * Documentation/trace/rv/deterministic_automata.rst 6 6 */ 7 7 8 + #define MONITOR_NAME wip 9 + 8 10 enum states_wip { 9 11 preemptive_wip = 0, 10 12 non_preemptive_wip,
+13 -15
kernel/trace/rv/monitors/wwnr/wwnr.c
··· 6 6 #include <linux/init.h> 7 7 #include <linux/rv.h> 8 8 #include <rv/instrumentation.h> 9 - #include <rv/da_monitor.h> 10 9 11 10 #define MODULE_NAME "wwnr" 12 11 13 12 #include <rv_trace.h> 14 13 #include <trace/events/sched.h> 15 14 15 + #define RV_MON_TYPE RV_MON_PER_TASK 16 16 #include "wwnr.h" 17 - 18 - static struct rv_monitor rv_wwnr; 19 - DECLARE_DA_MON_PER_TASK(wwnr, unsigned char); 17 + #include <rv/da_monitor.h> 20 18 21 19 static void handle_switch(void *data, bool preempt, struct task_struct *p, 22 20 struct task_struct *n, unsigned int prev_state) 23 21 { 24 22 /* start monitoring only after the first suspension */ 25 23 if (prev_state == TASK_INTERRUPTIBLE) 26 - da_handle_start_event_wwnr(p, switch_out_wwnr); 24 + da_handle_start_event(p, switch_out_wwnr); 27 25 else 28 - da_handle_event_wwnr(p, switch_out_wwnr); 26 + da_handle_event(p, switch_out_wwnr); 29 27 30 - da_handle_event_wwnr(n, switch_in_wwnr); 28 + da_handle_event(n, switch_in_wwnr); 31 29 } 32 30 33 31 static void handle_wakeup(void *data, struct task_struct *p) 34 32 { 35 - da_handle_event_wwnr(p, wakeup_wwnr); 33 + da_handle_event(p, wakeup_wwnr); 36 34 } 37 35 38 36 static int enable_wwnr(void) 39 37 { 40 38 int retval; 41 39 42 - retval = da_monitor_init_wwnr(); 40 + retval = da_monitor_init(); 43 41 if (retval) 44 42 return retval; 45 43 ··· 49 51 50 52 static void disable_wwnr(void) 51 53 { 52 - rv_wwnr.enabled = 0; 54 + rv_this.enabled = 0; 53 55 54 56 rv_detach_trace_probe("wwnr", sched_switch, handle_switch); 55 57 rv_detach_trace_probe("wwnr", sched_wakeup, handle_wakeup); 56 58 57 - da_monitor_destroy_wwnr(); 59 + da_monitor_destroy(); 58 60 } 59 61 60 - static struct rv_monitor rv_wwnr = { 62 + static struct rv_monitor rv_this = { 61 63 .name = "wwnr", 62 64 .description = "wakeup while not running per-task testing model.", 63 65 .enable = enable_wwnr, 64 66 .disable = disable_wwnr, 65 - .reset = da_monitor_reset_all_wwnr, 67 + .reset = da_monitor_reset_all, 66 68 
.enabled = 0, 67 69 }; 68 70 69 71 static int __init register_wwnr(void) 70 72 { 71 - return rv_register_monitor(&rv_wwnr, NULL); 73 + return rv_register_monitor(&rv_this, NULL); 72 74 } 73 75 74 76 static void __exit unregister_wwnr(void) 75 77 { 76 - rv_unregister_monitor(&rv_wwnr); 78 + rv_unregister_monitor(&rv_this); 77 79 } 78 80 79 81 module_init(register_wwnr);
+2
kernel/trace/rv/monitors/wwnr/wwnr.h
··· 5 5 * Documentation/trace/rv/deterministic_automata.rst 6 6 */ 7 7 8 + #define MONITOR_NAME wwnr 9 + 8 10 enum states_wwnr { 9 11 not_running_wwnr = 0, 10 12 running_wwnr,