Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

PM: EM: Assign a unique ID when creating a performance domain

It is necessary to refer to a specific performance domain from
userspace — for example, when updating the energy model of a particular
performance domain.

To this end, assign a unique ID to each performance domain to address it,
and manage them in a global linked list to look up a specific one by
matching ID. IDA is used for ID assignment, and the mutex is used to
protect the global list from concurrent access.

Note that the mutex (em_pd_list_mutex) is not supposed to be held while
holding em_pd_mutex, to avoid an ABBA deadlock.

Signed-off-by: Changwoo Min <changwoo@igalia.com>
Reviewed-by: Lukasz Luba <lukasz.luba@arm.com>
Link: https://patch.msgid.link/20251020220914.320832-2-changwoo@igalia.com
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>

authored by

Changwoo Min and committed by
Rafael J. Wysocki
cbe5aeed 211ddde0

+33 -1
+4
include/linux/energy_model.h
··· 54 54 /** 55 55 * struct em_perf_domain - Performance domain 56 56 * @em_table: Pointer to the runtime modifiable em_perf_table 57 + * @node: node in em_pd_list (in energy_model.c) 58 + * @id: A unique ID number for each performance domain 57 59 * @nr_perf_states: Number of performance states 58 60 * @min_perf_state: Minimum allowed Performance State index 59 61 * @max_perf_state: Maximum allowed Performance State index ··· 73 71 */ 74 72 struct em_perf_domain { 75 73 struct em_perf_table __rcu *em_table; 74 + struct list_head node; 75 + int id; 76 76 int nr_perf_states; 77 77 int min_perf_state; 78 78 int max_perf_state;
+29 -1
kernel/power/energy_model.c
··· 23 23 */ 24 24 static DEFINE_MUTEX(em_pd_mutex); 25 25 26 + /* 27 + * Manage performance domains with IDs. One can iterate the performance domains 28 + * through the list and pick one with their associated ID. The mutex serializes 29 + * the list access. When holding em_pd_list_mutex, em_pd_mutex should not be 30 + * taken to avoid potential deadlock. 31 + */ 32 + static DEFINE_IDA(em_pd_ida); 33 + static LIST_HEAD(em_pd_list); 34 + static DEFINE_MUTEX(em_pd_list_mutex); 35 + 26 36 static void em_cpufreq_update_efficiencies(struct device *dev, 27 37 struct em_perf_state *table); 28 38 static void em_check_capacity_update(void); ··· 406 396 struct em_perf_table *em_table; 407 397 struct em_perf_domain *pd; 408 398 struct device *cpu_dev; 409 - int cpu, ret, num_cpus; 399 + int cpu, ret, num_cpus, id; 410 400 411 401 if (_is_cpu_device(dev)) { 412 402 num_cpus = cpumask_weight(cpus); ··· 429 419 } 430 420 431 421 pd->nr_perf_states = nr_states; 422 + 423 + INIT_LIST_HEAD(&pd->node); 424 + 425 + id = ida_alloc(&em_pd_ida, GFP_KERNEL); 426 + if (id < 0) 427 + return -ENOMEM; 428 + pd->id = id; 432 429 433 430 em_table = em_table_alloc(pd); 434 431 if (!em_table) ··· 461 444 kfree(em_table); 462 445 free_pd: 463 446 kfree(pd); 447 + ida_free(&em_pd_ida, id); 464 448 return -EINVAL; 465 449 } 466 450 ··· 678 660 unlock: 679 661 mutex_unlock(&em_pd_mutex); 680 662 663 + mutex_lock(&em_pd_list_mutex); 664 + list_add_tail(&dev->em_pd->node, &em_pd_list); 665 + mutex_unlock(&em_pd_list_mutex); 666 + 681 667 return ret; 682 668 } 683 669 EXPORT_SYMBOL_GPL(em_dev_register_pd_no_update); ··· 700 678 if (_is_cpu_device(dev)) 701 679 return; 702 680 681 + mutex_lock(&em_pd_list_mutex); 682 + list_del_init(&dev->em_pd->node); 683 + mutex_unlock(&em_pd_list_mutex); 684 + 703 685 /* 704 686 * The mutex separates all register/unregister requests and protects 705 687 * from potential clean-up/setup issues in the debugfs directories. 
··· 714 688 715 689 em_table_free(rcu_dereference_protected(dev->em_pd->em_table, 716 690 lockdep_is_held(&em_pd_mutex))); 691 + 692 + ida_free(&em_pd_ida, dev->em_pd->id); 717 693 718 694 kfree(dev->em_pd); 719 695 dev->em_pd = NULL;