Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

PM / devfreq: Rework freq_table to be local to devfreq struct

On a devfreq PROBE_DEFER, the freq_table in the driver profile struct
is never reset and may be left in an undefined state.

This comes from the fact that we store the freq_table in the driver
profile struct that is commonly defined as static and not reset on
PROBE_DEFER.
We currently skip the reinit of the freq_table if we find
it's already defined, since a driver may declare its own freq_table.

This logic is flawed in the case where the devfreq core generates a
freq_table, sets it in the profile struct and then PROBE_DEFERs, freeing
the freq_table. In this case devfreq will find a non-NULL freq_table
that has been freed, skip the freq_table generation and probe the driver
based on the wrong table.

To fix this and correctly handle PROBE_DEFER, use a local freq_table and
max_state in the devfreq struct and never modify the freq_table present
in the profile struct when the driver provides it.

Fixes: 0ec09ac2cebe ("PM / devfreq: Set the freq_table of devfreq device")
Cc: stable@vger.kernel.org
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Signed-off-by: Chanwoo Choi <cw00.choi@samsung.com>

authored by

Christian Marangi and committed by
Chanwoo Choi
b5d281f6 f44b7996

+46 -44
+34 -37
drivers/devfreq/devfreq.c
··· 123 123 unsigned long *min_freq, 124 124 unsigned long *max_freq) 125 125 { 126 - unsigned long *freq_table = devfreq->profile->freq_table; 126 + unsigned long *freq_table = devfreq->freq_table; 127 127 s32 qos_min_freq, qos_max_freq; 128 128 129 129 lockdep_assert_held(&devfreq->lock); ··· 133 133 * The devfreq drivers can initialize this in either ascending or 134 134 * descending order and devfreq core supports both. 135 135 */ 136 - if (freq_table[0] < freq_table[devfreq->profile->max_state - 1]) { 136 + if (freq_table[0] < freq_table[devfreq->max_state - 1]) { 137 137 *min_freq = freq_table[0]; 138 - *max_freq = freq_table[devfreq->profile->max_state - 1]; 138 + *max_freq = freq_table[devfreq->max_state - 1]; 139 139 } else { 140 - *min_freq = freq_table[devfreq->profile->max_state - 1]; 140 + *min_freq = freq_table[devfreq->max_state - 1]; 141 141 *max_freq = freq_table[0]; 142 142 } 143 143 ··· 169 169 { 170 170 int lev; 171 171 172 - for (lev = 0; lev < devfreq->profile->max_state; lev++) 173 - if (freq == devfreq->profile->freq_table[lev]) 172 + for (lev = 0; lev < devfreq->max_state; lev++) 173 + if (freq == devfreq->freq_table[lev]) 174 174 return lev; 175 175 176 176 return -EINVAL; ··· 178 178 179 179 static int set_freq_table(struct devfreq *devfreq) 180 180 { 181 - struct devfreq_dev_profile *profile = devfreq->profile; 182 181 struct dev_pm_opp *opp; 183 182 unsigned long freq; 184 183 int i, count; ··· 187 188 if (count <= 0) 188 189 return -EINVAL; 189 190 190 - profile->max_state = count; 191 - profile->freq_table = devm_kcalloc(devfreq->dev.parent, 192 - profile->max_state, 193 - sizeof(*profile->freq_table), 194 - GFP_KERNEL); 195 - if (!profile->freq_table) { 196 - profile->max_state = 0; 191 + devfreq->max_state = count; 192 + devfreq->freq_table = devm_kcalloc(devfreq->dev.parent, 193 + devfreq->max_state, 194 + sizeof(*devfreq->freq_table), 195 + GFP_KERNEL); 196 + if (!devfreq->freq_table) 197 197 return -ENOMEM; 198 - } 199 198 200 - 
for (i = 0, freq = 0; i < profile->max_state; i++, freq++) { 199 + for (i = 0, freq = 0; i < devfreq->max_state; i++, freq++) { 201 200 opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq); 202 201 if (IS_ERR(opp)) { 203 - devm_kfree(devfreq->dev.parent, profile->freq_table); 204 - profile->max_state = 0; 202 + devm_kfree(devfreq->dev.parent, devfreq->freq_table); 205 203 return PTR_ERR(opp); 206 204 } 207 205 dev_pm_opp_put(opp); 208 - profile->freq_table[i] = freq; 206 + devfreq->freq_table[i] = freq; 209 207 } 210 208 211 209 return 0; ··· 242 246 243 247 if (lev != prev_lev) { 244 248 devfreq->stats.trans_table[ 245 - (prev_lev * devfreq->profile->max_state) + lev]++; 249 + (prev_lev * devfreq->max_state) + lev]++; 246 250 devfreq->stats.total_trans++; 247 251 } 248 252 ··· 831 835 if (err < 0) 832 836 goto err_dev; 833 837 mutex_lock(&devfreq->lock); 838 + } else { 839 + devfreq->freq_table = devfreq->profile->freq_table; 840 + devfreq->max_state = devfreq->profile->max_state; 834 841 } 835 842 836 843 devfreq->scaling_min_freq = find_available_min_freq(devfreq); ··· 869 870 870 871 devfreq->stats.trans_table = devm_kzalloc(&devfreq->dev, 871 872 array3_size(sizeof(unsigned int), 872 - devfreq->profile->max_state, 873 - devfreq->profile->max_state), 873 + devfreq->max_state, 874 + devfreq->max_state), 874 875 GFP_KERNEL); 875 876 if (!devfreq->stats.trans_table) { 876 877 mutex_unlock(&devfreq->lock); ··· 879 880 } 880 881 881 882 devfreq->stats.time_in_state = devm_kcalloc(&devfreq->dev, 882 - devfreq->profile->max_state, 883 + devfreq->max_state, 883 884 sizeof(*devfreq->stats.time_in_state), 884 885 GFP_KERNEL); 885 886 if (!devfreq->stats.time_in_state) { ··· 1665 1666 1666 1667 mutex_lock(&df->lock); 1667 1668 1668 - for (i = 0; i < df->profile->max_state; i++) 1669 + for (i = 0; i < df->max_state; i++) 1669 1670 count += scnprintf(&buf[count], (PAGE_SIZE - count - 2), 1670 - "%lu ", df->profile->freq_table[i]); 1671 + "%lu ", df->freq_table[i]); 
1671 1672 1672 1673 mutex_unlock(&df->lock); 1673 1674 /* Truncate the trailing space */ ··· 1690 1691 1691 1692 if (!df->profile) 1692 1693 return -EINVAL; 1693 - max_state = df->profile->max_state; 1694 + max_state = df->max_state; 1694 1695 1695 1696 if (max_state == 0) 1696 1697 return sprintf(buf, "Not Supported.\n"); ··· 1707 1708 len += sprintf(buf + len, " :"); 1708 1709 for (i = 0; i < max_state; i++) 1709 1710 len += sprintf(buf + len, "%10lu", 1710 - df->profile->freq_table[i]); 1711 + df->freq_table[i]); 1711 1712 1712 1713 len += sprintf(buf + len, " time(ms)\n"); 1713 1714 1714 1715 for (i = 0; i < max_state; i++) { 1715 - if (df->profile->freq_table[i] 1716 - == df->previous_freq) { 1716 + if (df->freq_table[i] == df->previous_freq) 1717 1717 len += sprintf(buf + len, "*"); 1718 - } else { 1718 + else 1719 1719 len += sprintf(buf + len, " "); 1720 - } 1721 - len += sprintf(buf + len, "%10lu:", 1722 - df->profile->freq_table[i]); 1720 + 1721 + len += sprintf(buf + len, "%10lu:", df->freq_table[i]); 1723 1722 for (j = 0; j < max_state; j++) 1724 1723 len += sprintf(buf + len, "%10u", 1725 1724 df->stats.trans_table[(i * max_state) + j]); ··· 1741 1744 if (!df->profile) 1742 1745 return -EINVAL; 1743 1746 1744 - if (df->profile->max_state == 0) 1747 + if (df->max_state == 0) 1745 1748 return count; 1746 1749 1747 1750 err = kstrtoint(buf, 10, &value); ··· 1749 1752 return -EINVAL; 1750 1753 1751 1754 mutex_lock(&df->lock); 1752 - memset(df->stats.time_in_state, 0, (df->profile->max_state * 1755 + memset(df->stats.time_in_state, 0, (df->max_state * 1753 1756 sizeof(*df->stats.time_in_state))); 1754 1757 memset(df->stats.trans_table, 0, array3_size(sizeof(unsigned int), 1755 - df->profile->max_state, 1756 - df->profile->max_state)); 1758 + df->max_state, 1759 + df->max_state)); 1757 1760 df->stats.total_trans = 0; 1758 1761 df->stats.last_update = get_jiffies_64(); 1759 1762 mutex_unlock(&df->lock);
+7 -7
drivers/devfreq/governor_passive.c
··· 144 144 goto out; 145 145 146 146 /* Use interpolation if required opps is not available */ 147 - for (i = 0; i < parent_devfreq->profile->max_state; i++) 148 - if (parent_devfreq->profile->freq_table[i] == *freq) 147 + for (i = 0; i < parent_devfreq->max_state; i++) 148 + if (parent_devfreq->freq_table[i] == *freq) 149 149 break; 150 150 151 - if (i == parent_devfreq->profile->max_state) 151 + if (i == parent_devfreq->max_state) 152 152 return -EINVAL; 153 153 154 - if (i < devfreq->profile->max_state) { 155 - child_freq = devfreq->profile->freq_table[i]; 154 + if (i < devfreq->max_state) { 155 + child_freq = devfreq->freq_table[i]; 156 156 } else { 157 - count = devfreq->profile->max_state; 158 - child_freq = devfreq->profile->freq_table[count - 1]; 157 + count = devfreq->max_state; 158 + child_freq = devfreq->freq_table[count - 1]; 159 159 } 160 160 161 161 out:
+5
include/linux/devfreq.h
··· 148 148 * reevaluate operable frequencies. Devfreq users may use 149 149 * devfreq.nb to the corresponding register notifier call chain. 150 150 * @work: delayed work for load monitoring. 151 + * @freq_table: current frequency table used by the devfreq driver. 152 + * @max_state: count of entry present in the frequency table. 151 153 * @previous_freq: previously configured frequency value. 152 154 * @last_status: devfreq user device info, performance statistics 153 155 * @data: Private data of the governor. The devfreq framework does not ··· 186 184 struct opp_table *opp_table; 187 185 struct notifier_block nb; 188 186 struct delayed_work work; 187 + 188 + unsigned long *freq_table; 189 + unsigned int max_state; 189 190 190 191 unsigned long previous_freq; 191 192 struct devfreq_dev_status last_status;